/* drivers/net/ethernet/broadcom/tg3.c */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2013 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
50
51 #include <net/checksum.h>
52 #include <net/ip.h>
53
54 #include <linux/io.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
57
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
60
61 #ifdef CONFIG_SPARC
62 #include <asm/idprom.h>
63 #include <asm/prom.h>
64 #endif
65
66 #define BAR_0   0
67 #define BAR_2   2
68
69 #include "tg3.h"
70
71 /* Functions & macros to verify TG3_FLAGS types */
72
/* Return nonzero when @flag is set in the @bits flag bitmap.
 * Type-checked wrapper used by the tg3_flag() macro below.
 */
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}
77
/* Set @flag in the @bits flag bitmap (see tg3_flag_set() macro). */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}
82
/* Clear @flag in the @bits flag bitmap (see tg3_flag_clear() macro). */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}
87
88 #define tg3_flag(tp, flag)                              \
89         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_set(tp, flag)                          \
91         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
92 #define tg3_flag_clear(tp, flag)                        \
93         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
94
95 #define DRV_MODULE_NAME         "tg3"
96 #define TG3_MAJ_NUM                     3
97 #define TG3_MIN_NUM                     129
98 #define DRV_MODULE_VERSION      \
99         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100 #define DRV_MODULE_RELDATE      "January 06, 2013"
101
102 #define RESET_KIND_SHUTDOWN     0
103 #define RESET_KIND_INIT         1
104 #define RESET_KIND_SUSPEND      2
105
106 #define TG3_DEF_RX_MODE         0
107 #define TG3_DEF_TX_MODE         0
108 #define TG3_DEF_MSG_ENABLE        \
109         (NETIF_MSG_DRV          | \
110          NETIF_MSG_PROBE        | \
111          NETIF_MSG_LINK         | \
112          NETIF_MSG_TIMER        | \
113          NETIF_MSG_IFDOWN       | \
114          NETIF_MSG_IFUP         | \
115          NETIF_MSG_RX_ERR       | \
116          NETIF_MSG_TX_ERR)
117
118 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
119
120 /* length of time before we decide the hardware is borked,
121  * and dev->tx_timeout() should be called to fix the problem
122  */
123
124 #define TG3_TX_TIMEOUT                  (5 * HZ)
125
126 /* hardware minimum and maximum for a single frame's data payload */
127 #define TG3_MIN_MTU                     60
128 #define TG3_MAX_MTU(tp) \
129         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
130
131 /* These numbers seem to be hard coded in the NIC firmware somehow.
132  * You can't change the ring sizes, but you can change where you place
133  * them in the NIC onboard memory.
134  */
135 #define TG3_RX_STD_RING_SIZE(tp) \
136         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138 #define TG3_DEF_RX_RING_PENDING         200
139 #define TG3_RX_JMB_RING_SIZE(tp) \
140         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
143
144 /* Do not place this n-ring entries value into the tp struct itself,
145  * we really want to expose these constants to GCC so that modulo et
146  * al.  operations are done with shifts and masks instead of with
147  * hw multiply/modulo instructions.  Another solution would be to
148  * replace things like '% foo' with '& (foo - 1)'.
149  */
150
151 #define TG3_TX_RING_SIZE                512
152 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
153
154 #define TG3_RX_STD_RING_BYTES(tp) \
155         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156 #define TG3_RX_JMB_RING_BYTES(tp) \
157         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158 #define TG3_RX_RCB_RING_BYTES(tp) \
159         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
161                                  TG3_TX_RING_SIZE)
162 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
163
164 #define TG3_DMA_BYTE_ENAB               64
165
166 #define TG3_RX_STD_DMA_SZ               1536
167 #define TG3_RX_JMB_DMA_SZ               9046
168
169 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
170
171 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
173
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
176
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
179
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181  * that are at least dword aligned when used in PCIX mode.  The driver
182  * works around this bug by double copying the packet.  This workaround
183  * is built into the normal double copy length check for efficiency.
184  *
185  * However, the double copy is only necessary on those architectures
186  * where unaligned memory accesses are inefficient.  For those architectures
187  * where unaligned memory accesses incur little penalty, we can reintegrate
188  * the 5701 in the normal rx path.  Doing so saves a device structure
189  * dereference by hardcoding the double copy threshold in place.
190  */
191 #define TG3_RX_COPY_THRESHOLD           256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
194 #else
195         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
196 #endif
197
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
200 #else
201 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
202 #endif
203
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
206 #define TG3_TX_BD_DMA_MAX_2K            2048
207 #define TG3_TX_BD_DMA_MAX_4K            4096
208
209 #define TG3_RAW_IP_ALIGN 2
210
211 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
212 #define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
213
214 #define FIRMWARE_TG3            "tigon/tg3.bin"
215 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
216 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
217
/* Driver version banner, e.g. "tg3.c:v3.129 (January 06, 2013)". */
static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
220
221 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
222 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
223 MODULE_LICENSE("GPL");
224 MODULE_VERSION(DRV_MODULE_VERSION);
225 MODULE_FIRMWARE(FIRMWARE_TG3);
226 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
228
229 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
230 module_param(tg3_debug, int, 0);
231 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
232
233 #define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
234 #define TG3_DRV_DATA_FLAG_5705_10_100   0x0002
235
236 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
237         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
255         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
256          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
257                         TG3_DRV_DATA_FLAG_5705_10_100},
258         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
259          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
260                         TG3_DRV_DATA_FLAG_5705_10_100},
261         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
262         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
263          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
264                         TG3_DRV_DATA_FLAG_5705_10_100},
265         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
269         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
271          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
272         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
275         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
277          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
278         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
283         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
284         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
285         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
286                         PCI_VENDOR_ID_LENOVO,
287                         TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
288          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
289         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
290         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
291          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
292         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
293         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
294         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
295         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
296         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
297         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
298         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
299         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
300         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
301         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
302         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
303         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
304         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
305         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
306         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
307         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
308         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
309         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
310         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
311                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
312          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
313         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
314                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
315          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
316         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
317         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
318         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
319          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
320         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
321         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
322         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
323         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
324         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
325         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
326         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
327         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
328         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
329          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
330         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
331          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
332         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
333         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
334         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
335         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
336         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
337         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
338         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
339         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
340         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
341         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
342         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
343         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
344         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
345         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
346         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
347         {}
348 };
349
350 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
351
/* Statistic names reported via "ethtool -S".
 * NOTE(review): the order appears to mirror the hardware statistics
 * block layout used when filling the values — confirm against
 * tg3_get_ethtool_stats before reordering entries.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        /* Receive-side statistics */
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        /* Transmit-side statistics */
        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        /* DMA write / receive BD statistics */
        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        /* DMA read / completion statistics */
        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        /* Host coalescing / interrupt statistics */
        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};
434
435 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
436 #define TG3_NVRAM_TEST          0
437 #define TG3_LINK_TEST           1
438 #define TG3_REGISTER_TEST       2
439 #define TG3_MEMORY_TEST         3
440 #define TG3_MAC_LOOPB_TEST      4
441 #define TG3_PHY_LOOPB_TEST      5
442 #define TG3_EXT_LOOPB_TEST      6
443 #define TG3_INTERRUPT_TEST      7
444
445
/* Self-test names reported via "ethtool -t", indexed by the
 * TG3_*_TEST constants above.  The trailing spaces keep the
 * online/offline column aligned in ethtool output.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};
458
459 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
460
461
462 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
463 {
464         writel(val, tp->regs + off);
465 }
466
467 static u32 tg3_read32(struct tg3 *tp, u32 off)
468 {
469         return readl(tp->regs + off);
470 }
471
472 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
473 {
474         writel(val, tp->aperegs + off);
475 }
476
477 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
478 {
479         return readl(tp->aperegs + off);
480 }
481
482 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
483 {
484         unsigned long flags;
485
486         spin_lock_irqsave(&tp->indirect_lock, flags);
487         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
488         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
489         spin_unlock_irqrestore(&tp->indirect_lock, flags);
490 }
491
492 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
493 {
494         writel(val, tp->regs + off);
495         readl(tp->regs + off);
496 }
497
498 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
499 {
500         unsigned long flags;
501         u32 val;
502
503         spin_lock_irqsave(&tp->indirect_lock, flags);
504         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
505         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
506         spin_unlock_irqrestore(&tp->indirect_lock, flags);
507         return val;
508 }
509
510 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
511 {
512         unsigned long flags;
513
514         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
515                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
516                                        TG3_64BIT_REG_LOW, val);
517                 return;
518         }
519         if (off == TG3_RX_STD_PROD_IDX_REG) {
520                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
521                                        TG3_64BIT_REG_LOW, val);
522                 return;
523         }
524
525         spin_lock_irqsave(&tp->indirect_lock, flags);
526         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
527         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
528         spin_unlock_irqrestore(&tp->indirect_lock, flags);
529
530         /* In indirect mode when disabling interrupts, we also need
531          * to clear the interrupt bit in the GRC local ctrl register.
532          */
533         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
534             (val == 0x1)) {
535                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
536                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
537         }
538 }
539
540 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
541 {
542         unsigned long flags;
543         u32 val;
544
545         spin_lock_irqsave(&tp->indirect_lock, flags);
546         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
547         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
548         spin_unlock_irqrestore(&tp->indirect_lock, flags);
549         return val;
550 }
551
552 /* usec_wait specifies the wait time in usec when writing to certain registers
553  * where it is unsafe to read back the register without some delay.
554  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
555  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
556  */
557 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
558 {
559         if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
560                 /* Non-posted methods */
561                 tp->write32(tp, off, val);
562         else {
563                 /* Posted method */
564                 tg3_write32(tp, off, val);
565                 if (usec_wait)
566                         udelay(usec_wait);
567                 tp->read32(tp, off);
568         }
569         /* Wait again after the read for the posted method to guarantee that
570          * the wait time is met.
571          */
572         if (usec_wait)
573                 udelay(usec_wait);
574 }
575
576 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
577 {
578         tp->write32_mbox(tp, off, val);
579         if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
580             (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
581              !tg3_flag(tp, ICH_WORKAROUND)))
582                 tp->read32_mbox(tp, off);
583 }
584
585 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
586 {
587         void __iomem *mbox = tp->regs + off;
588         writel(val, mbox);
589         if (tg3_flag(tp, TXD_MBOX_HWBUG))
590                 writel(val, mbox);
591         if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
592             tg3_flag(tp, FLUSH_POSTED_WRITES))
593                 readl(mbox);
594 }
595
596 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
597 {
598         return readl(tp->regs + off + GRCMBOX_BASE);
599 }
600
601 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
602 {
603         writel(val, tp->regs + off + GRCMBOX_BASE);
604 }
605
606 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
607 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
608 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
609 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
610 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
611
612 #define tw32(reg, val)                  tp->write32(tp, reg, val)
613 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
614 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
615 #define tr32(reg)                       tp->read32(tp, reg)
616
617 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
618 {
619         unsigned long flags;
620
621         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
622             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
623                 return;
624
625         spin_lock_irqsave(&tp->indirect_lock, flags);
626         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
627                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
628                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
629
630                 /* Always leave this as zero. */
631                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
632         } else {
633                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
634                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
635
636                 /* Always leave this as zero. */
637                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
638         }
639         spin_unlock_irqrestore(&tp->indirect_lock, flags);
640 }
641
642 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
643 {
644         unsigned long flags;
645
646         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
647             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
648                 *val = 0;
649                 return;
650         }
651
652         spin_lock_irqsave(&tp->indirect_lock, flags);
653         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
654                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
655                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
656
657                 /* Always leave this as zero. */
658                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
659         } else {
660                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
661                 *val = tr32(TG3PCI_MEM_WIN_DATA);
662
663                 /* Always leave this as zero. */
664                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
665         }
666         spin_unlock_irqrestore(&tp->indirect_lock, flags);
667 }
668
669 static void tg3_ape_lock_init(struct tg3 *tp)
670 {
671         int i;
672         u32 regbase, bit;
673
674         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
675                 regbase = TG3_APE_LOCK_GRANT;
676         else
677                 regbase = TG3_APE_PER_LOCK_GRANT;
678
679         /* Make sure the driver hasn't any stale locks. */
680         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
681                 switch (i) {
682                 case TG3_APE_LOCK_PHY0:
683                 case TG3_APE_LOCK_PHY1:
684                 case TG3_APE_LOCK_PHY2:
685                 case TG3_APE_LOCK_PHY3:
686                         bit = APE_LOCK_GRANT_DRIVER;
687                         break;
688                 default:
689                         if (!tp->pci_fn)
690                                 bit = APE_LOCK_GRANT_DRIVER;
691                         else
692                                 bit = 1 << tp->pci_fn;
693                 }
694                 tg3_ape_write32(tp, regbase + 4 * i, bit);
695         }
696
697 }
698
/* Acquire APE lock @locknum on behalf of the driver.
 *
 * Returns 0 on success (or immediately when APE support is absent, or
 * for the GPIO lock on the 5761), -EINVAL for an unknown lock number,
 * and -EBUSY when the lock is not granted within ~1 ms.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
                /* fall through - GPIO lock uses the per-function bit too */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        /* The 5761 uses the legacy request/grant register block. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}
757
/* Release APE hardware lock @locknum previously taken by
 * tg3_ape_lock().  Writing our bit to the grant register drops the
 * lock.  Silently ignores unknown lock numbers and absent APE.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* The 5761 has no GPIO lock to release. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
		/* fallthrough */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Function 0 owns the DRIVER bit; other functions own
		 * their per-function bit (mirrors tg3_ape_lock()).
		 */
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	/* 5761 uses the legacy single grant register block. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
793
794 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
795 {
796         u32 apedata;
797
798         while (timeout_us) {
799                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
800                         return -EBUSY;
801
802                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
803                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
804                         break;
805
806                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
807
808                 udelay(10);
809                 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
810         }
811
812         return timeout_us ? 0 : -EBUSY;
813 }
814
815 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
816 {
817         u32 i, apedata;
818
819         for (i = 0; i < timeout_us / 10; i++) {
820                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
821
822                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
823                         break;
824
825                 udelay(10);
826         }
827
828         return i == timeout_us / 10;
829 }
830
/* Read @len bytes of APE scratchpad memory starting at @base_off into
 * @data, transferred in chunks through the shared message buffer.
 * Only meaningful on NCSI-capable APE firmware (returns 0, a no-op,
 * otherwise).  Returns -ENODEV if the firmware signature is invalid,
 * -EAGAIN if the firmware is not ready or does not service a request,
 * or the error from tg3_ape_event_lock().
 */
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	/* Validate the APE firmware segment signature. */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Locate the shared message buffer: two header words (offset,
	 * length) followed by the data area at msgoff.
	 */
	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		/* Post a scratchpad-read driver event describing the
		 * requested (offset, length) window.
		 */
		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		/* Release the MEM lock taken by tg3_ape_event_lock()
		 * before ringing the APE doorbell.
		 */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		/* Copy this chunk out of the message buffer.
		 * NOTE(review): assumes length is a multiple of 4 —
		 * confirm against callers.
		 */
		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
894
/* Post driver event @event to the APE firmware and ring APE_EVENT_1.
 * Returns -EAGAIN if the firmware is absent or not ready, or the
 * error from tg3_ape_event_lock() if the previous event is still
 * pending after ~1 ms; 0 on success.
 */
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	/* The APE segment signature must be valid before talking to it. */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	/* Drop the MEM lock acquired by tg3_ape_event_lock(), then
	 * ring the doorbell.
	 */
	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
921
/* Notify the APE firmware of a driver state transition (@kind is one
 * of RESET_KIND_INIT / RESET_KIND_SHUTDOWN / RESET_KIND_SUSPEND) by
 * updating host-segment shared memory and posting a state-change
 * event.  No-op unless the APE is enabled.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish the host segment signature/length, bump the
		 * init count, and advertise driver ID and behavior.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		/* Advertise WoL state so the APE keeps the link up for
		 * wake packets when appropriate.
		 */
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
978
/* Disable chip interrupts: mask the PCI interrupt in the misc host
 * control register, then write 1 to every vector's interrupt mailbox
 * to quiesce each source.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
988
/* Re-enable chip interrupts: clear the PCI interrupt mask, re-arm each
 * active vector's mailbox with its last status tag, and force an
 * initial interrupt (or a coalesce-now pass) so work that arrived
 * while interrupts were off is not lost.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	/* Make the irq_sync clear visible before interrupts can fire. */
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* NOTE(review): 1-shot MSI appears to require the
		 * mailbox to be written twice to re-arm — confirm
		 * against chip documentation.
		 */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	/* Leave coal_now without the first two vectors' bits for later
	 * HOSTCC_MODE writes.
	 */
	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
1019
1020 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1021 {
1022         struct tg3 *tp = tnapi->tp;
1023         struct tg3_hw_status *sblk = tnapi->hw_status;
1024         unsigned int work_exists = 0;
1025
1026         /* check for phy events */
1027         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1028                 if (sblk->status & SD_STATUS_LINK_CHG)
1029                         work_exists = 1;
1030         }
1031
1032         /* check for TX work to do */
1033         if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1034                 work_exists = 1;
1035
1036         /* check for RX work to do */
1037         if (tnapi->rx_rcb_prod_idx &&
1038             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1039                 work_exists = 1;
1040
1041         return work_exists;
1042 }
1043
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Ack processed work by writing back the last status tag. */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	/* Order the mailbox write ahead of any subsequent MMIO. */
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
1064
/* Step the core clock down via staged writes to TG3PCI_CLOCK_CTRL.
 * No-op on CPMU-present and 5780-class chips, which manage clocking
 * themselves.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	/* Keep only CLKRUN control and the low 5 bits of the register. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Transition through ALTCLK in two steps.  NOTE(review):
		 * presumably required to avoid glitching the core clock —
		 * confirm against chip documentation.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
1097
1098 #define PHY_BUSY_LOOPS  5000
1099
/* Read PHY register @reg at MII bus address @phy_addr through the
 * MAC's MI communication register.  MI auto-polling is temporarily
 * disabled for the transaction, and access is serialized against the
 * APE firmware via tp->phy_ape_lock.  Busy-waits up to
 * PHY_BUSY_LOOPS * 10us for completion.
 * Returns 0 with *val set on success, -EBUSY on timeout (*val is 0).
 */
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Auto-polling would race with a manual MI transaction. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	/* Build the MI frame: PHY address, register, read command. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for completion: the BUSY bit clears when done. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1153
/* Read PHY register @reg on the device's default PHY (tp->phy_addr). */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
1158
/* Write @val to PHY register @reg at MII bus address @phy_addr through
 * the MAC's MI communication register.  Writes to MII_CTRL1000 and
 * MII_TG3_AUX_CTRL are deliberately skipped (returning success) on
 * FET-style PHYs — see TG3_PHYFLG_IS_FET.  Serialized against the APE
 * firmware via tp->phy_ape_lock; auto-polling is paused during the
 * transaction.  Returns 0 on success, -EBUSY on timeout.
 */
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Auto-polling would race with a manual MI transaction. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	/* Build the MI frame: address, register, data, write command. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for completion: the BUSY bit clears when done. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1212
/* Write @val to PHY register @reg on the default PHY (tp->phy_addr). */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
1217
1218 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1219 {
1220         int err;
1221
1222         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1223         if (err)
1224                 goto done;
1225
1226         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1227         if (err)
1228                 goto done;
1229
1230         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1231                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1232         if (err)
1233                 goto done;
1234
1235         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1236
1237 done:
1238         return err;
1239 }
1240
1241 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1242 {
1243         int err;
1244
1245         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1246         if (err)
1247                 goto done;
1248
1249         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1250         if (err)
1251                 goto done;
1252
1253         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1254                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1255         if (err)
1256                 goto done;
1257
1258         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1259
1260 done:
1261         return err;
1262 }
1263
1264 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1265 {
1266         int err;
1267
1268         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1269         if (!err)
1270                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1271
1272         return err;
1273 }
1274
1275 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1276 {
1277         int err;
1278
1279         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1280         if (!err)
1281                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1282
1283         return err;
1284 }
1285
1286 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1287 {
1288         int err;
1289
1290         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1291                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1292                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1293         if (!err)
1294                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1295
1296         return err;
1297 }
1298
1299 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1300 {
1301         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1302                 set |= MII_TG3_AUXCTL_MISC_WREN;
1303
1304         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1305 }
1306
1307 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1308 {
1309         u32 val;
1310         int err;
1311
1312         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1313
1314         if (err)
1315                 return err;
1316         if (enable)
1317
1318                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1319         else
1320                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1321
1322         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1323                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1324
1325         return err;
1326 }
1327
1328 static int tg3_bmcr_reset(struct tg3 *tp)
1329 {
1330         u32 phy_control;
1331         int limit, err;
1332
1333         /* OK, reset it, and poll the BMCR_RESET bit until it
1334          * clears or we time out.
1335          */
1336         phy_control = BMCR_RESET;
1337         err = tg3_writephy(tp, MII_BMCR, phy_control);
1338         if (err != 0)
1339                 return -EBUSY;
1340
1341         limit = 5000;
1342         while (limit--) {
1343                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1344                 if (err != 0)
1345                         return -EBUSY;
1346
1347                 if ((phy_control & BMCR_RESET) == 0) {
1348                         udelay(40);
1349                         break;
1350                 }
1351                 udelay(10);
1352         }
1353         if (limit < 0)
1354                 return -EBUSY;
1355
1356         return 0;
1357 }
1358
/* phylib mii_bus .read callback.  Serializes PHY access with tp->lock.
 * Returns the register value, or -EIO (passed back through the u32 as
 * the int return value) on failure.
 */
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}
1373
/* phylib mii_bus .write callback.  Serializes PHY access with
 * tp->lock.  Returns 0 on success, -EIO on failure.
 */
static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
1388
/* phylib mii_bus .reset callback — intentionally a no-op; the driver
 * resets the PHY itself via tg3_bmcr_reset() when needed.
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1393
/* Program the 5785 MAC's PHY interface registers (MAC_PHYCFG1/2 and
 * MAC_EXT_RGMII_MODE) to match the attached PHY's LED modes and, for
 * RGMII PHYs, the in-band status signaling configuration.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	/* Pick LED modes by PHY model; unknown PHYs are left untouched. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		/* Non-RGMII: set LED modes and clock timeouts only. */
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII with in-band status needs the full mask/enable set. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Mirror the in-band settings into the external RGMII mode reg. */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1474
/* Take ownership of the MDIO interface by turning off MI auto-polling,
 * then reapply the 5785-specific MDIO configuration if the bus has
 * already been initialized.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1485
/* Set up MDIO/PHY access: determine the PHY address (5717+ parts use
 * per-function addresses, offset by 7 when the port is strapped as
 * serdes), start the MDIO interface, and — when phylib is in use —
 * allocate and register an mii_bus, then apply per-PHY-model interface
 * mode and dev_flags quirks.  Returns 0 or a negative errno.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* One PHY per PCI function, numbered from 1. */
		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	/* Only probe the single address our PHY lives at. */
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Apply per-model quirks: interface mode plus RGMII/powerdown
	 * dev_flags consumed by the PHY driver.
	 */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1590
1591 static void tg3_mdio_fini(struct tg3 *tp)
1592 {
1593         if (tg3_flag(tp, MDIOBUS_INITED)) {
1594                 tg3_flag_clear(tp, MDIOBUS_INITED);
1595                 mdiobus_unregister(tp->mdio_bus);
1596                 mdiobus_free(tp->mdio_bus);
1597         }
1598 }
1599
1600 /* tp->lock is held. */
1601 static inline void tg3_generate_fw_event(struct tg3 *tp)
1602 {
1603         u32 val;
1604
1605         val = tr32(GRC_RX_CPU_EVENT);
1606         val |= GRC_RX_CPU_DRIVER_EVENT;
1607         tw32_f(GRC_RX_CPU_EVENT, val);
1608
1609         tp->last_event_jiffies = jiffies;
1610 }
1611
1612 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1613
/* tp->lock is held. */
/* Wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC) for the firmware to ACK
 * the previous driver event by clearing GRC_RX_CPU_DRIVER_EVENT.
 * Uses tp->last_event_jiffies to skip or shorten the wait when enough
 * wall time has already elapsed.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in ~8us steps. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1640
/* tp->lock is held. */
/* Snapshot four words of PHY state for the firmware link report:
 * data[0] = BMCR:BMSR, data[1] = ADVERTISE:LPA,
 * data[2] = CTRL1000:STAT1000 (copper PHYs only), data[3] = PHYADDR:0.
 * A failed register read leaves the corresponding bits zero.
 */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	/* 1000BASE-T registers are skipped on MII-serdes devices. */
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
1675
1676 /* tp->lock is held. */
1677 static void tg3_ump_link_report(struct tg3 *tp)
1678 {
1679         u32 data[4];
1680
1681         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1682                 return;
1683
1684         tg3_phy_gather_ump_data(tp, data);
1685
1686         tg3_wait_for_event_ack(tp);
1687
1688         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1689         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1690         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1691         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1692         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1693         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1694
1695         tg3_generate_fw_event(tp);
1696 }
1697
/* tp->lock is held.
 *
 * Ask the ASF firmware to pause itself before a chip reset.  Skipped
 * when the APE manages the firmware (ENABLE_APE) instead of the legacy
 * ASF event mechanism.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
1713
1714 /* tp->lock is held. */
1715 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1716 {
1717         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1718                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1719
1720         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1721                 switch (kind) {
1722                 case RESET_KIND_INIT:
1723                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1724                                       DRV_STATE_START);
1725                         break;
1726
1727                 case RESET_KIND_SHUTDOWN:
1728                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1729                                       DRV_STATE_UNLOAD);
1730                         break;
1731
1732                 case RESET_KIND_SUSPEND:
1733                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1734                                       DRV_STATE_SUSPEND);
1735                         break;
1736
1737                 default:
1738                         break;
1739                 }
1740         }
1741
1742         if (kind == RESET_KIND_INIT ||
1743             kind == RESET_KIND_SUSPEND)
1744                 tg3_ape_driver_state_change(tp, kind);
1745 }
1746
1747 /* tp->lock is held. */
1748 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1749 {
1750         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1751                 switch (kind) {
1752                 case RESET_KIND_INIT:
1753                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1754                                       DRV_STATE_START_DONE);
1755                         break;
1756
1757                 case RESET_KIND_SHUTDOWN:
1758                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1759                                       DRV_STATE_UNLOAD_DONE);
1760                         break;
1761
1762                 default:
1763                         break;
1764                 }
1765         }
1766
1767         if (kind == RESET_KIND_SHUTDOWN)
1768                 tg3_ape_driver_state_change(tp, kind);
1769 }
1770
1771 /* tp->lock is held. */
1772 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1773 {
1774         if (tg3_flag(tp, ENABLE_ASF)) {
1775                 switch (kind) {
1776                 case RESET_KIND_INIT:
1777                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1778                                       DRV_STATE_START);
1779                         break;
1780
1781                 case RESET_KIND_SHUTDOWN:
1782                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1783                                       DRV_STATE_UNLOAD);
1784                         break;
1785
1786                 case RESET_KIND_SUSPEND:
1787                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1788                                       DRV_STATE_SUSPEND);
1789                         break;
1790
1791                 default:
1792                         break;
1793                 }
1794         }
1795 }
1796
/* Poll for on-chip firmware boot completion after a reset.
 *
 * Returns 0 on success (including SSB cores, which run no firmware, and
 * parts fitted without firmware), or -ENODEV if the 5906 VCPU never
 * signals init-done.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	/* The 5906 uses a VCPU status bit rather than the mailbox magic. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete.  The firmware
	 * writes back the one's complement of the magic value (up to ~1s).
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1845
1846 static void tg3_link_report(struct tg3 *tp)
1847 {
1848         if (!netif_carrier_ok(tp->dev)) {
1849                 netif_info(tp, link, tp->dev, "Link is down\n");
1850                 tg3_ump_link_report(tp);
1851         } else if (netif_msg_link(tp)) {
1852                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1853                             (tp->link_config.active_speed == SPEED_1000 ?
1854                              1000 :
1855                              (tp->link_config.active_speed == SPEED_100 ?
1856                               100 : 10)),
1857                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1858                              "full" : "half"));
1859
1860                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1861                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1862                             "on" : "off",
1863                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1864                             "on" : "off");
1865
1866                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1867                         netdev_info(tp->dev, "EEE is %s\n",
1868                                     tp->setlpicnt ? "enabled" : "disabled");
1869
1870                 tg3_ump_link_report(tp);
1871         }
1872 }
1873
1874 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1875 {
1876         u16 miireg;
1877
1878         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1879                 miireg = ADVERTISE_1000XPAUSE;
1880         else if (flow_ctrl & FLOW_CTRL_TX)
1881                 miireg = ADVERTISE_1000XPSE_ASYM;
1882         else if (flow_ctrl & FLOW_CTRL_RX)
1883                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1884         else
1885                 miireg = 0;
1886
1887         return miireg;
1888 }
1889
1890 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1891 {
1892         u8 cap = 0;
1893
1894         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1895                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1896         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1897                 if (lcladv & ADVERTISE_1000XPAUSE)
1898                         cap = FLOW_CTRL_RX;
1899                 if (rmtadv & ADVERTISE_1000XPAUSE)
1900                         cap = FLOW_CTRL_TX;
1901         }
1902
1903         return cap;
1904 }
1905
/* Resolve and program TX/RX pause (flow control) after (re)negotiation.
 *
 * @lcladv, @rmtadv: local and link-partner pause advertisement bits;
 * only consulted when pause autonegotiation applies.  The MAC RX/TX
 * mode registers are only rewritten when their enable bits change.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	/* With phylib, autoneg state lives in the phy_device. */
	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Serdes links use 1000BASE-X pause resolution; copper
		 * uses the standard MII full-duplex resolution helper.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1944
/* phylib link-change callback (registered via phy_connect() in
 * tg3_phy_init()).
 *
 * Mirrors the negotiated speed/duplex/pause from the phy_device into
 * the MAC mode, MII status, and TX lengths registers under tp->lock,
 * then reports the link state (outside the lock) if anything changed.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* Select MII vs GMII port mode from the negotiated speed;
		 * the 5785 is the only chip that stays in MII mode for
		 * non-gigabit speeds here.
		 */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Pause is only resolved for full duplex. */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* Half-duplex gigabit requires a larger slot time. */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* Log/notify outside the lock. */
	if (linkmesg)
		tg3_link_report(tp);
}
2028
/* Connect the MAC to its PHY through phylib and clamp the PHY's
 * supported feature set to what the MAC/board can do.
 *
 * Returns 0 if already connected or on success, a negative errno on
 * failure (connect error or unsupported PHY interface mode).
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only boards fall back to the basic feature set. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	/* Advertise everything we support. */
	phydev->advertising = phydev->supported;

	return 0;
}
2076
2077 static void tg3_phy_start(struct tg3 *tp)
2078 {
2079         struct phy_device *phydev;
2080
2081         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2082                 return;
2083
2084         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2085
2086         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2087                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2088                 phydev->speed = tp->link_config.speed;
2089                 phydev->duplex = tp->link_config.duplex;
2090                 phydev->autoneg = tp->link_config.autoneg;
2091                 phydev->advertising = tp->link_config.advertising;
2092         }
2093
2094         phy_start(phydev);
2095
2096         phy_start_aneg(phydev);
2097 }
2098
2099 static void tg3_phy_stop(struct tg3 *tp)
2100 {
2101         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2102                 return;
2103
2104         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2105 }
2106
2107 static void tg3_phy_fini(struct tg3 *tp)
2108 {
2109         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2110                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2111                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2112         }
2113 }
2114
2115 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2116 {
2117         int err;
2118         u32 val;
2119
2120         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2121                 return 0;
2122
2123         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2124                 /* Cannot do read-modify-write on 5401 */
2125                 err = tg3_phy_auxctl_write(tp,
2126                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2127                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2128                                            0x4c20);
2129                 goto done;
2130         }
2131
2132         err = tg3_phy_auxctl_read(tp,
2133                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2134         if (err)
2135                 return err;
2136
2137         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2138         err = tg3_phy_auxctl_write(tp,
2139                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2140
2141 done:
2142         return err;
2143 }
2144
2145 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2146 {
2147         u32 phytest;
2148
2149         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2150                 u32 phy;
2151
2152                 tg3_writephy(tp, MII_TG3_FET_TEST,
2153                              phytest | MII_TG3_FET_SHADOW_EN);
2154                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2155                         if (enable)
2156                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2157                         else
2158                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2159                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2160                 }
2161                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2162         }
2163 }
2164
/* Enable/disable auto power-down (APD) in the PHY.
 *
 * Skipped on pre-5705 chips, and on 5717+ parts with an MII serdes PHY.
 * FET-style PHYs are handled through their shadow register interface;
 * others are programmed via the MISC shadow register.
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	/* Program SCR5 power-saving options; the DLL-APD bit is left
	 * set except when enabling APD on a 5784.
	 */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	/* Program the APD register: 84ms wake timer, plus the enable
	 * bit when requested.
	 */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
2199
/* Enable/disable automatic MDI/MDI-X crossover detection in the PHY.
 *
 * No-op on pre-5705 chips and on serdes PHYs.  FET-style PHYs use the
 * shadow register window; other PHYs use the aux control MISC register.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			/* Open the shadow register window. */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			/* Restore the original test register value. */
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
2240
2241 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2242 {
2243         int ret;
2244         u32 val;
2245
2246         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2247                 return;
2248
2249         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2250         if (!ret)
2251                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2252                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2253 }
2254
/* Apply PHY tuning values read from the chip's one-time-programmable
 * (OTP) memory to the PHY DSP registers.  No-op when no OTP data was
 * cached in tp->phy_otp.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* DSP register access requires the SMDSP aux control bit. */
	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
2291
/* Adjust Energy Efficient Ethernet state after a link change.
 *
 * On an autonegotiated full-duplex 100/1000 link with an EEE-capable
 * link partner, program the LPI exit timer and arm tp->setlpicnt so the
 * periodic timer enables LPI later.  Otherwise disable LPI and clear
 * the DSP TAP26 workaround bits.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit timer depends on link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Check the clause-45 EEE resolution status for an
		 * EEE-capable link partner.
		 */
		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up == 1 &&
		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2334
/* Turn on the CPMU's low-power-idle (LPI) EEE mode.
 *
 * On 5717/5719 and 57765-class chips running at gigabit speed, the DSP
 * TAP26 workaround bits are programmed first.
 */
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
2353
/* Poll MII_TG3_DSP_CONTROL until the macro-busy bit (0x1000) clears.
 *
 * This is a bounded busy-wait with no explicit delay; the MDIO read
 * itself paces the loop.  Returns 0 when the bit cleared, -EBUSY if it
 * never did within the poll budget.
 */
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	/* limit is -1 only when the loop ran to exhaustion; a break
	 * always leaves it >= 0.
	 */
	if (limit < 0)
		return -EBUSY;

	return 0;
}
2371
/* Write known DSP test patterns to all four PHY channels and read them
 * back to verify (part of the 5703/4/5 PHY reset workaround).
 *
 * On any write/verify failure, *resetp is set so the caller performs a
 * fresh PHY reset before retrying.  Returns 0 on success, -EBUSY on a
 * mismatch or macro timeout.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Per-channel pattern: three (low, high) word pairs. */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's DSP block and write the pattern. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and start the read-back macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Verify each (low, high) pair against the pattern. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2437
/* Clear (zero out) the DSP test patterns on all four PHY channels that
 * tg3_phy_write_and_check_testpat() wrote.  Returns 0 on success or
 * -EBUSY if the DSP macro never completed.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
2457
2458 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2459 {
2460         u32 reg32, phy9_orig;
2461         int retries, do_phy_reset, err;
2462
2463         retries = 10;
2464         do_phy_reset = 1;
2465         do {
2466                 if (do_phy_reset) {
2467                         err = tg3_bmcr_reset(tp);
2468                         if (err)
2469                                 return err;
2470                         do_phy_reset = 0;
2471                 }
2472
2473                 /* Disable transmitter and interrupt.  */
2474                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2475                         continue;
2476
2477                 reg32 |= 0x3000;
2478                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2479
2480                 /* Set full-duplex, 1000 mbps.  */
2481                 tg3_writephy(tp, MII_BMCR,
2482                              BMCR_FULLDPLX | BMCR_SPEED1000);
2483
2484                 /* Set to master mode.  */
2485                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2486                         continue;
2487
2488                 tg3_writephy(tp, MII_CTRL1000,
2489                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2490
2491                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2492                 if (err)
2493                         return err;
2494
2495                 /* Block the PHY control access.  */
2496                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2497
2498                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2499                 if (!err)
2500                         break;
2501         } while (--retries);
2502
2503         err = tg3_phy_reset_chanpat(tp);
2504         if (err)
2505                 return err;
2506
2507         tg3_phydsp_write(tp, 0x8005, 0x0000);
2508
2509         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2510         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2511
2512         tg3_phy_toggle_auxctl_smdsp(tp, false);
2513
2514         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2515
2516         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2517                 reg32 &= ~0x3000;
2518                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2519         } else if (!err)
2520                 err = -EBUSY;
2521
2522         return err;
2523 }
2524
/* Tell the network stack the carrier is present and cache the state
 * in tp->link_up for the driver's own checks (e.g. tg3_phy_reset()).
 */
static void tg3_carrier_on(struct tg3 *tp)
{
	netif_carrier_on(tp->dev);
	tp->link_up = true;
}
2530
/* Tell the network stack the carrier is gone and cache the state in
 * tp->link_up for the driver's own checks.
 */
static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}
2536
/* Reset the tigon3 PHY and reapply the chip-specific PHY
 * workarounds and fixups for the current chip revision.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	/* 5906: pull the ethernet PHY out of the IDDQ low-power state
	 * before attempting any MII access.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Two BMSR reads to verify the PHY responds at all. */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	/* The reset drops the link; report link-down first. */
	if (netif_running(tp->dev) && tp->link_up) {
		tg3_carrier_off(tp);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 use a dedicated reset/workaround sequence
	 * that already includes the BMCR reset, so skip the generic
	 * reset path below.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* 5784 (non-AX revs): temporarily clear the CPMU "GPHY 10MB RX
	 * only" mode around the reset and restore it afterwards.
	 */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	/* 5784-AX/5761-AX: undo the 12.5MHz MAC clock selection that
	 * tg3_power_down_phy() programs while the PHY is down.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	/* No further fixups apply to 5717+ parts with an MII serdes. */
	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* The remaining fixups are keyed off per-PHY bug flags. */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		/* NOTE(review): the double write appears deliberate but
		 * is undocumented here -- confirm before changing.
		 */
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2680
/* GPIO power-handshake messages exchanged between the PCI functions
 * of a multi-port device (via TG3_APE_GPIO_MSG on 5717/5719, or
 * TG3_CPMU_DRV_STATUS otherwise; see tg3_set_function_status()).
 * Each function owns one 4-bit nibble, hence the "ALL" masks repeat
 * the per-function bits at a stride of 4 for up to four functions.
 */
#define TG3_GPIO_MSG_DRVR_PRES           0x00000001
#define TG3_GPIO_MSG_NEED_VAUX           0x00000002
#define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
                                          TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
        ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
         (TG3_GPIO_MSG_DRVR_PRES << 4) | \
         (TG3_GPIO_MSG_DRVR_PRES << 8) | \
         (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
        ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
         (TG3_GPIO_MSG_NEED_VAUX << 4) | \
         (TG3_GPIO_MSG_NEED_VAUX << 8) | \
         (TG3_GPIO_MSG_NEED_VAUX << 12))
2696
2697 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2698 {
2699         u32 status, shift;
2700
2701         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2702             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2703                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2704         else
2705                 status = tr32(TG3_CPMU_DRV_STATUS);
2706
2707         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2708         status &= ~(TG3_GPIO_MSG_MASK << shift);
2709         status |= (newstat << shift);
2710
2711         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2712             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2713                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2714         else
2715                 tw32(TG3_CPMU_DRV_STATUS, status);
2716
2717         return status >> TG3_APE_GPIO_MSG_SHIFT;
2718 }
2719
2720 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2721 {
2722         if (!tg3_flag(tp, IS_NIC))
2723                 return 0;
2724
2725         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2726             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2727             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2728                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2729                         return -EIO;
2730
2731                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2732
2733                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2734                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2735
2736                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2737         } else {
2738                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2739                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2740         }
2741
2742         return 0;
2743 }
2744
2745 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2746 {
2747         u32 grc_local_ctrl;
2748
2749         if (!tg3_flag(tp, IS_NIC) ||
2750             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2751             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2752                 return;
2753
2754         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2755
2756         tw32_wait_f(GRC_LOCAL_CTRL,
2757                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2758                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2759
2760         tw32_wait_f(GRC_LOCAL_CTRL,
2761                     grc_local_ctrl,
2762                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2763
2764         tw32_wait_f(GRC_LOCAL_CTRL,
2765                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2766                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2767 }
2768
/* Switch the board's power source to auxiliary power (Vaux) by
 * stepping the GRC local-control GPIOs.  The GPIO recipe differs per
 * chip family; every step waits TG3_GRC_LCLCTL_PWRSW_DELAY for the
 * external power switch to settle.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* 5700/5701: single write driving GPIO 0 and 1 high
		 * with output enables set on GPIO 0-2.
		 */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Assert GPIO 0 only after the first step settles. */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			/* Finally drop GPIO 2 again. */
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2845
2846 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2847 {
2848         u32 msg = 0;
2849
2850         /* Serialize power state transitions */
2851         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2852                 return;
2853
2854         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2855                 msg = TG3_GPIO_MSG_NEED_VAUX;
2856
2857         msg = tg3_set_function_status(tp, msg);
2858
2859         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2860                 goto done;
2861
2862         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2863                 tg3_pwrsrc_switch_to_vaux(tp);
2864         else
2865                 tg3_pwrsrc_die_with_vmain(tp);
2866
2867 done:
2868         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2869 }
2870
/* Decide whether auxiliary power (Vaux) is still required -- for WoL
 * or ASF firmware -- on this device and, for two-port boards, on its
 * peer function, then switch the power source accordingly.  When
 * @include_wol is false, WoL is ignored in the decision.
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	/* 5717/5719/5720 coordinate through the APE handshake. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			/* Peer driver is still fully up; it owns the
			 * power-source decision.
			 */
			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
2914
2915 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2916 {
2917         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2918                 return 1;
2919         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2920                 if (speed != SPEED_10)
2921                         return 1;
2922         } else if (speed == SPEED_10)
2923                 return 1;
2924
2925         return 0;
2926 }
2927
/* Put the PHY into its lowest safe power state before the chip is
 * powered down.  Serdes parts, 5906, FET-style PHYs and several buggy
 * revisions each need different handling; on chips where powering the
 * PHY down is known to be unsafe the function returns without writing
 * BMCR_PDOWN.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		/* 5704 serdes: hold the link in soft reset under HW
		 * autoneg control instead of powering it down.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the PHY, then put it into the IDDQ
		 * low-power state via GRC_MISC_CFG.
		 */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			/* Stop advertising anything and restart
			 * autoneg before entering low power.
			 */
			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Expose the shadow registers and set the
			 * AUXMODE4 SBPD bit, then hide them again.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		/* Force the LEDs off and select the low-power /
		 * isolate auxctl power-control mode.
		 */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
	     !tp->pci_fn))
		return;

	/* 5784-AX/5761-AX: drop the MAC clock to 12.5MHz while the
	 * PHY is down (tg3_phy_reset() restores it).
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3002
3003 /* tp->lock is held. */
3004 static int tg3_nvram_lock(struct tg3 *tp)
3005 {
3006         if (tg3_flag(tp, NVRAM)) {
3007                 int i;
3008
3009                 if (tp->nvram_lock_cnt == 0) {
3010                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3011                         for (i = 0; i < 8000; i++) {
3012                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3013                                         break;
3014                                 udelay(20);
3015                         }
3016                         if (i == 8000) {
3017                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3018                                 return -ENODEV;
3019                         }
3020                 }
3021                 tp->nvram_lock_cnt++;
3022         }
3023         return 0;
3024 }
3025
3026 /* tp->lock is held. */
3027 static void tg3_nvram_unlock(struct tg3 *tp)
3028 {
3029         if (tg3_flag(tp, NVRAM)) {
3030                 if (tp->nvram_lock_cnt > 0)
3031                         tp->nvram_lock_cnt--;
3032                 if (tp->nvram_lock_cnt == 0)
3033                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3034         }
3035 }
3036
3037 /* tp->lock is held. */
3038 static void tg3_enable_nvram_access(struct tg3 *tp)
3039 {
3040         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3041                 u32 nvaccess = tr32(NVRAM_ACCESS);
3042
3043                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3044         }
3045 }
3046
3047 /* tp->lock is held. */
3048 static void tg3_disable_nvram_access(struct tg3 *tp)
3049 {
3050         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3051                 u32 nvaccess = tr32(NVRAM_ACCESS);
3052
3053                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3054         }
3055 }
3056
/* Read one 32-bit word from the seeprom at @offset through the GRC
 * EEPROM interface registers (used when the NVRAM flag is not set).
 * @offset must be dword aligned and within EEPROM_ADDR_ADDR_MASK.
 * Returns 0 with the word stored in @val, -EINVAL on a bad offset,
 * or -EBUSY on timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Program the address and kick off a read. */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, up to ~1s. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
3096
3097 #define NVRAM_CMD_TIMEOUT 10000
3098
3099 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3100 {
3101         int i;
3102
3103         tw32(NVRAM_CMD, nvram_cmd);
3104         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3105                 udelay(10);
3106                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3107                         udelay(10);
3108                         break;
3109                 }
3110         }
3111
3112         if (i == NVRAM_CMD_TIMEOUT)
3113                 return -EBUSY;
3114
3115         return 0;
3116 }
3117
3118 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3119 {
3120         if (tg3_flag(tp, NVRAM) &&
3121             tg3_flag(tp, NVRAM_BUFFERED) &&
3122             tg3_flag(tp, FLASH) &&
3123             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3124             (tp->nvram_jedecnum == JEDEC_ATMEL))
3125
3126                 addr = ((addr / tp->nvram_pagesize) <<
3127                         ATMEL_AT45DB0X1B_PAGE_POS) +
3128                        (addr % tp->nvram_pagesize);
3129
3130         return addr;
3131 }
3132
3133 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3134 {
3135         if (tg3_flag(tp, NVRAM) &&
3136             tg3_flag(tp, NVRAM_BUFFERED) &&
3137             tg3_flag(tp, FLASH) &&
3138             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3139             (tp->nvram_jedecnum == JEDEC_ATMEL))
3140
3141                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3142                         tp->nvram_pagesize) +
3143                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3144
3145         return addr;
3146 }
3147
3148 /* NOTE: Data read in from NVRAM is byteswapped according to
3149  * the byteswapping settings for all other register accesses.
3150  * tg3 devices are BE devices, so on a BE machine, the data
3151  * returned will be exactly as it is seen in NVRAM.  On a LE
3152  * machine, the 32-bit value will be byteswapped.
3153  */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	/* Parts without the NVRAM interface fall back to the direct
	 * seeprom access registers.
	 */
	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Translate the logical offset to the flash's physical layout. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	/* Take the NVRAM hardware arbitration lock for the access. */
	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	/* Only valid once the read command completed successfully. */
	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
3185
3186 /* Ensures NVRAM data is in bytestream format. */
3187 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3188 {
3189         u32 v;
3190         int res = tg3_nvram_read(tp, offset, &v);
3191         if (!res)
3192                 *val = cpu_to_be32(v);
3193         return res;
3194 }
3195
/* Write @len bytes from @buf to the seeprom starting at @offset, one
 * 32-bit word at a time, via the GRC EEPROM registers.  @offset and
 * @len are expected to be dword aligned (see the callers' comments).
 * Returns 0 on success or -EBUSY if a word write does not complete.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Write the COMPLETE bit back first (presumably
		 * write-one-to-clear -- TODO confirm) before starting
		 * the next transaction.
		 */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion, up to ~1s per word. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
3244
/* offset and length are dword aligned.
 *
 * Read-modify-write @len bytes at NVRAM @offset for flash parts that
 * must be erased a page at a time: each affected page is read into a
 * scratch buffer, patched, erased, and rewritten word by word.
 *
 * NOTE(review): @buf is never advanced inside the loop, so every
 * iteration patches from the start of @buf; for a write spanning
 * multiple pages this looks wrong -- confirm callers only ever issue
 * writes contained in one page through this path.
 * NOTE(review): when @offset is not page aligned, size can exceed
 * pagesize - page_off, and memcpy(tmp + page_off, buf, size) would
 * then write past the end of the page buffer -- confirm callers
 * guarantee alignment or short lengths.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Scratch buffer holding one full flash page. */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start address of the page containing @offset. */
		phy_addr = offset & ~pagemask;

		/* Read the whole page so the unmodified words survive
		 * the erase below.
		 */
		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		/* Advance to the start of the next page. */
		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Write the patched page back one dword at a time,
		 * flagging the first and last words of the burst.
		 */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	/* Always leave the part write-disabled, even on error. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
3343
/* offset and length are dword aligned */
/*
 * Write @len bytes from @buf to buffered flash (or EEPROM) at @offset, one
 * dword per command.  Page boundaries are communicated to the controller via
 * NVRAM_CMD_FIRST/NVRAM_CMD_LAST flags instead of explicit erase cycles.
 * Returns 0 or a negative errno from tg3_nvram_exec_cmd().
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		/* buf may be unaligned; copy out the next big-endian dword. */
		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		/* Translate to the device's physical NVRAM addressing. */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* First dword of a page, or of the whole transfer. */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		/* Last dword of a page. */
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* Last dword of the whole transfer. */
		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* 57765+ flash parts auto-increment the address; only
		 * reprogram NVRAM_ADDR when starting a new burst or when
		 * the part doesn't auto-increment.
		 */
		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		/* Older ST parts need an explicit write-enable before each
		 * page write.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
3398
3399 /* offset and length are dword aligned */
3400 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3401 {
3402         int ret;
3403
3404         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3405                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3406                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3407                 udelay(40);
3408         }
3409
3410         if (!tg3_flag(tp, NVRAM)) {
3411                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3412         } else {
3413                 u32 grc_mode;
3414
3415                 ret = tg3_nvram_lock(tp);
3416                 if (ret)
3417                         return ret;
3418
3419                 tg3_enable_nvram_access(tp);
3420                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3421                         tw32(NVRAM_WRITE1, 0x406);
3422
3423                 grc_mode = tr32(GRC_MODE);
3424                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3425
3426                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3427                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3428                                 buf);
3429                 } else {
3430                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3431                                 buf);
3432                 }
3433
3434                 grc_mode = tr32(GRC_MODE);
3435                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3436
3437                 tg3_disable_nvram_access(tp);
3438                 tg3_nvram_unlock(tp);
3439         }
3440
3441         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3442                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3443                 udelay(40);
3444         }
3445
3446         return ret;
3447 }
3448
3449 #define RX_CPU_SCRATCH_BASE     0x30000
3450 #define RX_CPU_SCRATCH_SIZE     0x04000
3451 #define TX_CPU_SCRATCH_BASE     0x34000
3452 #define TX_CPU_SCRATCH_SIZE     0x04000
3453
/* tp->lock is held. */
/*
 * Halt the embedded RX or TX CPU selected by @offset (RX_CPU_BASE or
 * TX_CPU_BASE).  Polls up to 10000 times for the halt to take effect, then
 * clears the firmware's NVRAM arbitration request.  Returns 0 on success,
 * -ENODEV if the CPU never reports halted.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	/* 5705+ parts have no TX CPU to halt. */
	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906 uses the VCPU halt bit instead of CPU_MODE. */
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* One final posted halt write for the RX CPU. */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3505
/* Describes one firmware image to be loaded into an embedded CPU's
 * scratch memory.
 */
struct fw_info {
	unsigned int fw_base;	/* load/start address from the fw header */
	unsigned int fw_len;	/* image length in bytes */
	const __be32 *fw_data;	/* image payload, big-endian dwords */
};
3511
/* tp->lock is held. */
/*
 * Halt the CPU at @cpu_base and copy @info's firmware image into its
 * scratch memory at @cpu_scratch_base (first zeroing @cpu_scratch_size
 * bytes).  Does not restart the CPU; callers set CPU_PC and release the
 * halt themselves.  Returns 0 or a negative errno from tg3_halt_cpu().
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* 5705+ parts must use memory-window writes; older parts use
	 * indirect register writes.
	 */
	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	/* Zero the scratch area, re-assert halt, then copy the image in at
	 * the offset encoded in the low 16 bits of fw_base.
	 */
	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			      be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
3557
/* tp->lock is held. */
/*
 * Load the 5701 A0 workaround firmware (tp->fw) into both the RX and TX
 * CPU scratch areas, then start only the RX CPU by pointing its PC at the
 * firmware base.  Returns 0, or -ENODEV if the PC fails to take after
 * five attempts.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;	/* skip 3-dword header */
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	/* Retry setting the PC; re-halt between attempts. */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Release the halt: the RX CPU starts executing the firmware. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
3612
/* tp->lock is held. */
/*
 * Load the TSO firmware (tp->fw) into the appropriate CPU and start it.
 * No-op (returns 0) on parts with hardware TSO.  On 5705 the image goes to
 * the RX CPU's mbuf-pool SRAM; otherwise to the TX CPU scratch area.
 * Returns 0, or -ENODEV if the CPU PC fails to take after five attempts.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	/* Hardware-TSO parts don't need firmware-assisted TSO. */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;	/* skip 3-dword header */
	info.fw_data = &fw_data[3];

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	/* Retry setting the PC; re-halt between attempts. */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Release the halt: the CPU starts executing the firmware. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
3676
3677
/* tp->lock is held. */
/*
 * Program the device's MAC address registers from tp->dev->dev_addr.
 * The address is split into a 16-bit high part (bytes 0-1) and a 32-bit
 * low part (bytes 2-5) and written to all four MAC address slots.
 * @skip_mac_1: when set, slot 1 is left untouched (it is owned by
 * management firmware on some configurations — assumed from the flag's
 * use here; TODO confirm against callers).
 */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	/* 5703/5704 have twelve additional extended-address slots. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	/* Seed the TX backoff generator from the byte-sum of the address. */
	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
3714
/* Re-write the cached MISC_HOST_CTRL value to PCI config space. */
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
3724
/*
 * Bring the device to full power: restore register access, move the PCI
 * function to D0, and switch the power source from Vaux to Vmain.
 * Returns the pci_set_power_state() result (0 on success).
 */
static int tg3_power_up(struct tg3 *tp)
{
	int err;

	tg3_enable_register_access(tp);

	err = pci_set_power_state(tp->pdev, PCI_D0);
	if (!err) {
		/* Switch out of Vaux if it is a NIC */
		tg3_pwrsrc_switch_to_vmain(tp);
	} else {
		netdev_err(tp->dev, "Transition to D0 failed\n");
	}

	return err;
}
3741
3742 static int tg3_setup_phy(struct tg3 *, int);
3743
/*
 * Prepare the chip for a low-power state: mask PCI interrupts, drop the
 * PHY to a low-power/WOL-capable link configuration, arm Wake-on-LAN if
 * requested, gate the core clocks on parts that allow it, and notify the
 * firmware of the shutdown.  Always returns 0.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG))
		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
					 PCI_EXP_LNKCTL_CLKREQ_EN);

	/* Mask PCI interrupts while powering down. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		/* phylib path: save the current link config, then restrict
		 * advertising to the speeds usable in low power / WOL.
		 */
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.speed = phydev->speed;
			tp->link_config.duplex = phydev->duplex;
			tp->link_config.autoneg = phydev->autoneg;
			tp->link_config.advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Only certain Broadcom OUIs take the manual
			 * low-power programming below.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait (up to ~200ms) for firmware to post its status. */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		/* Configure the MAC so magic packets can still be received
		 * while the rest of the device sleeps.
		 */
		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Clock gating: which clocks may be stopped depends on chip family
	 * and whether ASF firmware needs them running.
	 */
	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two-step clock switch, 40us settle after each write. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Only power off the PHY when nothing needs the link up. */
	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
	    ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	     (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX))) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
3974
/*
 * Fully power the device down: run the shutdown preparation, arm PCI
 * wake-from-D3 if WOL is enabled, then move the function to D3hot.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
3982
3983 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3984 {
3985         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3986         case MII_TG3_AUX_STAT_10HALF:
3987                 *speed = SPEED_10;
3988                 *duplex = DUPLEX_HALF;
3989                 break;
3990
3991         case MII_TG3_AUX_STAT_10FULL:
3992                 *speed = SPEED_10;
3993                 *duplex = DUPLEX_FULL;
3994                 break;
3995
3996         case MII_TG3_AUX_STAT_100HALF:
3997                 *speed = SPEED_100;
3998                 *duplex = DUPLEX_HALF;
3999                 break;
4000
4001         case MII_TG3_AUX_STAT_100FULL:
4002                 *speed = SPEED_100;
4003                 *duplex = DUPLEX_FULL;
4004                 break;
4005
4006         case MII_TG3_AUX_STAT_1000HALF:
4007                 *speed = SPEED_1000;
4008                 *duplex = DUPLEX_HALF;
4009                 break;
4010
4011         case MII_TG3_AUX_STAT_1000FULL:
4012                 *speed = SPEED_1000;
4013                 *duplex = DUPLEX_FULL;
4014                 break;
4015
4016         default:
4017                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4018                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4019                                  SPEED_10;
4020                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4021                                   DUPLEX_HALF;
4022                         break;
4023                 }
4024                 *speed = SPEED_UNKNOWN;
4025                 *duplex = DUPLEX_UNKNOWN;
4026                 break;
4027         }
4028 }
4029
/*
 * Program the PHY's autonegotiation advertisement registers from the
 * ethtool-style @advertise mask and @flowctrl flags, then configure EEE
 * advertising on EEE-capable PHYs.  Returns 0 or a negative errno from
 * the first failing PHY write.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		/* 5701 A0/B0 parts are forced to be the 1000T master. */
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* Disable LPI while reprogramming the EEE advertisement. */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		/* NOTE(review): err2 is u32 but receives an int errno from
		 * tg3_phy_toggle_auxctl_smdsp(); works on two's-complement
		 * targets but `int err2` would be cleaner.
		 */
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
		case ASIC_REV_5762:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		/* Re-lock the aux control; preserve the first error seen. */
		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}
4103
/* Start link (re)negotiation on a copper PHY.
 *
 * With autoneg enabled, or the PHY in low-power (WoL) mode, this
 * programs the advertisement registers and restarts autonegotiation.
 * Otherwise it forces the configured speed/duplex through MII_BMCR,
 * first dropping the link and waiting for BMSR to report link-down
 * before writing the new forced mode.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
			/* Low-power/WoL: advertise only 10Mb modes, plus
			 * 100Mb when the WOL_SPEED_100MB flag is set.
			 */
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		/* Build the forced-mode BMCR value. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Mode changed: put the PHY in loopback to drop
			 * the link, poll up to ~15ms for BMSR to report
			 * link-down, then apply the new BMCR.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR link status is latched; read twice. */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
4176
4177 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4178 {
4179         int err;
4180
4181         /* Turn off tap power management. */
4182         /* Set Extended packet length bit */
4183         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4184
4185         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4186         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4187         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4188         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4189         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4190
4191         udelay(40);
4192
4193         return err;
4194 }
4195
4196 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4197 {
4198         u32 advmsk, tgtadv, advertising;
4199
4200         advertising = tp->link_config.advertising;
4201         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4202
4203         advmsk = ADVERTISE_ALL;
4204         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4205                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4206                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4207         }
4208
4209         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4210                 return false;
4211
4212         if ((*lcladv & advmsk) != tgtadv)
4213                 return false;
4214
4215         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4216                 u32 tg3_ctrl;
4217
4218                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4219
4220                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4221                         return false;
4222
4223                 if (tgtadv &&
4224                     (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4225                      tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4226                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4227                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4228                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4229                 } else {
4230                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4231                 }
4232
4233                 if (tg3_ctrl != tgtadv)
4234                         return false;
4235         }
4236
4237         return true;
4238 }
4239
4240 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4241 {
4242         u32 lpeth = 0;
4243
4244         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4245                 u32 val;
4246
4247                 if (tg3_readphy(tp, MII_STAT1000, &val))
4248                         return false;
4249
4250                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4251         }
4252
4253         if (tg3_readphy(tp, MII_LPA, rmtadv))
4254                 return false;
4255
4256         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4257         tp->link_config.rmt_adv = lpeth;
4258
4259         return true;
4260 }
4261
4262 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4263 {
4264         if (curr_link_up != tp->link_up) {
4265                 if (curr_link_up) {
4266                         tg3_carrier_on(tp);
4267                 } else {
4268                         tg3_carrier_off(tp);
4269                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4270                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4271                 }
4272
4273                 tg3_link_report(tp);
4274                 return true;
4275         }
4276
4277         return false;
4278 }
4279
/* Evaluate and bring up the link on a copper PHY, then program the
 * MAC to match the negotiated (or forced) speed and duplex.
 *
 * @force_reset: if non-zero, reset the PHY before reconfiguring it.
 *
 * Always returns 0; link changes are reported through
 * tg3_test_and_report_link_chg().
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Clear any latched link-attention status bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Stop the MAC's MI auto-polling while we access the PHY directly. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    tp->link_up) {
		/* BMSR link status is latched-low; read twice for the
		 * current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link down: reload the 5401 DSP patch, then wait
			 * up to ~10ms for the link to return.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 rev B0 at gigabit: if link stayed down,
			 * reset the PHY and reload the DSP patch.
			 */
			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		/* Ensure bit 10 of the MISCTEST shadow register is set;
		 * if it was clear, set it and renegotiate from scratch.
		 */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	/* Poll up to ~4ms for link-up. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait (up to ~20ms) for a non-zero aux status word and
		 * decode the resolved speed/duplex from it.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Read BMCR until it returns a stable, sane value
		 * (non-zero and not all-ones-but-reserved 0x7fff).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Link counts as good only when autoneg is on,
			 * the PHY advertisement matches our config, and
			 * the partner's abilities could be fetched.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = 1;
		} else {
			/* Forced mode: require autoneg off and the
			 * resolved mode to match the configuration.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			/* Record MDI-X status; FET-style PHYs report it
			 * in a different register.
			 */
			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		if (tg3_flag(tp, ROBOSWITCH)) {
			current_link_up = 1;
			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
			current_speed = SPEED_1000;
			current_duplex = DUPLEX_FULL;
			tp->link_config.active_speed = current_speed;
			tp->link_config.active_duplex = current_duplex;
		}

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	/* Program the MAC port mode (MII for 10/100, GMII otherwise). */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* In order for the 5750 core in BCM4785 chip to work properly
	 * in RGMII mode, the Led Control Register must be set up.
	 */
	if (tg3_flag(tp, RGMII_MODE)) {
		u32 led_ctrl = tr32(MAC_LED_CTRL);
		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);

		if (tp->link_config.active_speed == SPEED_10)
			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
		else if (tp->link_config.active_speed == SPEED_100)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_100MBPS_ON);
		else if (tp->link_config.active_speed == SPEED_1000)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_1000MBPS_ON);

		tw32(MAC_LED_CTRL, led_ctrl);
		udelay(40);
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / high-speed PCI: clear the attention
	 * bits and post a magic value to the firmware mailbox.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		/* Disable PCIe CLKREQ at 10/100; enable it otherwise. */
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN);
		else
			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
						 PCI_EXP_LNKCTL_CLKREQ_EN);
	}

	tg3_test_and_report_link_chg(tp, current_link_up);

	return 0;
}
4587
/* Software state for the fiber autonegotiation state machine
 * (tg3_fiber_aneg_smachine).
 */
struct tg3_fiber_aneginfo {
	/* Current state of the negotiation state machine. */
	int state;
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	/* Control/result flags: MR_AN_*/MR_RESTART_* drive the machine;
	 * MR_LP_ADV_* record the link partner's received abilities.
	 */
	u32 flags;
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick counters; link_time records when the current state was
	 * entered, cur_time advances once per state-machine invocation.
	 */
	unsigned long link_time, cur_time;

	/* Last received config word and how many consecutive times it
	 * repeated (ability match requires a stable value).
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Match latches derived from the received config stream. */
	char ability_match, idle_match, ack_match;

	/* Raw config words written to MAC_TX_AUTO_NEG and read from
	 * MAC_RX_AUTO_NEG; ANEG_CFG_* name the individual bits.
	 */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine. */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks a state must remain stable before the machine advances. */
#define ANEG_STATE_SETTLE_TIME  10000
4651
4652 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4653                                    struct tg3_fiber_aneginfo *ap)
4654 {
4655         u16 flowctrl;
4656         unsigned long delta;
4657         u32 rx_cfg_reg;
4658         int ret;
4659
4660         if (ap->state == ANEG_STATE_UNKNOWN) {
4661                 ap->rxconfig = 0;
4662                 ap->link_time = 0;
4663                 ap->cur_time = 0;
4664                 ap->ability_match_cfg = 0;
4665                 ap->ability_match_count = 0;
4666                 ap->ability_match = 0;
4667                 ap->idle_match = 0;
4668                 ap->ack_match = 0;
4669         }
4670         ap->cur_time++;
4671
4672         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4673                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4674
4675                 if (rx_cfg_reg != ap->ability_match_cfg) {
4676                         ap->ability_match_cfg = rx_cfg_reg;
4677                         ap->ability_match = 0;
4678                         ap->ability_match_count = 0;
4679                 } else {
4680                         if (++ap->ability_match_count > 1) {
4681                                 ap->ability_match = 1;
4682                                 ap->ability_match_cfg = rx_cfg_reg;
4683                         }
4684                 }
4685                 if (rx_cfg_reg & ANEG_CFG_ACK)
4686                         ap->ack_match = 1;
4687                 else
4688                         ap->ack_match = 0;
4689
4690                 ap->idle_match = 0;
4691         } else {
4692                 ap->idle_match = 1;
4693                 ap->ability_match_cfg = 0;
4694                 ap->ability_match_count = 0;
4695                 ap->ability_match = 0;
4696                 ap->ack_match = 0;
4697
4698                 rx_cfg_reg = 0;
4699         }
4700
4701         ap->rxconfig = rx_cfg_reg;
4702         ret = ANEG_OK;
4703
4704         switch (ap->state) {
4705         case ANEG_STATE_UNKNOWN:
4706                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4707                         ap->state = ANEG_STATE_AN_ENABLE;
4708
4709                 /* fallthru */
4710         case ANEG_STATE_AN_ENABLE:
4711                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4712                 if (ap->flags & MR_AN_ENABLE) {
4713                         ap->link_time = 0;
4714                         ap->cur_time = 0;
4715                         ap->ability_match_cfg = 0;
4716                         ap->ability_match_count = 0;
4717                         ap->ability_match = 0;
4718                         ap->idle_match = 0;
4719                         ap->ack_match = 0;
4720
4721                         ap->state = ANEG_STATE_RESTART_INIT;
4722                 } else {
4723                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4724                 }
4725                 break;
4726
4727         case ANEG_STATE_RESTART_INIT:
4728                 ap->link_time = ap->cur_time;
4729                 ap->flags &= ~(MR_NP_LOADED);
4730                 ap->txconfig = 0;
4731                 tw32(MAC_TX_AUTO_NEG, 0);
4732                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4733                 tw32_f(MAC_MODE, tp->mac_mode);
4734                 udelay(40);
4735
4736                 ret = ANEG_TIMER_ENAB;
4737                 ap->state = ANEG_STATE_RESTART;
4738
4739                 /* fallthru */
4740         case ANEG_STATE_RESTART:
4741                 delta = ap->cur_time - ap->link_time;
4742                 if (delta > ANEG_STATE_SETTLE_TIME)
4743                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4744                 else
4745                         ret = ANEG_TIMER_ENAB;
4746                 break;
4747
4748         case ANEG_STATE_DISABLE_LINK_OK:
4749                 ret = ANEG_DONE;
4750                 break;
4751
4752         case ANEG_STATE_ABILITY_DETECT_INIT:
4753                 ap->flags &= ~(MR_TOGGLE_TX);
4754                 ap->txconfig = ANEG_CFG_FD;
4755                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4756                 if (flowctrl & ADVERTISE_1000XPAUSE)
4757                         ap->txconfig |= ANEG_CFG_PS1;
4758                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4759                         ap->txconfig |= ANEG_CFG_PS2;
4760                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4761                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4762                 tw32_f(MAC_MODE, tp->mac_mode);
4763                 udelay(40);
4764
4765                 ap->state = ANEG_STATE_ABILITY_DETECT;
4766                 break;
4767
4768         case ANEG_STATE_ABILITY_DETECT:
4769                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4770                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4771                 break;
4772
4773         case ANEG_STATE_ACK_DETECT_INIT:
4774                 ap->txconfig |= ANEG_CFG_ACK;
4775                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4776                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4777                 tw32_f(MAC_MODE, tp->mac_mode);
4778                 udelay(40);
4779
4780                 ap->state = ANEG_STATE_ACK_DETECT;
4781
4782                 /* fallthru */
4783         case ANEG_STATE_ACK_DETECT:
4784                 if (ap->ack_match != 0) {
4785                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4786                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4787                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4788                         } else {
4789                                 ap->state = ANEG_STATE_AN_ENABLE;
4790                         }
4791                 } else if (ap->ability_match != 0 &&
4792                            ap->rxconfig == 0) {
4793                         ap->state = ANEG_STATE_AN_ENABLE;
4794                 }
4795                 break;
4796
4797         case ANEG_STATE_COMPLETE_ACK_INIT:
4798                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4799                         ret = ANEG_FAILED;
4800                         break;
4801                 }
4802                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4803                                MR_LP_ADV_HALF_DUPLEX |
4804                                MR_LP_ADV_SYM_PAUSE |
4805                                MR_LP_ADV_ASYM_PAUSE |
4806                                MR_LP_ADV_REMOTE_FAULT1 |
4807                                MR_LP_ADV_REMOTE_FAULT2 |
4808                                MR_LP_ADV_NEXT_PAGE |
4809                                MR_TOGGLE_RX |
4810                                MR_NP_RX);
4811                 if (ap->rxconfig & ANEG_CFG_FD)
4812                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4813                 if (ap->rxconfig & ANEG_CFG_HD)
4814                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4815                 if (ap->rxconfig & ANEG_CFG_PS1)
4816                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4817                 if (ap->rxconfig & ANEG_CFG_PS2)
4818                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4819                 if (ap->rxconfig & ANEG_CFG_RF1)
4820                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4821                 if (ap->rxconfig & ANEG_CFG_RF2)
4822                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4823                 if (ap->rxconfig & ANEG_CFG_NP)
4824                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4825
4826                 ap->link_time = ap->cur_time;
4827
4828                 ap->flags ^= (MR_TOGGLE_TX);
4829                 if (ap->rxconfig & 0x0008)
4830                         ap->flags |= MR_TOGGLE_RX;
4831                 if (ap->rxconfig & ANEG_CFG_NP)
4832                         ap->flags |= MR_NP_RX;
4833                 ap->flags |= MR_PAGE_RX;
4834
4835                 ap->state = ANEG_STATE_COMPLETE_ACK;
4836                 ret = ANEG_TIMER_ENAB;
4837                 break;
4838
4839         case ANEG_STATE_COMPLETE_ACK:
4840                 if (ap->ability_match != 0 &&
4841                     ap->rxconfig == 0) {
4842                         ap->state = ANEG_STATE_AN_ENABLE;
4843                         break;
4844                 }
4845                 delta = ap->cur_time - ap->link_time;
4846                 if (delta > ANEG_STATE_SETTLE_TIME) {
4847                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4848                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4849                         } else {
4850                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4851                                     !(ap->flags & MR_NP_RX)) {
4852                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4853                                 } else {
4854                                         ret = ANEG_FAILED;
4855                                 }
4856                         }
4857                 }
4858                 break;
4859
4860         case ANEG_STATE_IDLE_DETECT_INIT:
4861                 ap->link_time = ap->cur_time;
4862                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4863                 tw32_f(MAC_MODE, tp->mac_mode);
4864                 udelay(40);
4865
4866                 ap->state = ANEG_STATE_IDLE_DETECT;
4867                 ret = ANEG_TIMER_ENAB;
4868                 break;
4869
4870         case ANEG_STATE_IDLE_DETECT:
4871                 if (ap->ability_match != 0 &&
4872                     ap->rxconfig == 0) {
4873                         ap->state = ANEG_STATE_AN_ENABLE;
4874                         break;
4875                 }
4876                 delta = ap->cur_time - ap->link_time;
4877                 if (delta > ANEG_STATE_SETTLE_TIME) {
4878                         /* XXX another gem from the Broadcom driver :( */
4879                         ap->state = ANEG_STATE_LINK_OK;
4880                 }
4881                 break;
4882
4883         case ANEG_STATE_LINK_OK:
4884                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4885                 ret = ANEG_DONE;
4886                 break;
4887
4888         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4889                 /* ??? unimplemented */
4890                 break;
4891
4892         case ANEG_STATE_NEXT_PAGE_WAIT:
4893                 /* ??? unimplemented */
4894                 break;
4895
4896         default:
4897                 ret = ANEG_FAILED;
4898                 break;
4899         }
4900
4901         return ret;
4902 }
4903
4904 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4905 {
4906         int res = 0;
4907         struct tg3_fiber_aneginfo aninfo;
4908         int status = ANEG_FAILED;
4909         unsigned int tick;
4910         u32 tmp;
4911
4912         tw32_f(MAC_TX_AUTO_NEG, 0);
4913
4914         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4915         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4916         udelay(40);
4917
4918         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4919         udelay(40);
4920
4921         memset(&aninfo, 0, sizeof(aninfo));
4922         aninfo.flags |= MR_AN_ENABLE;
4923         aninfo.state = ANEG_STATE_UNKNOWN;
4924         aninfo.cur_time = 0;
4925         tick = 0;
4926         while (++tick < 195000) {
4927                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4928                 if (status == ANEG_DONE || status == ANEG_FAILED)
4929                         break;
4930
4931                 udelay(1);
4932         }
4933
4934         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4935         tw32_f(MAC_MODE, tp->mac_mode);
4936         udelay(40);
4937
4938         *txflags = aninfo.txconfig;
4939         *rxflags = aninfo.flags;
4940
4941         if (status == ANEG_DONE &&
4942             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4943                              MR_LP_ADV_FULL_DUPLEX)))
4944                 res = 1;
4945
4946         return res;
4947 }
4948
/* Bring-up sequence for the BCM8002 SerDes PHY.
 *
 * Skipped when init has already completed and there is no PCS sync
 * (see the original comment: reset only on first init or when we have
 * a link).  The raw register numbers (0x10, 0x11, 0x13, 0x16, 0x18)
 * and values are BCM8002 vendor magic; the ordering of the writes and
 * the busy-wait delays is part of the sequence and must not change.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	/* 500 x 10us = ~5 ms busy wait. */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	/* 15000 x 10us = ~150 ms busy wait. */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
4998
/* Drive the on-chip SG_DIG 1000BASE-X autoneg hardware.
 *
 * Three paths:
 *  - autoneg disabled: tear down HW autoneg mode and report link up on
 *    PCS sync alone;
 *  - SG_DIG_CTRL does not yet match what we want: (re)program it and
 *    restart autoneg (via the restart_autoneg label);
 *  - SG_DIG_CTRL already matches: poll completion status, falling back
 *    to parallel detection when autoneg times out.
 *
 * The "workaround" path applies to every chip except 5704 A0/A1 and
 * pokes MAC_SERDES_CFG with vendor magic (0xc010000 / 0x4010000 /
 * 0xc011000) while preserving the pre-emphasis and voltage-regulator
 * bits noted below.  Returns nonzero when link should be considered up.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	/* Forced speed: disable HW autoneg if it was on, then report
	 * link up on PCS sync alone (with flow control disabled).
	 */
	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	/* Fold our pause advertisement into the expected control word. */
	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* Keep a parallel-detected link alive while its
		 * timeout counter runs, as long as we have PCS sync
		 * and are not receiving config words.
		 */
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		/* Also reached via goto below when parallel detection
		 * fails after an autoneg timeout.
		 */
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg completed: derive pause settings from
			 * what we advertised and what the partner sent.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: drop out of HW
				 * autoneg and try parallel detection.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal detect: restart the autoneg
		 * timeout from scratch.
		 */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
5143
/* Software fiber link setup, used when the HW_AUTONEG flag is off.
 *
 * Requires PCS sync to do anything.  With autoneg enabled it runs the
 * software state machine (fiber_autoneg), applies the negotiated pause
 * settings, and finally falls back to parallel detection (PCS sync
 * without incoming config words).  With autoneg disabled it forces a
 * 1000FD link.  Returns nonzero when link is up.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Translate the raw autoneg config/result bits
			 * into MII-style pause advertisements.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack the latched sync/config-changed bits until they
		 * stay clear (up to 30 tries).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Parallel detection: PCS sync with no incoming config
		 * words counts as link up even if autoneg failed.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
5208
/* Top-level link setup for SerDes (TBI) fiber devices.
 *
 * Fast-exits when the link is already up, init is complete, HW autoneg
 * is not in use, and MAC_STATUS shows a clean synced/signal state.
 * Otherwise it puts the MAC into TBI port mode, runs the BCM8002
 * bring-up if that PHY is present, and delegates to the HW-autoneg or
 * by-hand path.  Afterwards it acks latched status bits, kicks a
 * config-word resend if sync was lost with autoneg idle, updates the
 * LED and active speed/duplex state, and reports link changes.
 * Always returns 0; force_reset is accepted for signature parity with
 * the other tg3_setup_*_phy handlers but unused here.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot state so we can report only real changes below. */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			/* Link still healthy: just ack latched bits. */
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the stale link-change bit in the status block while
	 * keeping it marked as updated.
	 */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack latched status bits until they stay clear (<= 100 tries). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Pulse SEND_CONFIGS to provoke the partner. */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* If the up/down state itself did not change, still report when
	 * pause/speed/duplex differ from the snapshot taken above.
	 */
	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5311
/* Link setup for MII-attached SerDes PHYs (TG3_PHYFLG_MII_SERDES).
 *
 * Talks 1000BASE-X over the MII register set: reads BMSR/BMCR, either
 * (re)starts autoneg with the desired 1000X advertisement, forces
 * speed/duplex, or leaves a parallel-detected link alone; then derives
 * speed/duplex/pause from the result and reports link changes.  On
 * 5714 the BMSR link bit is unreliable, so link state is taken from
 * MAC_TX_STATUS instead.  Returns the accumulated tg3_readphy error
 * status (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack all latched MAC status bits before probing the PHY. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->link_config.rmt_adv = 0;

	/* Double read: BMSR link status is latched-low. */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		/* Rebuild the 1000X advertisement from the configured
		 * flow control and advertised modes.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or autoneg was off):
			 * restart autoneg and return early; the result
			 * is picked up on a later invocation.
			 */
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: build the desired BMCR. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (tp->link_up) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				tg3_carrier_off(tp);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-read link state (latched-low, so twice). */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the common abilities. */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	/* NOTE(review): this tests the *previous* active_duplex;
	 * tp->link_config.active_duplex is only updated to
	 * current_duplex further below.  Presumably intentional
	 * (MAC mode follows the last reported state) -- confirm.
	 */
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	tg3_test_and_report_link_chg(tp, current_link_up);
	return err;
}
5475
/* Parallel-detection maintenance for MII-SerDes links.
 *
 * While serdes_counter is nonzero this only ticks the counter down,
 * giving autoneg time to complete.  Once expired:
 *  - link down + autoneg enabled: if the PHY shows signal detect
 *    (shadow reg 0x1f bit 4) but no incoming config words (expansion
 *    interrupt status bit 5), force 1000FD with autoneg off and mark
 *    the link as parallel-detected;
 *  - link up via parallel detect: if config words start arriving,
 *    re-enable autoneg and clear the parallel-detect flag.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Double read: first read clears stale state. */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
5535
/* Configure the link: dispatch to the PHY-type-specific handler, then
 * apply chip-wide post-link fixups.
 *
 * Fixups applied regardless of handler result:
 *  - 5784 AX: reprogram the GRC clock prescaler from the current MAC
 *    clock speed;
 *  - MAC_TX_LENGTHS: slot time 0xff for 1000/half, 32 otherwise, with
 *    jumbo/count-down fields preserved on 5720/5762;
 *  - pre-5705 chips: enable/disable statistics block coalescing with
 *    link state;
 *  - ASPM workaround: raise/restore the PCIe L1 power-management
 *    threshold depending on link state.
 *
 * Returns the PHY handler's error code.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
5601
5602 /* tp->lock must be held */
5603 static u64 tg3_refclk_read(struct tg3 *tp)
5604 {
5605         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5606         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5607 }
5608
/* Load a new value into the EAV reference clock: stop the counter,
 * write both halves, then resume with a flushed (posted) write.
 * tp->lock must be held.
 */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
}
5617
5618 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5619 static inline void tg3_full_unlock(struct tg3 *tp);
5620 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5621 {
5622         struct tg3 *tp = netdev_priv(dev);
5623
5624         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5625                                 SOF_TIMESTAMPING_RX_SOFTWARE |
5626                                 SOF_TIMESTAMPING_SOFTWARE    |
5627                                 SOF_TIMESTAMPING_TX_HARDWARE |
5628                                 SOF_TIMESTAMPING_RX_HARDWARE |
5629                                 SOF_TIMESTAMPING_RAW_HARDWARE;
5630
5631         if (tp->ptp_clock)
5632                 info->phc_index = ptp_clock_index(tp->ptp_clock);
5633         else
5634                 info->phc_index = -1;
5635
5636         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5637
5638         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5639                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5640                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5641                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5642         return 0;
5643 }
5644
5645 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5646 {
5647         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5648         bool neg_adj = false;
5649         u32 correction = 0;
5650
5651         if (ppb < 0) {
5652                 neg_adj = true;
5653                 ppb = -ppb;
5654         }
5655
5656         /* Frequency adjustment is performed using hardware with a 24 bit
5657          * accumulator and a programmable correction value. On each clk, the
5658          * correction value gets added to the accumulator and when it
5659          * overflows, the time counter is incremented/decremented.
5660          *
5661          * So conversion from ppb to correction value is
5662          *              ppb * (1 << 24) / 1000000000
5663          */
5664         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5665                      TG3_EAV_REF_CLK_CORRECT_MASK;
5666
5667         tg3_full_lock(tp, 0);
5668
5669         if (correction)
5670                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5671                      TG3_EAV_REF_CLK_CORRECT_EN |
5672                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5673         else
5674                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5675
5676         tg3_full_unlock(tp);
5677
5678         return 0;
5679 }
5680
5681 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5682 {
5683         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5684
5685         tg3_full_lock(tp, 0);
5686         tp->ptp_adjust += delta;
5687         tg3_full_unlock(tp);
5688
5689         return 0;
5690 }
5691
5692 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5693 {
5694         u64 ns;
5695         u32 remainder;
5696         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5697
5698         tg3_full_lock(tp, 0);
5699         ns = tg3_refclk_read(tp);
5700         ns += tp->ptp_adjust;
5701         tg3_full_unlock(tp);
5702
5703         ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5704         ts->tv_nsec = remainder;
5705
5706         return 0;
5707 }
5708
5709 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5710                            const struct timespec *ts)
5711 {
5712         u64 ns;
5713         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5714
5715         ns = timespec_to_ns(ts);
5716
5717         tg3_full_lock(tp, 0);
5718         tg3_refclk_write(tp, ns);
5719         tp->ptp_adjust = 0;
5720         tg3_full_unlock(tp);
5721
5722         return 0;
5723 }
5724
/* PTP enable callback: no ancillary features (alarms, external
 * timestamps, periodic outputs, PPS) are implemented for this clock.
 */
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}
5730
/* Capabilities advertised to the PTP core.  Frequency and offset
 * adjustment only; all ancillary feature counts are zero (matching
 * the -EOPNOTSUPP enable callback).  max_adj is in parts per billion
 * per the ptp_clock_info contract.
 */
static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime	= tg3_ptp_gettime,
	.settime	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};
5745
5746 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5747                                      struct skb_shared_hwtstamps *timestamp)
5748 {
5749         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5750         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5751                                            tp->ptp_adjust);
5752 }
5753
5754 /* tp->lock must be held */
5755 static void tg3_ptp_init(struct tg3 *tp)
5756 {
5757         if (!tg3_flag(tp, PTP_CAPABLE))
5758                 return;
5759
5760         /* Initialize the hardware clock to the system time. */
5761         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5762         tp->ptp_adjust = 0;
5763         tp->ptp_info = tg3_ptp_caps;
5764 }
5765
5766 /* tp->lock must be held */
5767 static void tg3_ptp_resume(struct tg3 *tp)
5768 {
5769         if (!tg3_flag(tp, PTP_CAPABLE))
5770                 return;
5771
5772         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5773         tp->ptp_adjust = 0;
5774 }
5775
5776 static void tg3_ptp_fini(struct tg3 *tp)
5777 {
5778         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5779                 return;
5780
5781         ptp_clock_unregister(tp->ptp_clock);
5782         tp->ptp_clock = NULL;
5783         tp->ptp_adjust = 0;
5784 }
5785
/* Return the irq_sync flag; nonzero while interrupt processing is
 * being quiesced.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
5790
5791 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5792 {
5793         int i;
5794
5795         dst = (u32 *)((u8 *)dst + off);
5796         for (i = 0; i < len; i += sizeof(u32))
5797                 *dst++ = tr32(off + i);
5798 }
5799
/* Snapshot the legacy (non-PCI-Express) register blocks into 'regs',
 * which is indexed by register offset (see tg3_rd32_loop).  Lengths are
 * in bytes; blocks that exist only on some chips are gated by flags.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Per-vector coalescing registers exist only with MSI-X. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* The dedicated TX CPU disappeared with the 5705 generation. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
5849
/* Dump a snapshot of device registers plus per-vector status block and
 * NAPI state to the kernel log.  The register buffer uses GFP_ATOMIC so
 * this is safe from atomic context; the dump is silently skipped if the
 * allocation fails.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four registers per line, skipping all-zero groups. */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		/* Driver-side ring bookkeeping for the same vector. */
		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
5905
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround is already in effect (flag set or
	 * indirect mailbox writes in use), this path should be
	 * unreachable. */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Flag the pending recovery; the actual reset happens later. */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
5927
5928 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5929 {
5930         /* Tell compiler to fetch tx indices from memory. */
5931         barrier();
5932         return tnapi->tx_pending -
5933                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5934 }
5935
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 *
 * Reclaim completed tx descriptors for one NAPI vector: unmap DMA,
 * deliver any hardware tx timestamps, free the skbs, and wake the
 * queue if it was stopped and enough space is now available.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* NOTE(review): with ENABLE_TSS the tx queue numbering appears
	 * offset by one relative to the vector index — confirm against
	 * the vector setup code. */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	/* Walk from our consumer index up to the hardware's. */
	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means hardware and software
		 * views of the ring diverged -- trigger recovery. */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Fetch and deliver the hardware tx timestamp if this
		 * packet requested one. */
		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		/* Unmap the linear part of the skb. */
		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip continuation slots of a fragmented mapping. */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap each page fragment; running past hw_idx or hitting
		 * an occupied slot indicates ring corruption. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to close the race with the xmit
	 * path stopping the queue concurrently. */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
6039
6040 static void tg3_frag_free(bool is_frag, void *data)
6041 {
6042         if (is_frag)
6043                 put_page(virt_to_head_page(data));
6044         else
6045                 kfree(data);
6046 }
6047
6048 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6049 {
6050         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6051                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6052
6053         if (!ri->data)
6054                 return;
6055
6056         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6057                          map_sz, PCI_DMA_FROMDEVICE);
6058         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6059         ri->data = NULL;
6060 }
6061
6062
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 *
 * @opaque_key selects the std or jumbo ring; @dest_idx_unmasked is the
 * raw producer index; *@frag_size is set to the allocation size when the
 * buffer came from the page-frag allocator, or 0 when kmalloc'ed.
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	/* Small buffers come from the page-frag allocator, larger ones
	 * from kmalloc; tg3_rx_data_free makes the same size-based choice
	 * when freeing. */
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	/* Commit: record the buffer and give the chip its DMA address. */
	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
6138
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 *
 * Moves the buffer at @src_idx of vector 0's producer ring into slot
 * @dest_idx_unmasked of @dpr, transferring both the descriptor address
 * and the ring_info bookkeeping.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Source is always the vector-0 producer ring. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
6188
6189 /* The RX ring scheme is composed of multiple rings which post fresh
6190  * buffers to the chip, and one special ring the chip uses to report
6191  * status back to the host.
6192  *
6193  * The special ring reports the status of received packets to the
6194  * host.  The chip does not write into the original descriptor the
6195  * RX buffer was obtained from.  The chip simply takes the original
6196  * descriptor as provided by the host, updates the status and length
6197  * field, then writes this into the next status ring entry.
6198  *
6199  * Each ring the host uses to post buffers to the chip is described
6200  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6201  * it is first placed into the on-chip ram.  When the packet's length
6202  * is known, it walks down the TG3_BDINFO entries to select the ring.
6203  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6204  * which is within the range of the new packet's length is chosen.
6205  *
6206  * The "separate ring for rx status" scheme may sound queer, but it makes
6207  * sense from a cache coherency perspective.  If only the host writes
6208  * to the buffer post rings, and only the chip writes to the rx status
6209  * rings, then cache lines never move beyond shared-modified state.
6210  * If both the host and chip were to write into the same ring, cache line
6211  * eviction could occur since both entities want it in an exclusive state.
6212  */
6213 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6214 {
6215         struct tg3 *tp = tnapi->tp;
6216         u32 work_mask, rx_std_posted = 0;
6217         u32 std_prod_idx, jmb_prod_idx;
6218         u32 sw_idx = tnapi->rx_rcb_ptr;
6219         u16 hw_idx;
6220         int received;
6221         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6222
6223         hw_idx = *(tnapi->rx_rcb_prod_idx);
6224         /*
6225          * We need to order the read of hw_idx and the read of
6226          * the opaque cookie.
6227          */
6228         rmb();
6229         work_mask = 0;
6230         received = 0;
6231         std_prod_idx = tpr->rx_std_prod_idx;
6232         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6233         while (sw_idx != hw_idx && budget > 0) {
6234                 struct ring_info *ri;
6235                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6236                 unsigned int len;
6237                 struct sk_buff *skb;
6238                 dma_addr_t dma_addr;
6239                 u32 opaque_key, desc_idx, *post_ptr;
6240                 u8 *data;
6241                 u64 tstamp = 0;
6242
6243                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6244                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6245                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6246                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6247                         dma_addr = dma_unmap_addr(ri, mapping);
6248                         data = ri->data;
6249                         post_ptr = &std_prod_idx;
6250                         rx_std_posted++;
6251                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6252                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6253                         dma_addr = dma_unmap_addr(ri, mapping);
6254                         data = ri->data;
6255                         post_ptr = &jmb_prod_idx;
6256                 } else
6257                         goto next_pkt_nopost;
6258
6259                 work_mask |= opaque_key;
6260
6261                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6262                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6263                 drop_it:
6264                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6265                                        desc_idx, *post_ptr);
6266                 drop_it_no_recycle:
6267                         /* Other statistics kept track of by card. */
6268                         tp->rx_dropped++;
6269                         goto next_pkt;
6270                 }
6271
6272                 prefetch(data + TG3_RX_OFFSET(tp));
6273                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6274                       ETH_FCS_LEN;
6275
6276                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6277                      RXD_FLAG_PTPSTAT_PTPV1 ||
6278                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6279                      RXD_FLAG_PTPSTAT_PTPV2) {
6280                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6281                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6282                 }
6283
6284                 if (len > TG3_RX_COPY_THRESH(tp)) {
6285                         int skb_size;
6286                         unsigned int frag_size;
6287
6288                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6289                                                     *post_ptr, &frag_size);
6290                         if (skb_size < 0)
6291                                 goto drop_it;
6292
6293                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6294                                          PCI_DMA_FROMDEVICE);
6295
6296                         skb = build_skb(data, frag_size);
6297                         if (!skb) {
6298                                 tg3_frag_free(frag_size != 0, data);
6299                                 goto drop_it_no_recycle;
6300                         }
6301                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6302                         /* Ensure that the update to the data happens
6303                          * after the usage of the old DMA mapping.
6304                          */
6305                         smp_wmb();
6306
6307                         ri->data = NULL;
6308
6309                 } else {
6310                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6311                                        desc_idx, *post_ptr);
6312
6313                         skb = netdev_alloc_skb(tp->dev,
6314                                                len + TG3_RAW_IP_ALIGN);
6315                         if (skb == NULL)
6316                                 goto drop_it_no_recycle;
6317
6318                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6319                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6320                         memcpy(skb->data,
6321                                data + TG3_RX_OFFSET(tp),
6322                                len);
6323                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6324                 }
6325
6326                 skb_put(skb, len);
6327                 if (tstamp)
6328                         tg3_hwclock_to_timestamp(tp, tstamp,
6329                                                  skb_hwtstamps(skb));
6330
6331                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6332                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6333                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6334                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6335                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6336                 else
6337                         skb_checksum_none_assert(skb);
6338
6339                 skb->protocol = eth_type_trans(skb, tp->dev);
6340
6341                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6342                     skb->protocol != htons(ETH_P_8021Q)) {
6343                         dev_kfree_skb(skb);
6344                         goto drop_it_no_recycle;
6345                 }
6346
6347                 if (desc->type_flags & RXD_FLAG_VLAN &&
6348                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6349                         __vlan_hwaccel_put_tag(skb,
6350                                                desc->err_vlan & RXD_VLAN_MASK);
6351
6352                 napi_gro_receive(&tnapi->napi, skb);
6353
6354                 received++;
6355                 budget--;
6356
6357 next_pkt:
6358                 (*post_ptr)++;
6359
6360                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6361                         tpr->rx_std_prod_idx = std_prod_idx &
6362                                                tp->rx_std_ring_mask;
6363                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6364                                      tpr->rx_std_prod_idx);
6365                         work_mask &= ~RXD_OPAQUE_RING_STD;
6366                         rx_std_posted = 0;
6367                 }
6368 next_pkt_nopost:
6369                 sw_idx++;
6370                 sw_idx &= tp->rx_ret_ring_mask;
6371
6372                 /* Refresh hw_idx to see if there is new work */
6373                 if (sw_idx == hw_idx) {
6374                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6375                         rmb();
6376                 }
6377         }
6378
6379         /* ACK the status ring. */
6380         tnapi->rx_rcb_ptr = sw_idx;
6381         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6382
6383         /* Refill RX ring(s). */
6384         if (!tg3_flag(tp, ENABLE_RSS)) {
6385                 /* Sync BD data before updating mailbox */
6386                 wmb();
6387
6388                 if (work_mask & RXD_OPAQUE_RING_STD) {
6389                         tpr->rx_std_prod_idx = std_prod_idx &
6390                                                tp->rx_std_ring_mask;
6391                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6392                                      tpr->rx_std_prod_idx);
6393                 }
6394                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6395                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6396                                                tp->rx_jmb_ring_mask;
6397                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6398                                      tpr->rx_jmb_prod_idx);
6399                 }
6400                 mmiowb();
6401         } else if (work_mask) {
6402                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6403                  * updated before the producer indices can be updated.
6404                  */
6405                 smp_wmb();
6406
6407                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6408                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6409
6410                 if (tnapi != &tp->napi[1]) {
6411                         tp->rx_refill = true;
6412                         napi_schedule(&tp->napi[1].napi);
6413                 }
6414         }
6415
6416         return received;
6417 }
6418
/* Service a pending link-change event flagged in the vector-0 status
 * block.  Called from NAPI poll context; acquires tp->lock only when a
 * change is actually pending.  No-op when the chip is configured to use
 * the link-change register or serdes polling instead of status-block
 * notification.
 */
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear only the link-change bit; keep the block
			 * marked UPDATED so other events are not lost.
			 */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				/* phylib manages the PHY; just ack the MAC
				 * status bits and let it do the rest.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
6442
/* Move ready rx buffers from source producer ring set @spr into
 * destination set @dpr, for both the standard and jumbo rings.  Both the
 * ring_info bookkeeping entries and the hardware buffer-descriptor
 * addresses are copied; the source consumer and destination producer
 * indices are advanced accordingly.
 *
 * Returns 0 on success, or -ENOSPC if a destination slot was still
 * occupied (the transfer stops short in that case; already-copied
 * entries remain transferred).
 *
 * NOTE(review): the smp_rmb() placement mirrors the smp_wmb() in the
 * producer-side code and must not be reordered relative to the index
 * and ->data reads around it.
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	/* Standard ring transfer loop */
	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Contiguous run available in the source ring (handles
		 * wrap-around by stopping at the ring end).
		 */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		/* Also bounded by contiguous space in the destination */
		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Shrink the copy if a destination slot is still in use */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		/* Copy the hardware descriptor DMA addresses as well */
		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Jumbo ring transfer loop — same structure as above */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
6568
/* One round of NAPI work for a vector: reap completed TX descriptors,
 * then receive packets within the remaining budget.  Under RSS, vector 1
 * additionally pulls recycled rx buffers from every per-vector producer
 * ring into the napi[0] ring and publishes the new producer indices to
 * the hardware.
 *
 * Returns the updated work_done count (callers compare it to budget).
 * Returns early if TX recovery is pending or this vector has no rx ring.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* Vector has no rx completion ring — nothing more to do */
	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		/* Gather buffers from every rx vector's producer ring */
		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		/* Only ring the mailbox doorbells if an index moved */
		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A transfer failed (-ENOSPC); kick the coalescing engine
		 * so we get another chance to retry soon.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
6619
/* Schedule the chip-reset workqueue item exactly once; the atomic
 * test_and_set_bit() guarantees at most one pending reset task.
 */
static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}
6625
/* Cancel any in-flight reset task and clear the related flags.  Must not
 * be called from the reset task itself (cancel_work_sync() would
 * deadlock waiting for the running work to finish).
 */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
6632
/* NAPI poll handler for MSI-X vectors 1..n (vector 0 uses tg3_poll).
 * Loops doing TX/RX work until either the budget is exhausted or no
 * more work is visible, then re-enables the vector's interrupt via its
 * mailbox.  On a pending TX recovery it completes NAPI and schedules
 * the reset task instead.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6692
6693 static void tg3_process_error(struct tg3 *tp)
6694 {
6695         u32 val;
6696         bool real_error = false;
6697
6698         if (tg3_flag(tp, ERROR_PROCESSED))
6699                 return;
6700
6701         /* Check Flow Attention register */
6702         val = tr32(HOSTCC_FLOW_ATTN);
6703         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6704                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6705                 real_error = true;
6706         }
6707
6708         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6709                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6710                 real_error = true;
6711         }
6712
6713         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6714                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6715                 real_error = true;
6716         }
6717
6718         if (!real_error)
6719                 return;
6720
6721         tg3_dump_state(tp);
6722
6723         tg3_flag_set(tp, ERROR_PROCESSED);
6724         tg3_reset_task_schedule(tp);
6725 }
6726
/* NAPI poll handler for vector 0 (also the only vector in INTx/MSI
 * mode).  In addition to TX/RX work it checks for chip errors and link
 * changes, which are only reported on this vector's status block.
 * Supports both tagged and non-tagged status-block modes.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6774
6775 static void tg3_napi_disable(struct tg3 *tp)
6776 {
6777         int i;
6778
6779         for (i = tp->irq_cnt - 1; i >= 0; i--)
6780                 napi_disable(&tp->napi[i].napi);
6781 }
6782
6783 static void tg3_napi_enable(struct tg3 *tp)
6784 {
6785         int i;
6786
6787         for (i = 0; i < tp->irq_cnt; i++)
6788                 napi_enable(&tp->napi[i].napi);
6789 }
6790
6791 static void tg3_napi_init(struct tg3 *tp)
6792 {
6793         int i;
6794
6795         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6796         for (i = 1; i < tp->irq_cnt; i++)
6797                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6798 }
6799
6800 static void tg3_napi_fini(struct tg3 *tp)
6801 {
6802         int i;
6803
6804         for (i = 0; i < tp->irq_cnt; i++)
6805                 netif_napi_del(&tp->napi[i].napi);
6806 }
6807
/* Quiesce the data path: stop NAPI polling, drop the carrier and halt
 * all TX queues.  Refreshing trans_start first keeps the watchdog from
 * firing a TX timeout while the device is deliberately stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}
6815
/* Restart the data path after a stop: resume PTP, wake TX queues,
 * restore carrier state, re-enable NAPI and interrupts.
 * tp->lock must be held.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	/* Force at least one poll pass so pending events are processed */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
6834
/* Set irq_sync and wait for all in-flight interrupt handlers to finish.
 * The smp_mb() ensures the handlers observe irq_sync = 1 before
 * synchronize_irq() waits for them (handlers check it via tg3_irq_sync()).
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
6847
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	/* _bh variant: the lock is also taken from NAPI (softirq) context */
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
6859
/* Release the lock taken by tg3_full_lock().  Note that irq_sync is not
 * undone here; interrupt re-enabling is handled separately by callers.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
6864
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the cache lines the poll handler will touch first */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/* Skip scheduling while tg3_irq_quiesce() has synced interrupts */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
6882
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the cache lines the poll handler will touch first */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
6908
/* Legacy INTx interrupt handler (non-tagged status mode).  Returns
 * IRQ_NONE (handled = 0) when the interrupt was not ours, which matters
 * on shared interrupt lines.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
6957
/* Legacy INTx interrupt handler for tagged-status mode.  A repeated
 * status tag means no new work was posted since the last interrupt, so
 * the IRQ is reported as unhandled for shared-line accounting.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
7009
/* ISR for interrupt test */
/* Used only during the self-test: reports whether an interrupt from
 * this device was observed, and disables interrupts when it was so the
 * test sees exactly one.
 */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
7024
7025 #ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point (netconsole/kgdboe): invoke each vector's
 * interrupt handler directly with interrupts effectively polled.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	/* Don't touch the hardware while interrupts are quiesced */
	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
7037 #endif
7038
/* net_device watchdog callback: log state (if tx_err messages are
 * enabled) and schedule a chip reset to recover the stuck TX path.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
7050
7051 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7052 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7053 {
7054         u32 base = (u32) mapping & 0xffffffff;
7055
7056         return (base > 0xffffdcc0) && (base + len + 8 < base);
7057 }
7058
/* Test for DMA addresses > 40-bit */
/* Only relevant on 64-bit kernels with highmem and chips that have the
 * 40-bit DMA erratum; everywhere else the test compiles to 0.
 */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
7071
7072 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7073                                  dma_addr_t mapping, u32 len, u32 flags,
7074                                  u32 mss, u32 vlan)
7075 {
7076         txbd->addr_hi = ((u64) mapping >> 32);
7077         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7078         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7079         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7080 }
7081
/* Emit TX descriptors for one buffer at *entry, consuming *budget
 * descriptors and advancing *entry past those written.  When the chip
 * has a per-descriptor DMA length limit, the buffer is split into
 * multiple descriptors (marking all but the last as fragmented).
 *
 * Returns true ("hwbug") when the mapping trips one of the hardware
 * DMA errata (short-DMA, 4GB-crossing, >40-bit) or when the budget ran
 * out mid-split — the caller must then fall back to the workaround path.
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		/* Intermediate chunks must not carry the END flag */
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				/* Final chunk keeps the caller's flags
				 * (including END, if set).
				 */
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				/* Out of descriptors: unmark the last
				 * written entry and report failure.
				 */
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		/* No DMA limit: a single descriptor covers the buffer */
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
7141
/* Unmap the DMA mappings of a transmitted skb starting at ring index
 * @entry: the head mapping first, then the mappings of fragments 0..last.
 * Entries marked fragmented (by tg3_tx_frag_set) share one mapping and
 * are skipped over while clearing their flag.  Clears txb->skb; the
 * caller owns freeing the skb itself.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	/* Skip the extra descriptors the head was split across */
	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		/* Skip split descriptors for this fragment as well */
		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
7179
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Linearize the skb into a freshly allocated copy, map the copy, and
 * queue it starting at *entry.  On success (0) the copy replaces *pskb
 * and the original skb is freed.  On failure (-1) both the original
 * and (if it existed) the copy have been freed; *pskb is then NULL or
 * stale and must not be dereferenced by the caller.
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
                                       struct sk_buff **pskb,
                                       u32 *entry, u32 *budget,
                                       u32 base_flags, u32 mss, u32 vlan)
{
        struct tg3 *tp = tnapi->tp;
        struct sk_buff *new_skb, *skb = *pskb;
        dma_addr_t new_addr = 0;
        int ret = 0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                new_skb = skb_copy(skb, GFP_ATOMIC);
        else {
                /* Extra headroom of 4 - (data & 3) bytes — presumably so
                 * the 5701's copy can be placed 4-byte aligned; TODO confirm.
                 */
                int more_headroom = 4 - ((unsigned long)skb->data & 3);

                new_skb = skb_copy_expand(skb,
                                          skb_headroom(skb) + more_headroom,
                                          skb_tailroom(skb), GFP_ATOMIC);
        }

        if (!new_skb) {
                ret = -1;
        } else {
                /* New SKB is guaranteed to be linear. */
                new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
                                          PCI_DMA_TODEVICE);
                /* Make sure the mapping succeeded */
                if (pci_dma_mapping_error(tp->pdev, new_addr)) {
                        dev_kfree_skb(new_skb);
                        ret = -1;
                } else {
                        u32 save_entry = *entry;

                        /* The linear copy is queued as a single chain,
                         * so it always carries the END flag.
                         */
                        base_flags |= TXD_FLAG_END;

                        tnapi->tx_buffers[*entry].skb = new_skb;
                        dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
                                           mapping, new_addr);

                        if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
                                            new_skb->len, base_flags,
                                            mss, vlan)) {
                                /* Out of BDs: unwind the head mapping only
                                 * (last == -1, no page fragments).
                                 */
                                tg3_tx_skb_unmap(tnapi, save_entry, -1);
                                dev_kfree_skb(new_skb);
                                ret = -1;
                        }
                }
        }

        /* Original skb is consumed in every outcome; *pskb may point at
         * an already-freed copy when ret == -1 — callers must not use it.
         */
        dev_kfree_skb(skb);
        *pskb = new_skb;
        return ret;
}
7234
7235 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7236
7237 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7238  * TSO header is greater than 80 bytes.
7239  */
7240 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7241 {
7242         struct sk_buff *segs, *nskb;
7243         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7244
7245         /* Estimate the number of fragments in the worst case */
7246         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7247                 netif_stop_queue(tp->dev);
7248
7249                 /* netif_tx_stop_queue() must be done before checking
7250                  * checking tx index in tg3_tx_avail() below, because in
7251                  * tg3_tx(), we update tx index before checking for
7252                  * netif_tx_queue_stopped().
7253                  */
7254                 smp_mb();
7255                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7256                         return NETDEV_TX_BUSY;
7257
7258                 netif_wake_queue(tp->dev);
7259         }
7260
7261         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7262         if (IS_ERR(segs))
7263                 goto tg3_tso_bug_end;
7264
7265         do {
7266                 nskb = segs;
7267                 segs = segs->next;
7268                 nskb->next = NULL;
7269                 tg3_start_xmit(nskb, tp->dev);
7270         } while (segs);
7271
7272 tg3_tso_bug_end:
7273         dev_kfree_skb(skb);
7274
7275         return NETDEV_TX_OK;
7276 }
7277
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 *
 * Maps the skb head and fragments, builds the BD chain, then kicks the
 * hardware via the tx mailbox.  If any BD would trip a DMA erratum the
 * whole packet is re-queued through tigon3_dma_hwbug_workaround().
 * Always consumes the skb (dropped packets bump tp->tx_dropped).
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 len, entry, base_flags, mss, vlan = 0;
        u32 budget;
        int i = -1, would_hit_hwbug;
        dma_addr_t mapping;
        struct tg3_napi *tnapi;
        struct netdev_queue *txq;
        unsigned int last;

        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
        tnapi = &tp->napi[skb_get_queue_mapping(skb)];
        /* With TSS, vector 0 handles no tx; queue N uses vector N + 1. */
        if (tg3_flag(tp, ENABLE_TSS))
                tnapi++;

        budget = tg3_tx_avail(tnapi);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_tx_queue_stopped(txq)) {
                        netif_tx_stop_queue(txq);

                        /* This is a hard error, log it. */
                        netdev_err(dev,
                                   "BUG! Tx Ring full when queue awake!\n");
                }
                return NETDEV_TX_BUSY;
        }

        entry = tnapi->tx_prod;
        base_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;

        mss = skb_shinfo(skb)->gso_size;
        if (mss) {
                /* TSO: patch the IP/TCP headers and encode the header
                 * length into mss/base_flags per hardware TSO revision.
                 */
                struct iphdr *iph;
                u32 tcp_opt_len, hdr_len;

                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                        goto drop;

                iph = ip_hdr(skb);
                tcp_opt_len = tcp_optlen(skb);

                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

                if (!skb_is_gso_v6(skb)) {
                        iph->check = 0;
                        iph->tot_len = htons(mss + hdr_len);
                }

                /* Headers beyond 80 bytes trip the TSO erratum on some
                 * chips; segment in software instead.
                 */
                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                    tg3_flag(tp, TSO_BUG))
                        return tg3_tso_bug(tp, skb);

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                if (tg3_flag(tp, HW_TSO_1) ||
                    tg3_flag(tp, HW_TSO_2) ||
                    tg3_flag(tp, HW_TSO_3)) {
                        tcp_hdr(skb)->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
                } else
                        /* Firmware TSO wants a pseudo-header checksum seed. */
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);

                /* Header-length encoding differs per TSO hardware rev. */
                if (tg3_flag(tp, HW_TSO_3)) {
                        mss |= (hdr_len & 0xc) << 12;
                        if (hdr_len & 0x10)
                                base_flags |= 0x00000010;
                        base_flags |= (hdr_len & 0x3e0) << 5;
                } else if (tg3_flag(tp, HW_TSO_2))
                        mss |= hdr_len << 9;
                else if (tg3_flag(tp, HW_TSO_1) ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                mss |= (tsflags << 11);
                        }
                } else {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                base_flags |= tsflags << 12;
                        }
                }
        }

        if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
            !mss && skb->len > VLAN_ETH_FRAME_LEN)
                base_flags |= TXD_FLAG_JMB_PKT;

        if (vlan_tx_tag_present(skb)) {
                base_flags |= TXD_FLAG_VLAN;
                vlan = vlan_tx_tag_get(skb);
        }

        if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
            tg3_flag(tp, TX_TSTAMP_EN)) {
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                base_flags |= TXD_FLAG_HWTSTAMP;
        }

        len = skb_headlen(skb);

        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(tp->pdev, mapping))
                goto drop;


        tnapi->tx_buffers[entry].skb = skb;
        dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

        would_hit_hwbug = 0;

        if (tg3_flag(tp, 5701_DMA_BUG))
                would_hit_hwbug = 1;

        /* Queue the head; END flag only if there are no fragments. */
        if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
                          ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
                            mss, vlan)) {
                would_hit_hwbug = 1;
        } else if (skb_shinfo(skb)->nr_frags > 0) {
                u32 tmp_mss = mss;

                /* Only HW TSO chips carry mss in the fragment BDs. */
                if (!tg3_flag(tp, HW_TSO_1) &&
                    !tg3_flag(tp, HW_TSO_2) &&
                    !tg3_flag(tp, HW_TSO_3))
                        tmp_mss = 0;

                /* Now loop through additional data
                 * fragments, and queue them.
                 */
                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = skb_frag_size(frag);
                        mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
                                                   len, DMA_TO_DEVICE);

                        tnapi->tx_buffers[entry].skb = NULL;
                        dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
                                           mapping);
                        if (dma_mapping_error(&tp->pdev->dev, mapping))
                                goto dma_error;

                        if (!budget ||
                            tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
                                            len, base_flags |
                                            ((i == last) ? TXD_FLAG_END : 0),
                                            tmp_mss, vlan)) {
                                would_hit_hwbug = 1;
                                break;
                        }
                }
        }

        if (would_hit_hwbug) {
                /* Unwind everything queued so far, then retry via a
                 * linearized copy.
                 */
                tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
                entry = tnapi->tx_prod;
                budget = tg3_tx_avail(tnapi);
                if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
                                                base_flags, mss, vlan))
                        goto drop_nofree;
        }

        skb_tx_timestamp(skb);
        netdev_tx_sent_queue(txq, skb->len);

        /* Sync BD data before updating mailbox */
        wmb();

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox(tnapi->prodmbox, entry);

        tnapi->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
                netif_tx_stop_queue(txq);

                /* netif_tx_stop_queue() must be done before checking
                 * tx index in tg3_tx_avail() below, because in
                 * tg3_tx(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
                if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
                        netif_tx_wake_queue(txq);
        }

        mmiowb();
        return NETDEV_TX_OK;

dma_error:
        /* Fragment i failed to map: unmap head plus frags 0..i-1. */
        tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
        tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
        dev_kfree_skb(skb);
drop_nofree:
        tp->tx_dropped++;
        return NETDEV_TX_OK;
}
7500
7501 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7502 {
7503         if (enable) {
7504                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7505                                   MAC_MODE_PORT_MODE_MASK);
7506
7507                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7508
7509                 if (!tg3_flag(tp, 5705_PLUS))
7510                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7511
7512                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7513                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7514                 else
7515                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7516         } else {
7517                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7518
7519                 if (tg3_flag(tp, 5705_PLUS) ||
7520                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7521                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7522                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7523         }
7524
7525         tw32(MAC_MODE, tp->mac_mode);
7526         udelay(40);
7527 }
7528
/* Configure the PHY for loopback at @speed and program the MAC port
 * mode to match.  With @extlpbk, set up external loopback (master mode
 * on gigabit PHYs, PTEST trim on FETs) instead of BMCR_LOOPBACK.
 * Returns 0, or -EIO when external loopback setup fails.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
        u32 val, bmcr, mac_mode, ptest = 0;

        tg3_phy_toggle_apd(tp, false);
        tg3_phy_toggle_automdix(tp, 0);

        if (extlpbk && tg3_phy_set_extloopbk(tp))
                return -EIO;

        bmcr = BMCR_FULLDPLX;
        switch (speed) {
        case SPEED_10:
                break;
        case SPEED_100:
                bmcr |= BMCR_SPEED100;
                break;
        case SPEED_1000:
        default:
                /* FET PHYs get clamped to 100 here — presumably they are
                 * 10/100-only parts; TODO confirm.
                 */
                if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                        speed = SPEED_100;
                        bmcr |= BMCR_SPEED100;
                } else {
                        speed = SPEED_1000;
                        bmcr |= BMCR_SPEED1000;
                }
        }

        if (extlpbk) {
                if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
                        /* Force master so the link comes up without a
                         * partner negotiating the role.
                         */
                        tg3_readphy(tp, MII_CTRL1000, &val);
                        val |= CTL1000_AS_MASTER |
                               CTL1000_ENABLE_MASTER;
                        tg3_writephy(tp, MII_CTRL1000, val);
                } else {
                        ptest = MII_TG3_FET_PTEST_TRIM_SEL |
                                MII_TG3_FET_PTEST_TRIM_2;
                        tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
                }
        } else
                bmcr |= BMCR_LOOPBACK;

        tg3_writephy(tp, MII_BMCR, bmcr);

        /* The write needs to be flushed for the FETs */
        if (tp->phy_flags & TG3_PHYFLG_IS_FET)
                tg3_readphy(tp, MII_BMCR, &bmcr);

        udelay(40);

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
                tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
                             MII_TG3_FET_PTEST_FRC_TX_LINK |
                             MII_TG3_FET_PTEST_FRC_TX_LOCK);

                /* The write needs to be flushed for the AC131 */
                tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
        }

        /* Reset to prevent losing 1st rx packet intermittently */
        if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
            tg3_flag(tp, 5780_CLASS)) {
                tw32_f(MAC_RX_MODE, RX_MODE_RESET);
                udelay(10);
                tw32_f(MAC_RX_MODE, tp->rx_mode);
        }

        /* Point the MAC at the right port mode for the chosen speed. */
        mac_mode = tp->mac_mode &
                   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
        if (speed == SPEED_1000)
                mac_mode |= MAC_MODE_PORT_MODE_GMII;
        else
                mac_mode |= MAC_MODE_PORT_MODE_MII;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

                /* 5401/5411 PHYs need opposite link-polarity settings. */
                if (masked_phy_id == TG3_PHY_ID_BCM5401)
                        mac_mode &= ~MAC_MODE_LINK_POLARITY;
                else if (masked_phy_id == TG3_PHY_ID_BCM5411)
                        mac_mode |= MAC_MODE_LINK_POLARITY;

                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_LNK3_LED_MODE);
        }

        tw32(MAC_MODE, mac_mode);
        udelay(40);

        return 0;
}
7621
7622 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7623 {
7624         struct tg3 *tp = netdev_priv(dev);
7625
7626         if (features & NETIF_F_LOOPBACK) {
7627                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7628                         return;
7629
7630                 spin_lock_bh(&tp->lock);
7631                 tg3_mac_loopback(tp, true);
7632                 netif_carrier_on(tp->dev);
7633                 spin_unlock_bh(&tp->lock);
7634                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7635         } else {
7636                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7637                         return;
7638
7639                 spin_lock_bh(&tp->lock);
7640                 tg3_mac_loopback(tp, false);
7641                 /* Force link status check */
7642                 tg3_setup_phy(tp, 1);
7643                 spin_unlock_bh(&tp->lock);
7644                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7645         }
7646 }
7647
7648 static netdev_features_t tg3_fix_features(struct net_device *dev,
7649         netdev_features_t features)
7650 {
7651         struct tg3 *tp = netdev_priv(dev);
7652
7653         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7654                 features &= ~NETIF_F_ALL_TSO;
7655
7656         return features;
7657 }
7658
7659 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7660 {
7661         netdev_features_t changed = dev->features ^ features;
7662
7663         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7664                 tg3_set_loopback(dev, features);
7665
7666         return 0;
7667 }
7668
7669 static void tg3_rx_prodring_free(struct tg3 *tp,
7670                                  struct tg3_rx_prodring_set *tpr)
7671 {
7672         int i;
7673
7674         if (tpr != &tp->napi[0].prodring) {
7675                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7676                      i = (i + 1) & tp->rx_std_ring_mask)
7677                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7678                                         tp->rx_pkt_map_sz);
7679
7680                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7681                         for (i = tpr->rx_jmb_cons_idx;
7682                              i != tpr->rx_jmb_prod_idx;
7683                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7684                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7685                                                 TG3_RX_JMB_MAP_SZ);
7686                         }
7687                 }
7688
7689                 return;
7690         }
7691
7692         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7693                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7694                                 tp->rx_pkt_map_sz);
7695
7696         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7697                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7698                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7699                                         TG3_RX_JMB_MAP_SZ);
7700         }
7701 }
7702
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success.  If the very first buffer of a ring cannot be
 * allocated, everything is freed and -ENOMEM is returned; a partially
 * filled ring merely shrinks rx_pending / rx_jumbo_pending.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
{
        u32 i, rx_pkt_dma_sz;

        tpr->rx_std_cons_idx = 0;
        tpr->rx_std_prod_idx = 0;
        tpr->rx_jmb_cons_idx = 0;
        tpr->rx_jmb_prod_idx = 0;

        /* NOTE(review): non-default ring sets only get their bookkeeping
         * arrays cleared here — descriptor setup appears to live with
         * napi[0]'s ring; confirm against the ring programming code.
         */
        if (tpr != &tp->napi[0].prodring) {
                memset(&tpr->rx_std_buffers[0], 0,
                       TG3_RX_STD_BUFF_RING_SIZE(tp));
                if (tpr->rx_jmb_buffers)
                        memset(&tpr->rx_jmb_buffers[0], 0,
                               TG3_RX_JMB_BUFF_RING_SIZE(tp));
                goto done;
        }

        /* Zero out all descriptors. */
        memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

        /* 5780-class chips carry jumbo frames in the standard ring. */
        rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
        if (tg3_flag(tp, 5780_CLASS) &&
            tp->dev->mtu > ETH_DATA_LEN)
                rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
        tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

        /* Initialize invariants of the rings, we only set this
         * stuff once.  This works because the card does not
         * write into the rx buffer posting rings.
         */
        for (i = 0; i <= tp->rx_std_ring_mask; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tpr->rx_std[i];
                rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
                rxd->opaque = (RXD_OPAQUE_RING_STD |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
                unsigned int frag_size;

                if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
                                      &frag_size) < 0) {
                        netdev_warn(tp->dev,
                                    "Using a smaller RX standard ring. Only "
                                    "%d out of %d buffers were allocated "
                                    "successfully\n", i, tp->rx_pending);
                        /* Keep a shrunken ring if anything was allocated. */
                        if (i == 0)
                                goto initfail;
                        tp->rx_pending = i;
                        break;
                }
        }

        if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
                goto done;

        memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

        if (!tg3_flag(tp, JUMBO_RING_ENABLE))
                goto done;

        for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tpr->rx_jmb[i].std;
                rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
                                  RXD_FLAG_JUMBO;
                rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
                       (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        for (i = 0; i < tp->rx_jumbo_pending; i++) {
                unsigned int frag_size;

                if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
                                      &frag_size) < 0) {
                        netdev_warn(tp->dev,
                                    "Using a smaller RX jumbo ring. Only %d "
                                    "out of %d buffers were allocated "
                                    "successfully\n", i, tp->rx_jumbo_pending);
                        if (i == 0)
                                goto initfail;
                        tp->rx_jumbo_pending = i;
                        break;
                }
        }

done:
        return 0;

initfail:
        tg3_rx_prodring_free(tp, tpr);
        return -ENOMEM;
}
7811
7812 static void tg3_rx_prodring_fini(struct tg3 *tp,
7813                                  struct tg3_rx_prodring_set *tpr)
7814 {
7815         kfree(tpr->rx_std_buffers);
7816         tpr->rx_std_buffers = NULL;
7817         kfree(tpr->rx_jmb_buffers);
7818         tpr->rx_jmb_buffers = NULL;
7819         if (tpr->rx_std) {
7820                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7821                                   tpr->rx_std, tpr->rx_std_mapping);
7822                 tpr->rx_std = NULL;
7823         }
7824         if (tpr->rx_jmb) {
7825                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7826                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7827                 tpr->rx_jmb = NULL;
7828         }
7829 }
7830
7831 static int tg3_rx_prodring_init(struct tg3 *tp,
7832                                 struct tg3_rx_prodring_set *tpr)
7833 {
7834         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7835                                       GFP_KERNEL);
7836         if (!tpr->rx_std_buffers)
7837                 return -ENOMEM;
7838
7839         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7840                                          TG3_RX_STD_RING_BYTES(tp),
7841                                          &tpr->rx_std_mapping,
7842                                          GFP_KERNEL);
7843         if (!tpr->rx_std)
7844                 goto err_out;
7845
7846         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7847                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7848                                               GFP_KERNEL);
7849                 if (!tpr->rx_jmb_buffers)
7850                         goto err_out;
7851
7852                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7853                                                  TG3_RX_JMB_RING_BYTES(tp),
7854                                                  &tpr->rx_jmb_mapping,
7855                                                  GFP_KERNEL);
7856                 if (!tpr->rx_jmb)
7857                         goto err_out;
7858         }
7859
7860         return 0;
7861
7862 err_out:
7863         tg3_rx_prodring_fini(tp, tpr);
7864         return -ENOMEM;
7865 }
7866
7867 /* Free up pending packets in all rx/tx rings.
7868  *
7869  * The chip has been shut down and the driver detached from
7870  * the networking, so no interrupts or new tx packets will
7871  * end up in the driver.  tp->{tx,}lock is not held and we are not
7872  * in an interrupt context and thus may sleep.
7873  */
7874 static void tg3_free_rings(struct tg3 *tp)
7875 {
7876         int i, j;
7877
7878         for (j = 0; j < tp->irq_cnt; j++) {
7879                 struct tg3_napi *tnapi = &tp->napi[j];
7880
7881                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7882
7883                 if (!tnapi->tx_buffers)
7884                         continue;
7885
7886                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7887                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7888
7889                         if (!skb)
7890                                 continue;
7891
7892                         tg3_tx_skb_unmap(tnapi, i,
7893                                          skb_shinfo(skb)->nr_frags - 1);
7894
7895                         dev_kfree_skb_any(skb);
7896                 }
7897                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7898         }
7899 }
7900
7901 /* Initialize tx/rx rings for packet processing.
7902  *
7903  * The chip has been shut down and the driver detached from
7904  * the networking, so no interrupts or new tx packets will
7905  * end up in the driver.  tp->{tx,}lock are held and thus
7906  * we may not sleep.
7907  */
7908 static int tg3_init_rings(struct tg3 *tp)
7909 {
7910         int i;
7911
7912         /* Free up all the SKBs. */
7913         tg3_free_rings(tp);
7914
7915         for (i = 0; i < tp->irq_cnt; i++) {
7916                 struct tg3_napi *tnapi = &tp->napi[i];
7917
7918                 tnapi->last_tag = 0;
7919                 tnapi->last_irq_tag = 0;
7920                 tnapi->hw_status->status = 0;
7921                 tnapi->hw_status->status_tag = 0;
7922                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7923
7924                 tnapi->tx_prod = 0;
7925                 tnapi->tx_cons = 0;
7926                 if (tnapi->tx_ring)
7927                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7928
7929                 tnapi->rx_rcb_ptr = 0;
7930                 if (tnapi->rx_rcb)
7931                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7932
7933                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7934                         tg3_free_rings(tp);
7935                         return -ENOMEM;
7936                 }
7937         }
7938
7939         return 0;
7940 }
7941
7942 static void tg3_mem_tx_release(struct tg3 *tp)
7943 {
7944         int i;
7945
7946         for (i = 0; i < tp->irq_max; i++) {
7947                 struct tg3_napi *tnapi = &tp->napi[i];
7948
7949                 if (tnapi->tx_ring) {
7950                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7951                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7952                         tnapi->tx_ring = NULL;
7953                 }
7954
7955                 kfree(tnapi->tx_buffers);
7956                 tnapi->tx_buffers = NULL;
7957         }
7958 }
7959
7960 static int tg3_mem_tx_acquire(struct tg3 *tp)
7961 {
7962         int i;
7963         struct tg3_napi *tnapi = &tp->napi[0];
7964
7965         /* If multivector TSS is enabled, vector 0 does not handle
7966          * tx interrupts.  Don't allocate any resources for it.
7967          */
7968         if (tg3_flag(tp, ENABLE_TSS))
7969                 tnapi++;
7970
7971         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7972                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7973                                             TG3_TX_RING_SIZE, GFP_KERNEL);
7974                 if (!tnapi->tx_buffers)
7975                         goto err_out;
7976
7977                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7978                                                     TG3_TX_RING_BYTES,
7979                                                     &tnapi->tx_desc_mapping,
7980                                                     GFP_KERNEL);
7981                 if (!tnapi->tx_ring)
7982                         goto err_out;
7983         }
7984
7985         return 0;
7986
7987 err_out:
7988         tg3_mem_tx_release(tp);
7989         return -ENOMEM;
7990 }
7991
7992 static void tg3_mem_rx_release(struct tg3 *tp)
7993 {
7994         int i;
7995
7996         for (i = 0; i < tp->irq_max; i++) {
7997                 struct tg3_napi *tnapi = &tp->napi[i];
7998
7999                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8000
8001                 if (!tnapi->rx_rcb)
8002                         continue;
8003
8004                 dma_free_coherent(&tp->pdev->dev,
8005                                   TG3_RX_RCB_RING_BYTES(tp),
8006                                   tnapi->rx_rcb,
8007                                   tnapi->rx_rcb_mapping);
8008                 tnapi->rx_rcb = NULL;
8009         }
8010 }
8011
8012 static int tg3_mem_rx_acquire(struct tg3 *tp)
8013 {
8014         unsigned int i, limit;
8015
8016         limit = tp->rxq_cnt;
8017
8018         /* If RSS is enabled, we need a (dummy) producer ring
8019          * set on vector zero.  This is the true hw prodring.
8020          */
8021         if (tg3_flag(tp, ENABLE_RSS))
8022                 limit++;
8023
8024         for (i = 0; i < limit; i++) {
8025                 struct tg3_napi *tnapi = &tp->napi[i];
8026
8027                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8028                         goto err_out;
8029
8030                 /* If multivector RSS is enabled, vector 0
8031                  * does not handle rx or tx interrupts.
8032                  * Don't allocate any resources for it.
8033                  */
8034                 if (!i && tg3_flag(tp, ENABLE_RSS))
8035                         continue;
8036
8037                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8038                                                    TG3_RX_RCB_RING_BYTES(tp),
8039                                                    &tnapi->rx_rcb_mapping,
8040                                                    GFP_KERNEL);
8041                 if (!tnapi->rx_rcb)
8042                         goto err_out;
8043
8044                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8045         }
8046
8047         return 0;
8048
8049 err_out:
8050         tg3_mem_rx_release(tp);
8051         return -ENOMEM;
8052 }
8053
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 *
 * NOTE(review): the sentence above reads inverted; presumably the
 * intent is that this must only run once interrupts are quiesced and
 * the hardware is shut down, since it frees DMA-coherent memory the
 * chip could otherwise still write to — confirm against callers.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	/* Free each vector's DMA-coherent status block first. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	/* Then the rx/tx ring memory... */
	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	/* ...and finally the hardware statistics block. */
	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
8082
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates all DMA-coherent memory shared with the chip: the hw stats
 * block, one status block per interrupt vector, and (via the helpers)
 * the tx/rx rings.  Returns 0 or -ENOMEM; on failure everything
 * allocated so far is released by tg3_free_consistent().
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	/* Explicit zeroing — dma_alloc_coherent() is not relied on to
	 * return zeroed memory here.
	 */
	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			/* Vector 0 (and vectors > 4) gets NULL: under RSS it
			 * handles no rx return ring of its own.
			 */
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
8152
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 *
 * Returns 0 on success (or on the 5705-plus register exceptions below),
 * -ENODEV if the enable bit failed to clear and @silent is not set.
 * Note: with @silent set, a timeout still returns 0 — callers using
 * silent mode treat the stop as best-effort.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	/* Clear the enable bit; tw32_f flushes the posted write. */
	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	/* Poll up to MAX_WAIT_CNT * 100us (~100ms) for the hardware to
	 * acknowledge by clearing the bit.
	 */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
8200
/* tp->lock is held.
 *
 * Quiesce the chip: disable interrupts, then stop the rx path, the tx
 * path, host coalescing, DMA and buffer-manager blocks, in that order.
 * Individual stop failures are OR-ed into @err so the shutdown always
 * runs to completion; returns 0 or a negative errno.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop the MAC receiver first so no new frames enter the rx path. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Stop the rx-side blocks. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Stop the tx-side blocks and the read DMA engine. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	/* Disable the MAC transmit-DMA engine, then the transmitter itself. */
	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	/* Poll up to MAX_WAIT_CNT * 100us for the transmitter to stop. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	/* Stop host coalescing, write DMA and the mbuf-free block. */
	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse-reset the flow-through queues. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Wipe the per-vector status blocks now that the chip is quiet. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
8264
/* Save PCI command register before chip reset (the GRC core-clock
 * reset can clear the memory-enable bit; tg3_restore_pci_state()
 * writes this value back afterwards).
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
8270
/* Restore PCI state after chip reset: re-enable indirect register
 * access, rewrite PCISTATE and the saved PCI_COMMAND word, restore
 * cacheline/latency for non-PCIe parts, clear PCI-X relaxed ordering,
 * and re-enable MSI on 5780-class chips (reset clears its enable bit).
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Restore the command word saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			/* Also re-enable the chip's message-interrupt mode. */
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
8331
/* tp->lock is held.
 *
 * Perform a full GRC core-clock chip reset and bring the device back
 * to a usable post-reset state: save/restore PCI config state, quiesce
 * the irq handlers, issue the reset, re-apply chip-rev-specific
 * workarounds, wait for firmware, and re-probe the ASF configuration.
 * Returns 0 on success or the error from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	/* Make the flag and cleared tags visible before waiting for any
	 * in-flight irq handlers to finish.
	 */
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		/* Bit 29 has no symbolic name in this driver; set both in
		 * GRC_MISC_CFG ahead of the reset and in the reset value.
		 */
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			/* Config offset 0xc4 bit 15: chip-specific, no
			 * symbolic name in this driver.
			 */
			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	/* Re-enable the memory arbiter (5780-class keeps its other
	 * MEMARB_MODE bits across the reset).
	 */
	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	/* Register 0x5000: chip-specific workaround for 5750 A3. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	tw32(GRC_MODE, tp->grc_mode);

	/* Register 0xc4 bit 15: chip-specific 5705 A0 workaround. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Pick the post-reset MAC port mode from the PHY type. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	/* Register 0x7c00 bit 25: chip-specific, no symbolic name. */
	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
8578
8579 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8580 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8581
8582 /* tp->lock is held. */
8583 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8584 {
8585         int err;
8586
8587         tg3_stop_fw(tp);
8588
8589         tg3_write_sig_pre_reset(tp, kind);
8590
8591         tg3_abort_hw(tp, silent);
8592         err = tg3_chip_reset(tp);
8593
8594         __tg3_set_mac_addr(tp, 0);
8595
8596         tg3_write_sig_legacy(tp, kind);
8597         tg3_write_sig_post_reset(tp, kind);
8598
8599         if (tp->hw_stats) {
8600                 /* Save the stats across chip resets... */
8601                 tg3_get_nstats(tp, &tp->net_stats_prev);
8602                 tg3_get_estats(tp, &tp->estats_prev);
8603
8604                 /* And make sure the next sample is new data */
8605                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8606         }
8607
8608         if (err)
8609                 return err;
8610
8611         return 0;
8612 }
8613
8614 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8615 {
8616         struct tg3 *tp = netdev_priv(dev);
8617         struct sockaddr *addr = p;
8618         int err = 0, skip_mac_1 = 0;
8619
8620         if (!is_valid_ether_addr(addr->sa_data))
8621                 return -EADDRNOTAVAIL;
8622
8623         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8624
8625         if (!netif_running(dev))
8626                 return 0;
8627
8628         if (tg3_flag(tp, ENABLE_ASF)) {
8629                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8630
8631                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8632                 addr0_low = tr32(MAC_ADDR_0_LOW);
8633                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8634                 addr1_low = tr32(MAC_ADDR_1_LOW);
8635
8636                 /* Skip MAC addr 1 if ASF is using it. */
8637                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8638                     !(addr1_high == 0 && addr1_low == 0))
8639                         skip_mac_1 = 1;
8640         }
8641         spin_lock_bh(&tp->lock);
8642         __tg3_set_mac_addr(tp, skip_mac_1);
8643         spin_unlock_bh(&tp->lock);
8644
8645         return err;
8646 }
8647
/* tp->lock is held.
 *
 * Program one buffer-descriptor-info (BDINFO) record in NIC memory:
 * the 64-bit host DMA address as two 32-bit halves, the length/flags
 * word, and — on pre-5705 chips only — the NIC-local ring address.
 */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		       maxlen_flags);

	/* 5705-plus chips have no NIC-address field in the BDINFO record. */
	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
8668
8669
/* Program tx interrupt-coalescing parameters from @ec.
 *
 * Without TSS only the global tx coalescing registers are written;
 * with TSS the globals are zeroed and each tx vector's register
 * group (stride 0x18 bytes, starting at the VEC1 registers) is
 * programmed instead.  Any remaining vector register groups up to
 * irq_max - 1 are zeroed.
 */
static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

		for (; i < tp->txq_cnt; i++) {
			u32 reg;

			/* 0x18 = per-vector register group stride */
			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	/* Zero the per-vector groups for unused tx vectors. */
	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
8701
/* Program rx interrupt-coalescing parameters from @ec.
 *
 * Without RSS the global rx coalescing registers are written and
 * `limit` is decremented so the per-vector loop covers one fewer ring
 * (the first ring was handled by the globals); with RSS the globals
 * are zeroed and every rx vector's register group (stride 0x18,
 * starting at the VEC1 registers) is programmed.  Remaining vector
 * groups up to irq_max - 1 are zeroed.
 */
static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
		limit--;
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	for (; i < limit; i++) {
		u32 reg;

		/* 0x18 = per-vector register group stride */
		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
	}

	/* Zero the per-vector groups for unused rx vectors. */
	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
8735
8736 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8737 {
8738         tg3_coal_tx_init(tp, ec);
8739         tg3_coal_rx_init(tp, ec);
8740
8741         if (!tg3_flag(tp, 5705_PLUS)) {
8742                 u32 val = ec->stats_block_coalesce_usecs;
8743
8744                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8745                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8746
8747                 if (!tp->link_up)
8748                         val = 0;
8749
8750                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8751         }
8752 }
8753
/* tp->lock is held.
 *
 * Return the chip's ring state to a known baseline after a reset:
 * disable every send and receive-return ring control block except the
 * first, quiesce and zero all interrupt/producer/consumer mailboxes,
 * clear the host status blocks, and re-program the BD info blocks for
 * the rings that remain in use.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first.  The number of send
	 * RCBs present in NIC SRAM varies by chip generation.
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);


	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts (writing 1 to the interrupt mailbox masks
	 * the vector) and reset vector 0's software bookkeeping.
	 */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			/* Per-vector tx producer mailboxes exist only
			 * when TSS is enabled.
			 */
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		/* Without TSS, vector 0 owns the single tx producer
		 * mailbox.
		 */
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	/* Re-enable vector 0's tx/rx rings via their BD info blocks. */
	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	/* Program the status block address and ring BD info for each
	 * additional interrupt vector; the per-vector status block
	 * address registers are spaced 8 bytes apart.
	 */
	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		stblk += 8;
		rxrcb += TG3_BDINFO_SIZE;
	}
}
8883
8884 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8885 {
8886         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8887
8888         if (!tg3_flag(tp, 5750_PLUS) ||
8889             tg3_flag(tp, 5780_CLASS) ||
8890             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8891             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8892             tg3_flag(tp, 57765_PLUS))
8893                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8894         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8895                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8896                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8897         else
8898                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8899
8900         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8901         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8902
8903         val = min(nic_rep_thresh, host_rep_thresh);
8904         tw32(RCVBDI_STD_THRESH, val);
8905
8906         if (tg3_flag(tp, 57765_PLUS))
8907                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8908
8909         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8910                 return;
8911
8912         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8913
8914         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8915
8916         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8917         tw32(RCVBDI_JUMBO_THRESH, val);
8918
8919         if (tg3_flag(tp, 57765_PLUS))
8920                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8921 }
8922
/* Compute the standard reflected CRC-32 (polynomial 0xedb88320,
 * initial value 0xffffffff, final complement) over the @len bytes at
 * @buf, one bit at a time.  Used to index the MAC multicast hash
 * filter registers.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			/* Test the LSB before shifting it out. */
			crc = (crc >> 1) ^ ((crc & 0x01) ? 0xedb88320 : 0);
	}

	return ~crc;
}
8946
8947 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8948 {
8949         /* accept or reject all multicast frames */
8950         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8951         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8952         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8953         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8954 }
8955
8956 static void __tg3_set_rx_mode(struct net_device *dev)
8957 {
8958         struct tg3 *tp = netdev_priv(dev);
8959         u32 rx_mode;
8960
8961         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8962                                   RX_MODE_KEEP_VLAN_TAG);
8963
8964 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8965         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8966          * flag clear.
8967          */
8968         if (!tg3_flag(tp, ENABLE_ASF))
8969                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8970 #endif
8971
8972         if (dev->flags & IFF_PROMISC) {
8973                 /* Promiscuous mode. */
8974                 rx_mode |= RX_MODE_PROMISC;
8975         } else if (dev->flags & IFF_ALLMULTI) {
8976                 /* Accept all multicast. */
8977                 tg3_set_multi(tp, 1);
8978         } else if (netdev_mc_empty(dev)) {
8979                 /* Reject all multicast. */
8980                 tg3_set_multi(tp, 0);
8981         } else {
8982                 /* Accept one or more multicast(s). */
8983                 struct netdev_hw_addr *ha;
8984                 u32 mc_filter[4] = { 0, };
8985                 u32 regidx;
8986                 u32 bit;
8987                 u32 crc;
8988
8989                 netdev_for_each_mc_addr(ha, dev) {
8990                         crc = calc_crc(ha->addr, ETH_ALEN);
8991                         bit = ~crc & 0x7f;
8992                         regidx = (bit & 0x60) >> 5;
8993                         bit &= 0x1f;
8994                         mc_filter[regidx] |= (1 << bit);
8995                 }
8996
8997                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8998                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8999                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9000                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9001         }
9002
9003         if (rx_mode != tp->rx_mode) {
9004                 tp->rx_mode = rx_mode;
9005                 tw32_f(MAC_RX_MODE, rx_mode);
9006                 udelay(10);
9007         }
9008 }
9009
9010 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9011 {
9012         int i;
9013
9014         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9015                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9016 }
9017
9018 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9019 {
9020         int i;
9021
9022         if (!tg3_flag(tp, SUPPORT_MSIX))
9023                 return;
9024
9025         if (tp->rxq_cnt == 1) {
9026                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9027                 return;
9028         }
9029
9030         /* Validate table against current IRQ count */
9031         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9032                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9033                         break;
9034         }
9035
9036         if (i != TG3_RSS_INDIR_TBL_SIZE)
9037                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9038 }
9039
9040 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9041 {
9042         int i = 0;
9043         u32 reg = MAC_RSS_INDIR_TBL_0;
9044
9045         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9046                 u32 val = tp->rss_ind_tbl[i];
9047                 i++;
9048                 for (; i % 8; i++) {
9049                         val <<= 4;
9050                         val |= tp->rss_ind_tbl[i];
9051                 }
9052                 tw32(reg, val);
9053                 reg += 4;
9054         }
9055 }
9056
9057 /* tp->lock is held. */
9058 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9059 {
9060         u32 val, rdmac_mode;
9061         int i, err, limit;
9062         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9063
9064         tg3_disable_ints(tp);
9065
9066         tg3_stop_fw(tp);
9067
9068         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9069
9070         if (tg3_flag(tp, INIT_COMPLETE))
9071                 tg3_abort_hw(tp, 1);
9072
9073         /* Enable MAC control of LPI */
9074         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9075                 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9076                       TG3_CPMU_EEE_LNKIDL_UART_IDL;
9077                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
9078                         val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9079
9080                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9081
9082                 tw32_f(TG3_CPMU_EEE_CTRL,
9083                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9084
9085                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9086                       TG3_CPMU_EEEMD_LPI_IN_TX |
9087                       TG3_CPMU_EEEMD_LPI_IN_RX |
9088                       TG3_CPMU_EEEMD_EEE_ENABLE;
9089
9090                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
9091                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9092
9093                 if (tg3_flag(tp, ENABLE_APE))
9094                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9095
9096                 tw32_f(TG3_CPMU_EEE_MODE, val);
9097
9098                 tw32_f(TG3_CPMU_EEE_DBTMR1,
9099                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9100                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9101
9102                 tw32_f(TG3_CPMU_EEE_DBTMR2,
9103                        TG3_CPMU_DBTMR2_APE_TX_2047US |
9104                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9105         }
9106
9107         if (reset_phy)
9108                 tg3_phy_reset(tp);
9109
9110         err = tg3_chip_reset(tp);
9111         if (err)
9112                 return err;
9113
9114         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9115
9116         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
9117                 val = tr32(TG3_CPMU_CTRL);
9118                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9119                 tw32(TG3_CPMU_CTRL, val);
9120
9121                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9122                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9123                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9124                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9125
9126                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9127                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9128                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9129                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9130
9131                 val = tr32(TG3_CPMU_HST_ACC);
9132                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9133                 val |= CPMU_HST_ACC_MACCLK_6_25;
9134                 tw32(TG3_CPMU_HST_ACC, val);
9135         }
9136
9137         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
9138                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9139                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9140                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9141                 tw32(PCIE_PWR_MGMT_THRESH, val);
9142
9143                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9144                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9145
9146                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9147
9148                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9149                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9150         }
9151
9152         if (tg3_flag(tp, L1PLLPD_EN)) {
9153                 u32 grc_mode = tr32(GRC_MODE);
9154
9155                 /* Access the lower 1K of PL PCIE block registers. */
9156                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9157                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9158
9159                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9160                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9161                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9162
9163                 tw32(GRC_MODE, grc_mode);
9164         }
9165
9166         if (tg3_flag(tp, 57765_CLASS)) {
9167                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
9168                         u32 grc_mode = tr32(GRC_MODE);
9169
9170                         /* Access the lower 1K of PL PCIE block registers. */
9171                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9172                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9173
9174                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9175                                    TG3_PCIE_PL_LO_PHYCTL5);
9176                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9177                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9178
9179                         tw32(GRC_MODE, grc_mode);
9180                 }
9181
9182                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
9183                         u32 grc_mode;
9184
9185                         /* Fix transmit hangs */
9186                         val = tr32(TG3_CPMU_PADRNG_CTL);
9187                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9188                         tw32(TG3_CPMU_PADRNG_CTL, val);
9189
9190                         grc_mode = tr32(GRC_MODE);
9191
9192                         /* Access the lower 1K of DL PCIE block registers. */
9193                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9194                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9195
9196                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9197                                    TG3_PCIE_DL_LO_FTSMAX);
9198                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9199                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9200                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9201
9202                         tw32(GRC_MODE, grc_mode);
9203                 }
9204
9205                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9206                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9207                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9208                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9209         }
9210
9211         /* This works around an issue with Athlon chipsets on
9212          * B3 tigon3 silicon.  This bit has no effect on any
9213          * other revision.  But do not set this on PCI Express
9214          * chips and don't even touch the clocks if the CPMU is present.
9215          */
9216         if (!tg3_flag(tp, CPMU_PRESENT)) {
9217                 if (!tg3_flag(tp, PCI_EXPRESS))
9218                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9219                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9220         }
9221
9222         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
9223             tg3_flag(tp, PCIX_MODE)) {
9224                 val = tr32(TG3PCI_PCISTATE);
9225                 val |= PCISTATE_RETRY_SAME_DMA;
9226                 tw32(TG3PCI_PCISTATE, val);
9227         }
9228
9229         if (tg3_flag(tp, ENABLE_APE)) {
9230                 /* Allow reads and writes to the
9231                  * APE register and memory space.
9232                  */
9233                 val = tr32(TG3PCI_PCISTATE);
9234                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9235                        PCISTATE_ALLOW_APE_SHMEM_WR |
9236                        PCISTATE_ALLOW_APE_PSPACE_WR;
9237                 tw32(TG3PCI_PCISTATE, val);
9238         }
9239
9240         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
9241                 /* Enable some hw fixes.  */
9242                 val = tr32(TG3PCI_MSI_DATA);
9243                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9244                 tw32(TG3PCI_MSI_DATA, val);
9245         }
9246
9247         /* Descriptor ring init may make accesses to the
9248          * NIC SRAM area to setup the TX descriptors, so we
9249          * can only do this after the hardware has been
9250          * successfully reset.
9251          */
9252         err = tg3_init_rings(tp);
9253         if (err)
9254                 return err;
9255
9256         if (tg3_flag(tp, 57765_PLUS)) {
9257                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9258                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9259                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
9260                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9261                 if (!tg3_flag(tp, 57765_CLASS) &&
9262                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9263                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5762)
9264                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9265                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9266         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
9267                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
9268                 /* This value is determined during the probe time DMA
9269                  * engine test, tg3_test_dma.
9270                  */
9271                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9272         }
9273
9274         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9275                           GRC_MODE_4X_NIC_SEND_RINGS |
9276                           GRC_MODE_NO_TX_PHDR_CSUM |
9277                           GRC_MODE_NO_RX_PHDR_CSUM);
9278         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9279
9280         /* Pseudo-header checksum is done by hardware logic and not
9281          * the offload processers, so make the chip do the pseudo-
9282          * header checksums on receive.  For transmit it is more
9283          * convenient to do the pseudo-header checksum in software
9284          * as Linux does that on transmit for us in all cases.
9285          */
9286         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9287
9288         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9289         if (tp->rxptpctl)
9290                 tw32(TG3_RX_PTP_CTL,
9291                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9292
9293         if (tg3_flag(tp, PTP_CAPABLE))
9294                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9295
9296         tw32(GRC_MODE, tp->grc_mode | val);
9297
9298         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
9299         val = tr32(GRC_MISC_CFG);
9300         val &= ~0xff;
9301         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9302         tw32(GRC_MISC_CFG, val);
9303
9304         /* Initialize MBUF/DESC pool. */
9305         if (tg3_flag(tp, 5750_PLUS)) {
9306                 /* Do nothing.  */
9307         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
9308                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9309                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9310                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9311                 else
9312                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9313                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9314                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9315         } else if (tg3_flag(tp, TSO_CAPABLE)) {
9316                 int fw_len;
9317
9318                 fw_len = tp->fw_len;
9319                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9320                 tw32(BUFMGR_MB_POOL_ADDR,
9321                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9322                 tw32(BUFMGR_MB_POOL_SIZE,
9323                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9324         }
9325
9326         if (tp->dev->mtu <= ETH_DATA_LEN) {
9327                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9328                      tp->bufmgr_config.mbuf_read_dma_low_water);
9329                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9330                      tp->bufmgr_config.mbuf_mac_rx_low_water);
9331                 tw32(BUFMGR_MB_HIGH_WATER,
9332                      tp->bufmgr_config.mbuf_high_water);
9333         } else {
9334                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9335                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9336                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9337                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9338                 tw32(BUFMGR_MB_HIGH_WATER,
9339                      tp->bufmgr_config.mbuf_high_water_jumbo);
9340         }
9341         tw32(BUFMGR_DMA_LOW_WATER,
9342              tp->bufmgr_config.dma_low_water);
9343         tw32(BUFMGR_DMA_HIGH_WATER,
9344              tp->bufmgr_config.dma_high_water);
9345
9346         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9347         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
9348                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9349         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9350             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9351             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
9352                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9353         tw32(BUFMGR_MODE, val);
9354         for (i = 0; i < 2000; i++) {
9355                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9356                         break;
9357                 udelay(10);
9358         }
9359         if (i >= 2000) {
9360                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9361                 return -ENODEV;
9362         }
9363
9364         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
9365                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9366
9367         tg3_setup_rxbd_thresholds(tp);
9368
9369         /* Initialize TG3_BDINFO's at:
9370          *  RCVDBDI_STD_BD:     standard eth size rx ring
9371          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9372          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
9373          *
9374          * like so:
9375          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9376          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9377          *                              ring attribute flags
9378          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9379          *
9380          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9381          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9382          *
9383          * The size of each ring is fixed in the firmware, but the location is
9384          * configurable.
9385          */
9386         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9387              ((u64) tpr->rx_std_mapping >> 32));
9388         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9389              ((u64) tpr->rx_std_mapping & 0xffffffff));
9390         if (!tg3_flag(tp, 5717_PLUS))
9391                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9392                      NIC_SRAM_RX_BUFFER_DESC);
9393
9394         /* Disable the mini ring */
9395         if (!tg3_flag(tp, 5705_PLUS))
9396                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9397                      BDINFO_FLAGS_DISABLED);
9398
9399         /* Program the jumbo buffer descriptor ring control
9400          * blocks on those devices that have them.
9401          */
9402         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9403             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9404
9405                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9406                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9407                              ((u64) tpr->rx_jmb_mapping >> 32));
9408                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9409                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9410                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9411                               BDINFO_FLAGS_MAXLEN_SHIFT;
9412                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9413                              val | BDINFO_FLAGS_USE_EXT_RECV);
9414                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9415                             tg3_flag(tp, 57765_CLASS) ||
9416                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9417                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9418                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9419                 } else {
9420                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9421                              BDINFO_FLAGS_DISABLED);
9422                 }
9423
9424                 if (tg3_flag(tp, 57765_PLUS)) {
9425                         val = TG3_RX_STD_RING_SIZE(tp);
9426                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9427                         val |= (TG3_RX_STD_DMA_SZ << 2);
9428                 } else
9429                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9430         } else
9431                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9432
9433         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9434
9435         tpr->rx_std_prod_idx = tp->rx_pending;
9436         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9437
9438         tpr->rx_jmb_prod_idx =
9439                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9440         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9441
9442         tg3_rings_reset(tp);
9443
9444         /* Initialize MAC address and backoff seed. */
9445         __tg3_set_mac_addr(tp, 0);
9446
9447         /* MTU + ethernet header + FCS + optional VLAN tag */
9448         tw32(MAC_RX_MTU_SIZE,
9449              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9450
9451         /* The slot time is changed by tg3_setup_phy if we
9452          * run at gigabit with half duplex.
9453          */
9454         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9455               (6 << TX_LENGTHS_IPG_SHIFT) |
9456               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9457
9458         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9459             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9460                 val |= tr32(MAC_TX_LENGTHS) &
9461                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9462                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9463
9464         tw32(MAC_TX_LENGTHS, val);
9465
9466         /* Receive rules. */
9467         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9468         tw32(RCVLPC_CONFIG, 0x0181);
9469
9470         /* Calculate RDMAC_MODE setting early, we need it to determine
9471          * the RCVLPC_STATE_ENABLE mask.
9472          */
9473         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9474                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9475                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9476                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9477                       RDMAC_MODE_LNGREAD_ENAB);
9478
9479         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9480                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9481
9482         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9483             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9484             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9485                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9486                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9487                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9488
9489         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9490             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9491                 if (tg3_flag(tp, TSO_CAPABLE) &&
9492                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
9493                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9494                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9495                            !tg3_flag(tp, IS_5788)) {
9496                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9497                 }
9498         }
9499
9500         if (tg3_flag(tp, PCI_EXPRESS))
9501                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9502
9503         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
9504                 tp->dma_limit = 0;
9505                 if (tp->dev->mtu <= ETH_DATA_LEN) {
9506                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9507                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9508                 }
9509         }
9510
9511         if (tg3_flag(tp, HW_TSO_1) ||
9512             tg3_flag(tp, HW_TSO_2) ||
9513             tg3_flag(tp, HW_TSO_3))
9514                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9515
9516         if (tg3_flag(tp, 57765_PLUS) ||
9517             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9518             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9519                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9520
9521         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9522             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9523                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9524
9525         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9526             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9527             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9528             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9529             tg3_flag(tp, 57765_PLUS)) {
9530                 u32 tgtreg;
9531
9532                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9533                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9534                 else
9535                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
9536
9537                 val = tr32(tgtreg);
9538                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9539                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9540                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9541                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9542                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9543                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9544                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9545                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9546                 }
9547                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9548         }
9549
9550         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9551             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9552             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9553                 u32 tgtreg;
9554
9555                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9556                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9557                 else
9558                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9559
9560                 val = tr32(tgtreg);
9561                 tw32(tgtreg, val |
9562                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9563                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9564         }
9565
9566         /* Receive/send statistics. */
9567         if (tg3_flag(tp, 5750_PLUS)) {
9568                 val = tr32(RCVLPC_STATS_ENABLE);
9569                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9570                 tw32(RCVLPC_STATS_ENABLE, val);
9571         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9572                    tg3_flag(tp, TSO_CAPABLE)) {
9573                 val = tr32(RCVLPC_STATS_ENABLE);
9574                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9575                 tw32(RCVLPC_STATS_ENABLE, val);
9576         } else {
9577                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9578         }
9579         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9580         tw32(SNDDATAI_STATSENAB, 0xffffff);
9581         tw32(SNDDATAI_STATSCTRL,
9582              (SNDDATAI_SCTRL_ENABLE |
9583               SNDDATAI_SCTRL_FASTUPD));
9584
9585         /* Setup host coalescing engine. */
9586         tw32(HOSTCC_MODE, 0);
9587         for (i = 0; i < 2000; i++) {
9588                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9589                         break;
9590                 udelay(10);
9591         }
9592
9593         __tg3_set_coalesce(tp, &tp->coal);
9594
9595         if (!tg3_flag(tp, 5705_PLUS)) {
9596                 /* Status/statistics block address.  See tg3_timer,
9597                  * the tg3_periodic_fetch_stats call there, and
9598                  * tg3_get_stats to see how this works for 5705/5750 chips.
9599                  */
9600                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9601                      ((u64) tp->stats_mapping >> 32));
9602                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9603                      ((u64) tp->stats_mapping & 0xffffffff));
9604                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9605
9606                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9607
9608                 /* Clear statistics and status block memory areas */
9609                 for (i = NIC_SRAM_STATS_BLK;
9610                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9611                      i += sizeof(u32)) {
9612                         tg3_write_mem(tp, i, 0);
9613                         udelay(40);
9614                 }
9615         }
9616
9617         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9618
9619         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9620         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9621         if (!tg3_flag(tp, 5705_PLUS))
9622                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9623
9624         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9625                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9626                 /* reset to prevent losing 1st rx packet intermittently */
9627                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9628                 udelay(10);
9629         }
9630
9631         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9632                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9633                         MAC_MODE_FHDE_ENABLE;
9634         if (tg3_flag(tp, ENABLE_APE))
9635                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9636         if (!tg3_flag(tp, 5705_PLUS) &&
9637             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9638             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9639                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9640         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9641         udelay(40);
9642
9643         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9644          * If TG3_FLAG_IS_NIC is zero, we should read the
9645          * register to preserve the GPIO settings for LOMs. The GPIOs,
9646          * whether used as inputs or outputs, are set by boot code after
9647          * reset.
9648          */
9649         if (!tg3_flag(tp, IS_NIC)) {
9650                 u32 gpio_mask;
9651
9652                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9653                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9654                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9655
9656                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9657                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9658                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9659
9660                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9661                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9662
9663                 tp->grc_local_ctrl &= ~gpio_mask;
9664                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9665
9666                 /* GPIO1 must be driven high for eeprom write protect */
9667                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9668                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9669                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9670         }
9671         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9672         udelay(100);
9673
9674         if (tg3_flag(tp, USING_MSIX)) {
9675                 val = tr32(MSGINT_MODE);
9676                 val |= MSGINT_MODE_ENABLE;
9677                 if (tp->irq_cnt > 1)
9678                         val |= MSGINT_MODE_MULTIVEC_EN;
9679                 if (!tg3_flag(tp, 1SHOT_MSI))
9680                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9681                 tw32(MSGINT_MODE, val);
9682         }
9683
9684         if (!tg3_flag(tp, 5705_PLUS)) {
9685                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9686                 udelay(40);
9687         }
9688
9689         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9690                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9691                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9692                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9693                WDMAC_MODE_LNGREAD_ENAB);
9694
9695         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9696             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9697                 if (tg3_flag(tp, TSO_CAPABLE) &&
9698                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9699                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9700                         /* nothing */
9701                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9702                            !tg3_flag(tp, IS_5788)) {
9703                         val |= WDMAC_MODE_RX_ACCEL;
9704                 }
9705         }
9706
9707         /* Enable host coalescing bug fix */
9708         if (tg3_flag(tp, 5755_PLUS))
9709                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9710
9711         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9712                 val |= WDMAC_MODE_BURST_ALL_DATA;
9713
9714         tw32_f(WDMAC_MODE, val);
9715         udelay(40);
9716
9717         if (tg3_flag(tp, PCIX_MODE)) {
9718                 u16 pcix_cmd;
9719
9720                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9721                                      &pcix_cmd);
9722                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9723                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9724                         pcix_cmd |= PCI_X_CMD_READ_2K;
9725                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9726                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9727                         pcix_cmd |= PCI_X_CMD_READ_2K;
9728                 }
9729                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9730                                       pcix_cmd);
9731         }
9732
9733         tw32_f(RDMAC_MODE, rdmac_mode);
9734         udelay(40);
9735
9736         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9737                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9738                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9739                                 break;
9740                 }
9741                 if (i < TG3_NUM_RDMA_CHANNELS) {
9742                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9743                         val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9744                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9745                         tg3_flag_set(tp, 5719_RDMA_BUG);
9746                 }
9747         }
9748
9749         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9750         if (!tg3_flag(tp, 5705_PLUS))
9751                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9752
9753         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9754                 tw32(SNDDATAC_MODE,
9755                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9756         else
9757                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9758
9759         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9760         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9761         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9762         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9763                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9764         tw32(RCVDBDI_MODE, val);
9765         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9766         if (tg3_flag(tp, HW_TSO_1) ||
9767             tg3_flag(tp, HW_TSO_2) ||
9768             tg3_flag(tp, HW_TSO_3))
9769                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9770         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9771         if (tg3_flag(tp, ENABLE_TSS))
9772                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9773         tw32(SNDBDI_MODE, val);
9774         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9775
9776         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9777                 err = tg3_load_5701_a0_firmware_fix(tp);
9778                 if (err)
9779                         return err;
9780         }
9781
9782         if (tg3_flag(tp, TSO_CAPABLE)) {
9783                 err = tg3_load_tso_firmware(tp);
9784                 if (err)
9785                         return err;
9786         }
9787
9788         tp->tx_mode = TX_MODE_ENABLE;
9789
9790         if (tg3_flag(tp, 5755_PLUS) ||
9791             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9792                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9793
9794         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9795             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9796                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9797                 tp->tx_mode &= ~val;
9798                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9799         }
9800
9801         tw32_f(MAC_TX_MODE, tp->tx_mode);
9802         udelay(100);
9803
9804         if (tg3_flag(tp, ENABLE_RSS)) {
9805                 tg3_rss_write_indir_tbl(tp);
9806
9807                 /* Setup the "secret" hash key. */
9808                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9809                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9810                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9811                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9812                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9813                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9814                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9815                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9816                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9817                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9818         }
9819
9820         tp->rx_mode = RX_MODE_ENABLE;
9821         if (tg3_flag(tp, 5755_PLUS))
9822                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9823
9824         if (tg3_flag(tp, ENABLE_RSS))
9825                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9826                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9827                                RX_MODE_RSS_IPV6_HASH_EN |
9828                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9829                                RX_MODE_RSS_IPV4_HASH_EN |
9830                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9831
9832         tw32_f(MAC_RX_MODE, tp->rx_mode);
9833         udelay(10);
9834
9835         tw32(MAC_LED_CTRL, tp->led_ctrl);
9836
9837         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9838         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9839                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9840                 udelay(10);
9841         }
9842         tw32_f(MAC_RX_MODE, tp->rx_mode);
9843         udelay(10);
9844
9845         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9846                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9847                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9848                         /* Set drive transmission level to 1.2V  */
9849                         /* only if the signal pre-emphasis bit is not set  */
9850                         val = tr32(MAC_SERDES_CFG);
9851                         val &= 0xfffff000;
9852                         val |= 0x880;
9853                         tw32(MAC_SERDES_CFG, val);
9854                 }
9855                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9856                         tw32(MAC_SERDES_CFG, 0x616000);
9857         }
9858
9859         /* Prevent chip from dropping frames when flow control
9860          * is enabled.
9861          */
9862         if (tg3_flag(tp, 57765_CLASS))
9863                 val = 1;
9864         else
9865                 val = 2;
9866         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9867
9868         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9869             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9870                 /* Use hardware link auto-negotiation */
9871                 tg3_flag_set(tp, HW_AUTONEG);
9872         }
9873
9874         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9875             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9876                 u32 tmp;
9877
9878                 tmp = tr32(SERDES_RX_CTRL);
9879                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9880                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9881                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9882                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9883         }
9884
9885         if (!tg3_flag(tp, USE_PHYLIB)) {
9886                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9887                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9888
9889                 err = tg3_setup_phy(tp, 0);
9890                 if (err)
9891                         return err;
9892
9893                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9894                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9895                         u32 tmp;
9896
9897                         /* Clear CRC stats. */
9898                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9899                                 tg3_writephy(tp, MII_TG3_TEST1,
9900                                              tmp | MII_TG3_TEST1_CRC_EN);
9901                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9902                         }
9903                 }
9904         }
9905
9906         __tg3_set_rx_mode(tp->dev);
9907
9908         /* Initialize receive rules. */
9909         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9910         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9911         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9912         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9913
9914         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9915                 limit = 8;
9916         else
9917                 limit = 16;
9918         if (tg3_flag(tp, ENABLE_ASF))
9919                 limit -= 4;
9920         switch (limit) {
9921         case 16:
9922                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9923         case 15:
9924                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9925         case 14:
9926                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9927         case 13:
9928                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9929         case 12:
9930                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9931         case 11:
9932                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9933         case 10:
9934                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9935         case 9:
9936                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9937         case 8:
9938                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9939         case 7:
9940                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9941         case 6:
9942                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9943         case 5:
9944                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9945         case 4:
9946                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9947         case 3:
9948                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9949         case 2:
9950         case 1:
9951
9952         default:
9953                 break;
9954         }
9955
9956         if (tg3_flag(tp, ENABLE_APE))
9957                 /* Write our heartbeat update interval to APE. */
9958                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9959                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9960
9961         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9962
9963         return 0;
9964 }
9965
9966 /* Called at device open time to get the chip ready for
9967  * packet processing.  Invoked with tp->lock held.
9968  */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	/* Select/enable the chip clocks first so the register writes
	 * below reach the hardware (see tg3_switch_clocks()).
	 */
	tg3_switch_clocks(tp);

	/* Reset the indirect memory-window base before touching NIC
	 * SRAM; a stale base would redirect later window accesses.
	 */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
9977
9978 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9979 {
9980         int i;
9981
9982         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9983                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9984
9985                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9986                 off += len;
9987
9988                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9989                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9990                         memset(ocir, 0, TG3_OCIR_LEN);
9991         }
9992 }
9993
9994 /* sysfs attributes for hwmon */
9995 static ssize_t tg3_show_temp(struct device *dev,
9996                              struct device_attribute *devattr, char *buf)
9997 {
9998         struct pci_dev *pdev = to_pci_dev(dev);
9999         struct net_device *netdev = pci_get_drvdata(pdev);
10000         struct tg3 *tp = netdev_priv(netdev);
10001         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10002         u32 temperature;
10003
10004         spin_lock_bh(&tp->lock);
10005         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10006                                 sizeof(temperature));
10007         spin_unlock_bh(&tp->lock);
10008         return sprintf(buf, "%u\n", temperature);
10009 }
10010
10011
/* hwmon temperature attributes.  The SENSOR_DEVICE_ATTR index is the
 * APE scratchpad offset that tg3_show_temp() passes to
 * tg3_ape_scratchpad_read().
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

/* NULL-terminated attribute list for the sysfs group below. */
static struct attribute *tg3_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};

/* Group registered/removed in tg3_hwmon_open()/tg3_hwmon_close(). */
static const struct attribute_group tg3_group = {
	.attrs = tg3_attributes,
};
10029
10030 static void tg3_hwmon_close(struct tg3 *tp)
10031 {
10032         if (tp->hwmon_dev) {
10033                 hwmon_device_unregister(tp->hwmon_dev);
10034                 tp->hwmon_dev = NULL;
10035                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10036         }
10037 }
10038
10039 static void tg3_hwmon_open(struct tg3 *tp)
10040 {
10041         int i, err;
10042         u32 size = 0;
10043         struct pci_dev *pdev = tp->pdev;
10044         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10045
10046         tg3_sd_scan_scratchpad(tp, ocirs);
10047
10048         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10049                 if (!ocirs[i].src_data_length)
10050                         continue;
10051
10052                 size += ocirs[i].src_hdr_length;
10053                 size += ocirs[i].src_data_length;
10054         }
10055
10056         if (!size)
10057                 return;
10058
10059         /* Register hwmon sysfs hooks */
10060         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10061         if (err) {
10062                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10063                 return;
10064         }
10065
10066         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10067         if (IS_ERR(tp->hwmon_dev)) {
10068                 tp->hwmon_dev = NULL;
10069                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10070                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10071         }
10072 }
10073
10074
/* Read the 32-bit statistics register REG and accumulate it into
 * PSTAT, a 64-bit counter kept as separate low/high u32 halves.
 * If the low half wraps after the add (low < the value just added),
 * propagate the carry into the high half.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
10081
/* Harvest the MAC TX/RX and receive-list-placement statistics registers
 * into the driver's running 64-bit counters (tp->hw_stats).  Called
 * from tg3_timer() under tp->lock on its once-per-second path; a no-op
 * while the link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	/* 5719 RDMA workaround: tg3_reset_hw() set the
	 * TG3_LSO_RD_DMA_TX_LENGTH_WA bit (and the 5719_RDMA_BUG flag)
	 * when an RDMA channel length exceeded the MTU.  Once more TX
	 * packets than RDMA channels have been sent, clear the bit and
	 * the flag so the workaround is not left in place permanently.
	 */
	if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		/* On these chips, approximate rx_discards by counting
		 * mbuf low-watermark attention events instead: at most
		 * one per call, writing the bit back (presumably to
		 * clear the latched attention -- TODO confirm against
		 * the register spec), with manual carry into the high
		 * half as in TG3_STAT_ADD32.
		 */
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		/* Mirror the same count into mbuf_lwm_thresh_hit. */
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
10146
10147 static void tg3_chk_missed_msi(struct tg3 *tp)
10148 {
10149         u32 i;
10150
10151         for (i = 0; i < tp->irq_cnt; i++) {
10152                 struct tg3_napi *tnapi = &tp->napi[i];
10153
10154                 if (tg3_has_work(tnapi)) {
10155                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10156                             tnapi->last_tx_cons == tnapi->tx_cons) {
10157                                 if (tnapi->chk_msi_cnt < 1) {
10158                                         tnapi->chk_msi_cnt++;
10159                                         return;
10160                                 }
10161                                 tg3_msi(0, tnapi);
10162                         }
10163                 }
10164                 tnapi->chk_msi_cnt = 0;
10165                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10166                 tnapi->last_tx_cons = tnapi->tx_cons;
10167         }
10168 }
10169
/* Driver heartbeat timer, fired every tp->timer_offset jiffies (see
 * tg3_timer_init()).  Under tp->lock it: works around missed MSIs and
 * non-tagged-status interrupt races every tick, polls link/PHY state
 * once per second, and sends the ASF keep-alive event once every
 * TG3_FW_UPDATE_FREQ_SEC seconds.  Always re-arms itself.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Skip the body while interrupts are being synced or a reset
	 * task is pending; just re-arm and try again next tick.
	 */
	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			/* Status block updated but possibly unserviced:
			 * poke the chip to raise the interrupt again.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Force a fresh status block DMA now. */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			/* Write DMA engine died; schedule a full reset. */
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		/* Deferred EEE enable, counted down from link-up time. */
		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			/* Link changes reported via MAC status register. */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Bounce the port mode to resync
					 * the SERDES state machine.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
10301
10302 static void tg3_timer_init(struct tg3 *tp)
10303 {
10304         if (tg3_flag(tp, TAGGED_STATUS) &&
10305             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
10306             !tg3_flag(tp, 57765_CLASS))
10307                 tp->timer_offset = HZ;
10308         else
10309                 tp->timer_offset = HZ / 10;
10310
10311         BUG_ON(tp->timer_offset > HZ);
10312
10313         tp->timer_multiplier = (HZ / tp->timer_offset);
10314         tp->asf_multiplier = (HZ / tp->timer_offset) *
10315                              TG3_FW_UPDATE_FREQ_SEC;
10316
10317         init_timer(&tp->timer);
10318         tp->timer.data = (unsigned long) tp;
10319         tp->timer.function = tg3_timer;
10320 }
10321
10322 static void tg3_timer_start(struct tg3 *tp)
10323 {
10324         tp->asf_counter   = tp->asf_multiplier;
10325         tp->timer_counter = tp->timer_multiplier;
10326
10327         tp->timer.expires = jiffies + tp->timer_offset;
10328         add_timer(&tp->timer);
10329 }
10330
/* Stop the heartbeat timer and wait for a concurrently-running handler
 * to finish.  Must not be called from the timer handler itself (the
 * sync variant would deadlock).
 */
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
10335
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On init failure the device is halted and closed; note the lock is
 * dropped and re-taken around the teardown (the sparse annotations
 * below document that), so callers must not assume state is unchanged
 * across this call when it returns an error.
 *
 * Returns 0 on success or the negative errno from tg3_init_hw().
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* dev_close() takes rtnl-side paths that need the full
		 * lock dropped; re-take it before returning to caller.
		 */
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
10359
/* Deferred full-chip reset, scheduled via tg3_reset_task_schedule()
 * (e.g. from the timer when the write DMA engine dies or from tx
 * timeout handling).  Halts the chip, re-initializes it, and restarts
 * the network interface.  Clears RESET_TASK_PENDING on exit.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	/* Device was closed while the work was queued: nothing to do. */
	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		/* Tx hang suspected to be caused by posted mailbox
		 * writes: fall back to flushing (slower but safer)
		 * mailbox accessors from here on.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
10403
10404 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10405 {
10406         irq_handler_t fn;
10407         unsigned long flags;
10408         char *name;
10409         struct tg3_napi *tnapi = &tp->napi[irq_num];
10410
10411         if (tp->irq_cnt == 1)
10412                 name = tp->dev->name;
10413         else {
10414                 name = &tnapi->irq_lbl[0];
10415                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10416                 name[IFNAMSIZ-1] = 0;
10417         }
10418
10419         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10420                 fn = tg3_msi;
10421                 if (tg3_flag(tp, 1SHOT_MSI))
10422                         fn = tg3_msi_1shot;
10423                 flags = 0;
10424         } else {
10425                 fn = tg3_interrupt;
10426                 if (tg3_flag(tp, TAGGED_STATUS))
10427                         fn = tg3_interrupt_tagged;
10428                 flags = IRQF_SHARED;
10429         }
10430
10431         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10432 }
10433
10434 static int tg3_test_interrupt(struct tg3 *tp)
10435 {
10436         struct tg3_napi *tnapi = &tp->napi[0];
10437         struct net_device *dev = tp->dev;
10438         int err, i, intr_ok = 0;
10439         u32 val;
10440
10441         if (!netif_running(dev))
10442                 return -ENODEV;
10443
10444         tg3_disable_ints(tp);
10445
10446         free_irq(tnapi->irq_vec, tnapi);
10447
10448         /*
10449          * Turn off MSI one shot mode.  Otherwise this test has no
10450          * observable way to know whether the interrupt was delivered.
10451          */
10452         if (tg3_flag(tp, 57765_PLUS)) {
10453                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10454                 tw32(MSGINT_MODE, val);
10455         }
10456
10457         err = request_irq(tnapi->irq_vec, tg3_test_isr,
10458                           IRQF_SHARED, dev->name, tnapi);
10459         if (err)
10460                 return err;
10461
10462         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10463         tg3_enable_ints(tp);
10464
10465         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10466                tnapi->coal_now);
10467
10468         for (i = 0; i < 5; i++) {
10469                 u32 int_mbox, misc_host_ctrl;
10470
10471                 int_mbox = tr32_mailbox(tnapi->int_mbox);
10472                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10473
10474                 if ((int_mbox != 0) ||
10475                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10476                         intr_ok = 1;
10477                         break;
10478                 }
10479
10480                 if (tg3_flag(tp, 57765_PLUS) &&
10481                     tnapi->hw_status->status_tag != tnapi->last_tag)
10482                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10483
10484                 msleep(10);
10485         }
10486
10487         tg3_disable_ints(tp);
10488
10489         free_irq(tnapi->irq_vec, tnapi);
10490
10491         err = tg3_request_irq(tp, 0);
10492
10493         if (err)
10494                 return err;
10495
10496         if (intr_ok) {
10497                 /* Reenable MSI one shot mode. */
10498                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10499                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10500                         tw32(MSGINT_MODE, val);
10501                 }
10502                 return 0;
10503         }
10504
10505         return -EIO;
10506 }
10507
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 *
 * Other negative errnos indicate that even the INTx fallback could not
 * be brought up; the device is not usable in that case.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word (SERR included). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
10568
10569 static int tg3_request_firmware(struct tg3 *tp)
10570 {
10571         const __be32 *fw_data;
10572
10573         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10574                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10575                            tp->fw_needed);
10576                 return -ENOENT;
10577         }
10578
10579         fw_data = (void *)tp->fw->data;
10580
10581         /* Firmware blob starts with version numbers, followed by
10582          * start address and _full_ length including BSS sections
10583          * (which must be longer than the actual data, of course
10584          */
10585
10586         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
10587         if (tp->fw_len < (tp->fw->size - 12)) {
10588                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10589                            tp->fw_len, tp->fw_needed);
10590                 release_firmware(tp->fw);
10591                 tp->fw = NULL;
10592                 return -EINVAL;
10593         }
10594
10595         /* We no longer need firmware; we have it. */
10596         tp->fw_needed = NULL;
10597         return 0;
10598 }
10599
10600 static u32 tg3_irq_count(struct tg3 *tp)
10601 {
10602         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10603
10604         if (irq_cnt > 1) {
10605                 /* We want as many rx rings enabled as there are cpus.
10606                  * In multiqueue MSI-X mode, the first MSI-X vector
10607                  * only deals with link interrupts, etc, so we add
10608                  * one to the number of vectors we are requesting.
10609                  */
10610                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10611         }
10612
10613         return irq_cnt;
10614 }
10615
/* Try to bring the device up in MSI-X mode.
 *
 * Sizes the rx/tx queue counts from the user-requested values (or the
 * RSS default), requests the vectors, and falls back once to however
 * many vectors the PCI core can grant.  Sets ENABLE_RSS/ENABLE_TSS when
 * running with multiple queues.
 *
 * Returns true when MSI-X is usable (tp->irq_cnt, queue counts, and
 * per-napi vectors are valid), false when the caller should fall back
 * to MSI or INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* Positive rc = number of vectors actually available;
		 * retry with that and shrink the queue counts to match
		 * (one vector is reserved for link events).
		 */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	/* Unused entries were zeroed above, so copying irq_max of them
	 * is harmless.
	 */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
10676
/* Choose and configure the interrupt mode for the device, in order of
 * preference: MSI-X, MSI, legacy INTx.  On exit tp->irq_cnt, the
 * per-napi vectors, the queue counts, and the USING_MSI/USING_MSIX
 * flags describe the mode that was actually enabled.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		/* Without 1SHOT_MSI the chip must not auto-mask after
		 * each message.
		 */
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		/* MSI and INTx are always single-vector. */
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
10715
/* Undo tg3_ints_init(): release MSI/MSI-X resources and clear the
 * interrupt-mode and multiqueue flags.  The IRQ handlers themselves
 * must already have been freed by the caller.
 */
static void tg3_ints_fini(struct tg3 *tp)
{
	/* Read the mode flags before clearing them below. */
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
10727
/* Bring the device fully up: interrupt mode, DMA-consistent memory,
 * NAPI contexts, IRQ handlers, hardware init, optional MSI self-test,
 * PHY, hwmon, timer, and finally the tx queues.
 *
 * @reset_phy: pass-through to tg3_init_hw()
 * @test_irq:  run the MSI delivery self-test after hw init
 * @init:      first open (init PTP) vs. re-open (resume PTP)
 *
 * Returns 0 on success; on failure everything acquired so far is torn
 * down in reverse order via the err_out labels and a negative errno is
 * returned.
 */
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			/* Unwind the vectors requested so far. */
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			/* Neither MSI nor the INTx fallback works. */
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	if (init)
		tg3_ptp_init(tp);
	else
		tg3_ptp_resume(tp);


	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);

	return err;
}
10843
/* Tear down everything tg3_start() set up, in reverse order: cancel
 * pending reset work, quiesce the network stack and timer, halt the
 * chip, free IRQs, interrupt mode, NAPI contexts, and DMA memory.
 */
static void tg3_stop(struct tg3 *tp)
{
	int i;

	/* Make sure no reset task runs concurrently with teardown. */
	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
10878
/* net_device_ops .ndo_open: load firmware if needed, power the chip
 * up, and bring the device online via tg3_start().  Also registers the
 * PTP clock on PTP-capable parts.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			/* 5701 A0 cannot run without its firmware. */
			if (err)
				return err;
		} else if (err) {
			/* Other chips can still run, just without TSO. */
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp, true, true, true);
	if (err) {
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	/* NOTE(review): the PTP clock is registered even when tg3_start()
	 * failed above and err is returned -- confirm tg3_ptp_fini() /
	 * close paths still unwind this cleanly in that case.
	 */
	if (tg3_flag(tp, PTP_CAPABLE)) {
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	return err;
}
10926
/* net_device_ops .ndo_stop: unregister PTP, tear the device down, zero
 * the saved statistics baselines, and power the chip down.  Always
 * returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_ptp_fini(tp);

	tg3_stop(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_power_down(tp);

	tg3_carrier_off(tp);

	return 0;
}
10945
10946 static inline u64 get_stat64(tg3_stat64_t *val)
10947 {
10948        return ((u64)val->high << 32) | ((u64)val->low);
10949 }
10950
10951 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10952 {
10953         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10954
10955         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10956             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10957              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10958                 u32 val;
10959
10960                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10961                         tg3_writephy(tp, MII_TG3_TEST1,
10962                                      val | MII_TG3_TEST1_CRC_EN);
10963                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10964                 } else
10965                         val = 0;
10966
10967                 tp->phy_crc_errors += val;
10968
10969                 return tp->phy_crc_errors;
10970         }
10971
10972         return get_stat64(&hw_stats->rx_fcs_errors);
10973 }
10974
/* Accumulate one ethtool statistic: the snapshot saved at the last
 * close (old_estats) plus the live hardware counter.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Fill *estats with the full ethtool statistics set, carrying counts
 * across close/open cycles via tp->estats_prev.  Caller must ensure
 * tp->hw_stats is valid.
 */
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	/* Receive-side counters. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit-side counters. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* Internal DMA / buffer-manager counters. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	/* Host coalescing / interrupt counters. */
	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
11062
11063 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11064 {
11065         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11066         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11067
11068         stats->rx_packets = old_stats->rx_packets +
11069                 get_stat64(&hw_stats->rx_ucast_packets) +
11070                 get_stat64(&hw_stats->rx_mcast_packets) +
11071                 get_stat64(&hw_stats->rx_bcast_packets);
11072
11073         stats->tx_packets = old_stats->tx_packets +
11074                 get_stat64(&hw_stats->tx_ucast_packets) +
11075                 get_stat64(&hw_stats->tx_mcast_packets) +
11076                 get_stat64(&hw_stats->tx_bcast_packets);
11077
11078         stats->rx_bytes = old_stats->rx_bytes +
11079                 get_stat64(&hw_stats->rx_octets);
11080         stats->tx_bytes = old_stats->tx_bytes +
11081                 get_stat64(&hw_stats->tx_octets);
11082
11083         stats->rx_errors = old_stats->rx_errors +
11084                 get_stat64(&hw_stats->rx_errors);
11085         stats->tx_errors = old_stats->tx_errors +
11086                 get_stat64(&hw_stats->tx_errors) +
11087                 get_stat64(&hw_stats->tx_mac_errors) +
11088                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11089                 get_stat64(&hw_stats->tx_discards);
11090
11091         stats->multicast = old_stats->multicast +
11092                 get_stat64(&hw_stats->rx_mcast_packets);
11093         stats->collisions = old_stats->collisions +
11094                 get_stat64(&hw_stats->tx_collisions);
11095
11096         stats->rx_length_errors = old_stats->rx_length_errors +
11097                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11098                 get_stat64(&hw_stats->rx_undersize_packets);
11099
11100         stats->rx_over_errors = old_stats->rx_over_errors +
11101                 get_stat64(&hw_stats->rxbds_empty);
11102         stats->rx_frame_errors = old_stats->rx_frame_errors +
11103                 get_stat64(&hw_stats->rx_align_errors);
11104         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11105                 get_stat64(&hw_stats->tx_discards);
11106         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11107                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11108
11109         stats->rx_crc_errors = old_stats->rx_crc_errors +
11110                 tg3_calc_crc_errors(tp);
11111
11112         stats->rx_missed_errors = old_stats->rx_missed_errors +
11113                 get_stat64(&hw_stats->rx_discards);
11114
11115         stats->rx_dropped = tp->rx_dropped;
11116         stats->tx_dropped = tp->tx_dropped;
11117 }
11118
/* ethtool get_regs_len: size of the buffer tg3_get_regs() fills.
 * A fixed-size block; not dependent on chip revision.
 */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
11123
/* ethtool get_regs: dump the chip's legacy register block into @_p.
 *
 * The buffer is zeroed up front so any register the dump helper skips
 * reads back as 0.  If the PHY is in low-power state no registers are
 * touched and the caller just gets the zeroed buffer.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	/* Hold the full lock so the dump is a consistent snapshot. */
	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
11142
/* ethtool get_eeprom_len: total NVRAM size probed at device init. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
11149
/* ethtool get_eeprom: read eeprom->len bytes of NVRAM starting at
 * eeprom->offset into @data.
 *
 * NVRAM can only be read in 4-byte big-endian words, so the transfer is
 * split into three phases: an unaligned head, a run of whole words, and
 * an unaligned tail.  eeprom->len is rebuilt incrementally so that on a
 * partial failure it reports how many bytes were actually copied.
 *
 * Returns 0 on success, -EINVAL if there is no NVRAM, -EAGAIN while the
 * PHY is powered down, or the error from tg3_nvram_read_be32().
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;	/* rebuilt as bytes are copied */

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		/* Read the containing word, copy only the wanted bytes. */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* report the bytes copied so far, then bail */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
11212
/* ethtool set_eeprom: write eeprom->len bytes of @data to NVRAM at
 * eeprom->offset.
 *
 * NVRAM writes must be 4-byte aligned, so an unaligned head or tail is
 * handled by reading the bordering words first and merging them with the
 * user data in a temporary buffer (read-modify-write), then writing the
 * whole widened, aligned range in one call.
 *
 * Returns 0 on success, -EAGAIN while the PHY is powered down, -EINVAL
 * on missing NVRAM or bad magic, -ENOMEM, or the NVRAM access error.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		/* preserve the bytes before offset within the first word */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		/* preserve the bytes after the data within the last word */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* merge user data with the preserved border words */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
11271
/* ethtool get_settings: report link capabilities, advertisement and
 * current link state.
 *
 * With phylib in use the query is delegated to the attached PHY device.
 * Otherwise the supported/advertised masks are derived from the driver's
 * phy_flags (10/100-only, serdes vs copper) and the pause configuration;
 * speed/duplex are reported only while the interface is running with
 * link up, else marked UNKNOWN.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper PHYs add the 10/100 TP modes; serdes is fibre-only. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Map the rx/tx flow-control pair onto the standard
		 * Pause/Asym_Pause advertisement encoding.
		 */
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && tp->link_up) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		/* No link (or interface down): state is unknown. */
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
11337
/* ethtool set_settings: apply the requested autoneg/speed/duplex and
 * advertisement configuration.
 *
 * With phylib in use the request is delegated to the PHY device.
 * Otherwise the request is validated against what this port can do
 * (serdes is 1000/full only; 10/100-only PHYs cannot advertise gigabit)
 * before tp->link_config is updated under the full lock and, if the
 * interface is running, the PHY is reprogrammed.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	/* Forced mode needs an explicit, valid duplex setting. */
	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Build the mask of modes this port may advertise ... */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		/* ... and reject anything outside it. */
		if (cmd->advertising & ~mask)
			return -EINVAL;

		/* Keep only the speed/duplex bits for the stored config. */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			/* Serdes links only support forced 1000/full. */
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
11424
11425 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11426 {
11427         struct tg3 *tp = netdev_priv(dev);
11428
11429         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11430         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11431         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11432         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11433 }
11434
11435 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11436 {
11437         struct tg3 *tp = netdev_priv(dev);
11438
11439         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11440                 wol->supported = WAKE_MAGIC;
11441         else
11442                 wol->supported = 0;
11443         wol->wolopts = 0;
11444         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11445                 wol->wolopts = WAKE_MAGIC;
11446         memset(&wol->sopass, 0, sizeof(wol->sopass));
11447 }
11448
/* ethtool set_wol: enable or disable magic-packet Wake-on-LAN.
 *
 * Rejects any wake option other than WAKE_MAGIC, and WAKE_MAGIC itself
 * if either chip or platform lacks wakeup support.  The PM core's
 * wakeup enable is updated first; the driver flag is then synced to it
 * under tp->lock.
 */
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	/* Mirror the PM core's decision into the driver flag. */
	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);
	spin_unlock_bh(&tp->lock);

	return 0;
}
11471
/* ethtool get_msglevel: return the netif_msg debug bitmask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
11477
/* ethtool set_msglevel: store the netif_msg debug bitmask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
11483
/* ethtool nway_reset: restart link autonegotiation.
 *
 * Requires a running interface and a non-serdes PHY.  With phylib the
 * restart is delegated to phy_start_aneg(); otherwise BMCR is read and,
 * if autoneg is enabled (or parallel detect is active), rewritten with
 * ANRESTART|ANENABLE under tp->lock.
 *
 * Returns 0 on success, -EAGAIN if not running, -EINVAL otherwise.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is read twice; the first result is
		 * discarded — presumably a dummy read to flush latched
		 * PHY state before the read that is acted on.  Confirm
		 * against the PHY's latched-bit behavior before changing.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
11517
11518 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11519 {
11520         struct tg3 *tp = netdev_priv(dev);
11521
11522         ering->rx_max_pending = tp->rx_std_ring_mask;
11523         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11524                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11525         else
11526                 ering->rx_jumbo_max_pending = 0;
11527
11528         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11529
11530         ering->rx_pending = tp->rx_pending;
11531         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11532                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11533         else
11534                 ering->rx_jumbo_pending = 0;
11535
11536         ering->tx_pending = tp->napi[0].tx_pending;
11537 }
11538
/* ethtool set_ringparam: resize the RX/TX rings.
 *
 * Validates the requests against the hardware ring masks (and a larger
 * TX minimum when the TSO workaround is active), then stops the device,
 * stores the new sizes, and does a full halt/restart so the rings are
 * reallocated.  Returns -EINVAL for out-of-range sizes or the restart
 * error.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries. */
	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* All TX queues use the same ring size. */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	/* Restart the PHY only if we stopped it and the HW came back up. */
	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
11584
11585 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11586 {
11587         struct tg3 *tp = netdev_priv(dev);
11588
11589         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11590
11591         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11592                 epause->rx_pause = 1;
11593         else
11594                 epause->rx_pause = 0;
11595
11596         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11597                 epause->tx_pause = 1;
11598         else
11599                 epause->tx_pause = 0;
11600 }
11601
/* ethtool set_pauseparam: configure RX/TX flow control and whether it
 * is autonegotiated.
 *
 * Phylib path: translate the rx/tx pause pair into Pause/Asym_Pause
 * advertisement bits, update the PHY's advertising mask, and restart
 * autoneg so the link partner learns the new settings; when autoneg is
 * off, force flow control directly.  Legacy path: stop the device,
 * update the flags under the full lock, and do a halt/restart.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* Asymmetric pause needs PHY support for Asym_Pause. */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not attached yet: just record the wish. */
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		/* Apply by restarting the hardware with the new config. */
		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
11703
11704 static int tg3_get_sset_count(struct net_device *dev, int sset)
11705 {
11706         switch (sset) {
11707         case ETH_SS_TEST:
11708                 return TG3_NUM_TEST;
11709         case ETH_SS_STATS:
11710                 return TG3_NUM_STATS;
11711         default:
11712                 return -EOPNOTSUPP;
11713         }
11714 }
11715
11716 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11717                          u32 *rules __always_unused)
11718 {
11719         struct tg3 *tp = netdev_priv(dev);
11720
11721         if (!tg3_flag(tp, SUPPORT_MSIX))
11722                 return -EOPNOTSUPP;
11723
11724         switch (info->cmd) {
11725         case ETHTOOL_GRXRINGS:
11726                 if (netif_running(tp->dev))
11727                         info->data = tp->rxq_cnt;
11728                 else {
11729                         info->data = num_online_cpus();
11730                         if (info->data > TG3_RSS_MAX_NUM_QS)
11731                                 info->data = TG3_RSS_MAX_NUM_QS;
11732                 }
11733
11734                 /* The first interrupt vector only
11735                  * handles link interrupts.
11736                  */
11737                 info->data -= 1;
11738                 return 0;
11739
11740         default:
11741                 return -EOPNOTSUPP;
11742         }
11743 }
11744
11745 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11746 {
11747         u32 size = 0;
11748         struct tg3 *tp = netdev_priv(dev);
11749
11750         if (tg3_flag(tp, SUPPORT_MSIX))
11751                 size = TG3_RSS_INDIR_TBL_SIZE;
11752
11753         return size;
11754 }
11755
11756 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11757 {
11758         struct tg3 *tp = netdev_priv(dev);
11759         int i;
11760
11761         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11762                 indir[i] = tp->rss_ind_tbl[i];
11763
11764         return 0;
11765 }
11766
11767 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11768 {
11769         struct tg3 *tp = netdev_priv(dev);
11770         size_t i;
11771
11772         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11773                 tp->rss_ind_tbl[i] = indir[i];
11774
11775         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11776                 return 0;
11777
11778         /* It is legal to write the indirection
11779          * table while the device is running.
11780          */
11781         tg3_full_lock(tp, 0);
11782         tg3_rss_write_indir_tbl(tp);
11783         tg3_full_unlock(tp);
11784
11785         return 0;
11786 }
11787
11788 static void tg3_get_channels(struct net_device *dev,
11789                              struct ethtool_channels *channel)
11790 {
11791         struct tg3 *tp = netdev_priv(dev);
11792         u32 deflt_qs = netif_get_num_default_rss_queues();
11793
11794         channel->max_rx = tp->rxq_max;
11795         channel->max_tx = tp->txq_max;
11796
11797         if (netif_running(dev)) {
11798                 channel->rx_count = tp->rxq_cnt;
11799                 channel->tx_count = tp->txq_cnt;
11800         } else {
11801                 if (tp->rxq_req)
11802                         channel->rx_count = tp->rxq_req;
11803                 else
11804                         channel->rx_count = min(deflt_qs, tp->rxq_max);
11805
11806                 if (tp->txq_req)
11807                         channel->tx_count = tp->txq_req;
11808                 else
11809                         channel->tx_count = min(deflt_qs, tp->txq_max);
11810         }
11811 }
11812
/* ethtool set_channels: request new RX/TX queue counts.
 *
 * The counts are only recorded; if the interface is running the device
 * is fully stopped and restarted so the interrupt vectors and rings are
 * reallocated with the new counts.  Requires MSI-X support and counts
 * within the probed maxima.
 */
static int tg3_set_channels(struct net_device *dev,
			    struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	if (channel->rx_count > tp->rxq_max ||
	    channel->tx_count > tp->txq_max)
		return -EINVAL;

	tp->rxq_req = channel->rx_count;
	tp->txq_req = channel->tx_count;

	/* Device down: new counts take effect on next open. */
	if (!netif_running(dev))
		return 0;

	tg3_stop(tp);

	tg3_carrier_off(tp);

	tg3_start(tp, true, false, false);

	return 0;
}
11839
11840 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11841 {
11842         switch (stringset) {
11843         case ETH_SS_STATS:
11844                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11845                 break;
11846         case ETH_SS_TEST:
11847                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11848                 break;
11849         default:
11850                 WARN_ON(1);     /* we need a WARN() */
11851                 break;
11852         }
11853 }
11854
/* ethtool set_phys_id: blink the port LEDs so the operator can locate
 * the NIC.
 *
 * ACTIVE returns 1 to ask the core to call back once per second; ON/OFF
 * override the LED control register, and INACTIVE restores the value
 * saved in tp->led_ctrl.  Requires a running interface so the register
 * writes reach powered-up hardware.
 */
static int tg3_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore whatever the driver had programmed before. */
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
11889
11890 static void tg3_get_ethtool_stats(struct net_device *dev,
11891                                    struct ethtool_stats *estats, u64 *tmp_stats)
11892 {
11893         struct tg3 *tp = netdev_priv(dev);
11894
11895         if (tp->hw_stats)
11896                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11897         else
11898                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11899 }
11900
/* tg3_vpd_readblock - read the adapter's VPD (Vital Product Data) image.
 *
 * On EEPROM-style NVRAM, first scan the NVRAM directory for an extended
 * VPD entry; if none is found, fall back to the fixed VPD offset/length.
 * Devices without usable NVRAM are read through the PCI VPD capability
 * instead.  On success returns a kmalloc()ed buffer (caller must kfree)
 * and sets *vpdlen to its length; returns NULL on any failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
        int i;
        __be32 *buf;
        u32 offset = 0, len = 0;
        u32 magic, val;

        if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
                return NULL;

        if (magic == TG3_EEPROM_MAGIC) {
                /* Walk the NVRAM directory looking for an extended VPD
                 * entry; 'val' holds the last directory word examined.
                 */
                for (offset = TG3_NVM_DIR_START;
                     offset < TG3_NVM_DIR_END;
                     offset += TG3_NVM_DIRENT_SIZE) {
                        if (tg3_nvram_read(tp, offset, &val))
                                return NULL;

                        if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
                            TG3_NVM_DIRTYPE_EXTVPD)
                                break;
                }

                if (offset != TG3_NVM_DIR_END) {
                        /* Found an extended VPD entry: length is in the
                         * directory word, address in the following word.
                         */
                        len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
                        if (tg3_nvram_read(tp, offset + 4, &offset))
                                return NULL;

                        offset = tg3_nvram_logical_addr(tp, offset);
                }
        }

        if (!offset || !len) {
                /* No directory entry (or non-EEPROM magic): use the
                 * default VPD location.
                 */
                offset = TG3_NVM_VPD_OFF;
                len = TG3_NVM_VPD_LEN;
        }

        buf = kmalloc(len, GFP_KERNEL);
        if (buf == NULL)
                return NULL;

        if (magic == TG3_EEPROM_MAGIC) {
                for (i = 0; i < len; i += 4) {
                        /* The data is in little-endian format in NVRAM.
                         * Use the big-endian read routines to preserve
                         * the byte order as it exists in NVRAM.
                         */
                        if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
                                goto error;
                }
        } else {
                u8 *ptr;
                ssize_t cnt;
                unsigned int pos = 0;

                /* Read via the PCI VPD capability, retrying up to three
                 * times on interrupt/timeout (which return a short count
                 * of zero here).
                 */
                ptr = (u8 *)&buf[0];
                for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
                        cnt = pci_read_vpd(tp->pdev, pos,
                                           len - pos, ptr);
                        if (cnt == -ETIMEDOUT || cnt == -EINTR)
                                cnt = 0;
                        else if (cnt < 0)
                                goto error;
                }
                if (pos != len)
                        goto error;
        }

        *vpdlen = len;

        return buf;

error:
        kfree(buf);
        return NULL;
}
11976
/* Byte counts over which tg3_test_nvram() validates the NVRAM image.
 * Legacy (full-firmware) images are checked over the first 0x100 bytes;
 * the selfboot format-1 sizes vary by image revision, and hardware
 * selfboot images use a fixed size with a separate data/parity split.
 */
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11986
/* tg3_test_nvram - ethtool self-test: validate the NVRAM image.
 *
 * Determines the image format from its magic word, reads the
 * format-specific number of bytes, and verifies the appropriate
 * integrity data: a simple byte checksum for firmware selfboot images,
 * a parity check for hardware selfboot images, or CRC32 checksums plus
 * the VPD checksum keyword for legacy full-firmware images.
 * Returns 0 on success, -EIO on corruption, -ENOMEM on allocation
 * failure.  Devices without NVRAM trivially pass.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
        u32 csum, magic, len;
        __be32 *buf;
        int i, j, k, err = 0, size;

        if (tg3_flag(tp, NO_NVRAM))
                return 0;

        if (tg3_nvram_read(tp, 0, &magic) != 0)
                return -EIO;

        /* Pick the number of bytes to validate based on the image type
         * and, for selfboot format 1, its revision.
         */
        if (magic == TG3_EEPROM_MAGIC)
                size = NVRAM_TEST_SIZE;
        else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
                if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
                    TG3_EEPROM_SB_FORMAT_1) {
                        switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
                        case TG3_EEPROM_SB_REVISION_0:
                                size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_2:
                                size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_3:
                                size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_4:
                                size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_5:
                                size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_6:
                                size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
                                break;
                        default:
                                return -EIO;
                        }
                } else
                        return 0;       /* unknown selfboot format: skip */
        } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
                size = NVRAM_SELFBOOT_HW_SIZE;
        else
                return -EIO;

        buf = kmalloc(size, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;

        err = -EIO;
        for (i = 0, j = 0; i < size; i += 4, j++) {
                err = tg3_nvram_read_be32(tp, i, &buf[j]);
                if (err)
                        break;
        }
        if (i < size)
                goto out;

        /* Selfboot format */
        magic = be32_to_cpu(buf[0]);
        if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
            TG3_EEPROM_MAGIC_FW) {
                /* Firmware selfboot image: the byte sum over the image
                 * must be zero.
                 */
                u8 *buf8 = (u8 *) buf, csum8 = 0;

                if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
                    TG3_EEPROM_SB_REVISION_2) {
                        /* For rev 2, the csum doesn't include the MBA. */
                        for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
                                csum8 += buf8[i];
                        for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
                                csum8 += buf8[i];
                } else {
                        for (i = 0; i < size; i++)
                                csum8 += buf8[i];
                }

                if (csum8 == 0) {
                        err = 0;
                        goto out;
                }

                err = -EIO;
                goto out;
        }

        if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
            TG3_EEPROM_MAGIC_HW) {
                /* Hardware selfboot image: parity bits are interleaved
                 * with the data at fixed offsets (0, 8 and 16).  Split
                 * them apart, then verify each data byte against its
                 * parity bit (the combination must have odd parity).
                 */
                u8 data[NVRAM_SELFBOOT_DATA_SIZE];
                u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
                u8 *buf8 = (u8 *) buf;

                /* Separate the parity bits and the data bytes.  */
                for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
                        if ((i == 0) || (i == 8)) {
                                int l;
                                u8 msk;

                                for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        } else if (i == 16) {
                                int l;
                                u8 msk;

                                for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;

                                for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        }
                        data[j++] = buf8[i];
                }

                err = -EIO;
                for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
                        u8 hw8 = hweight8(data[i]);

                        /* Fail if the parity bit does not complement the
                         * data byte's bit-count parity.
                         */
                        if ((hw8 & 0x1) && parity[i])
                                goto out;
                        else if (!(hw8 & 0x1) && !parity[i])
                                goto out;
                }
                err = 0;
                goto out;
        }

        err = -EIO;

        /* Legacy image: verify the two embedded CRC32 checksums. */
        /* Bootstrap checksum at offset 0x10 */
        csum = calc_crc((unsigned char *) buf, 0x10);
        if (csum != le32_to_cpu(buf[0x10/4]))
                goto out;

        /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
        csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
        if (csum != le32_to_cpu(buf[0xfc/4]))
                goto out;

        kfree(buf);

        /* Finally, validate the VPD block: locate the read-only LRDT
         * section and, if a CHKSUM keyword is present, verify the byte
         * sum up to and including it is zero.
         */
        buf = tg3_vpd_readblock(tp, &len);
        if (!buf)
                return -ENOMEM;

        i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
        if (i > 0) {
                j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
                if (j < 0)
                        goto out;

                if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
                        goto out;

                i += PCI_VPD_LRDT_TAG_SIZE;
                j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
                                              PCI_VPD_RO_KEYWORD_CHKSUM);
                if (j > 0) {
                        u8 csum8 = 0;

                        j += PCI_VPD_INFO_FLD_HDR_SIZE;

                        for (i = 0; i <= j; i++)
                                csum8 += ((u8 *)buf)[i];

                        if (csum8)
                                goto out;
                }
        }

        err = 0;

out:
        kfree(buf);
        return err;
}
12165
12166 #define TG3_SERDES_TIMEOUT_SEC  2
12167 #define TG3_COPPER_TIMEOUT_SEC  6
12168
12169 static int tg3_test_link(struct tg3 *tp)
12170 {
12171         int i, max;
12172
12173         if (!netif_running(tp->dev))
12174                 return -ENODEV;
12175
12176         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12177                 max = TG3_SERDES_TIMEOUT_SEC;
12178         else
12179                 max = TG3_COPPER_TIMEOUT_SEC;
12180
12181         for (i = 0; i < max; i++) {
12182                 if (tp->link_up)
12183                         return 0;
12184
12185                 if (msleep_interruptible(1000))
12186                         break;
12187         }
12188
12189         return -EIO;
12190 }
12191
/* Only test the commonly used registers */
/* Register self test: for each reg_tbl entry applicable to this chip
 * generation (5705/5750/5788 variants are filtered by the flags field),
 * save the register, write all-zeros then all-ones patterns, and verify
 * that read-only bits (read_mask) are unaffected while read/write bits
 * (write_mask) take the written value.  The original value is restored
 * in all cases.  Returns 0 on success, -EIO on the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
        int i, is_5705, is_5750;
        u32 offset, read_mask, write_mask, val, save_val, read_val;
        static struct {
                u16 offset;
                u16 flags;
#define TG3_FL_5705     0x1
#define TG3_FL_NOT_5705 0x2
#define TG3_FL_NOT_5788 0x4
#define TG3_FL_NOT_5750 0x8
                u32 read_mask;
                u32 write_mask;
        } reg_tbl[] = {
                /* MAC Control Registers */
                { MAC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00ef6f8c },
                { MAC_MODE, TG3_FL_5705,
                        0x00000000, 0x01ef6b8c },
                { MAC_STATUS, TG3_FL_NOT_5705,
                        0x03800107, 0x00000000 },
                { MAC_STATUS, TG3_FL_5705,
                        0x03800100, 0x00000000 },
                { MAC_ADDR_0_HIGH, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_ADDR_0_LOW, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_RX_MTU_SIZE, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_TX_MODE, 0x0000,
                        0x00000000, 0x00000070 },
                { MAC_TX_LENGTHS, 0x0000,
                        0x00000000, 0x00003fff },
                { MAC_RX_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x000007fc },
                { MAC_RX_MODE, TG3_FL_5705,
                        0x00000000, 0x000007dc },
                { MAC_HASH_REG_0, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_1, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_2, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_3, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive Data and Receive BD Initiator Control Registers. */
                { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
                        0x00000000, 0x00000003 },
                { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+0, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+4, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+8, 0x0000,
                        0x00000000, 0xffff0002 },
                { RCVDBDI_STD_BD+0xc, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive BD Initiator Control Registers. */
                { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVBDI_STD_THRESH, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },

                /* Host Coalescing Control Registers. */
                { HOSTCC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00000004 },
                { HOSTCC_MODE, TG3_FL_5705,
                        0x00000000, 0x000000f6 },
                { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },
                { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },

                /* Buffer Manager Control Registers. */
                { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
                        0x00000000, 0x007fff80 },
                { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
                        0x00000000, 0x007fffff },
                { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
                        0x00000000, 0x0000003f },
                { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_MB_HIGH_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },
                { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },

                /* Mailbox Registers */
                { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
                        0x00000000, 0x000007ff },
                { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
                        0x00000000, 0x000001ff },

                /* Sentinel terminating the table. */
                { 0xffff, 0x0000, 0x00000000, 0x00000000 },
        };

        is_5705 = is_5750 = 0;
        if (tg3_flag(tp, 5705_PLUS)) {
                is_5705 = 1;
                if (tg3_flag(tp, 5750_PLUS))
                        is_5750 = 1;
        }

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                /* Skip entries that do not apply to this chip family. */
                if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
                        continue;

                if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
                        continue;

                if (tg3_flag(tp, IS_5788) &&
                    (reg_tbl[i].flags & TG3_FL_NOT_5788))
                        continue;

                if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                read_mask = reg_tbl[i].read_mask;
                write_mask = reg_tbl[i].write_mask;

                /* Save the original register content */
                save_val = tr32(offset);

                /* Determine the read-only value. */
                read_val = save_val & read_mask;

                /* Write zero to the register, then make sure the read-only bits
                 * are not changed and the read/write bits are all zeros.
                 */
                tw32(offset, 0);

                val = tr32(offset);

                /* Test the read-only and read/write bits. */
                if (((val & read_mask) != read_val) || (val & write_mask))
                        goto out;

                /* Write ones to all the bits defined by RdMask and WrMask, then
                 * make sure the read-only bits are not changed and the
                 * read/write bits are all ones.
                 */
                tw32(offset, read_mask | write_mask);

                val = tr32(offset);

                /* Test the read-only bits. */
                if ((val & read_mask) != read_val)
                        goto out;

                /* Test the read/write bits. */
                if ((val & write_mask) != write_mask)
                        goto out;

                tw32(offset, save_val);
        }

        return 0;

out:
        if (netif_msg_hw(tp))
                netdev_err(tp->dev,
                           "Register test failed at offset %x\n", offset);
        /* Restore the original value even on failure. */
        tw32(offset, save_val);
        return -EIO;
}
12412
12413 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12414 {
12415         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12416         int i;
12417         u32 j;
12418
12419         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12420                 for (j = 0; j < len; j += 4) {
12421                         u32 val;
12422
12423                         tg3_write_mem(tp, offset + j, test_pattern[i]);
12424                         tg3_read_mem(tp, offset + j, &val);
12425                         if (val != test_pattern[i])
12426                                 return -EIO;
12427                 }
12428         }
12429         return 0;
12430 }
12431
/* ethtool self-test: walk the on-chip memory map for this chip
 * generation and pattern-test each region via tg3_do_mem_test().
 * Each table is a list of { offset, length } pairs terminated by an
 * offset of 0xffffffff.  Returns 0 on success or the first region's
 * error code.
 */
static int tg3_test_memory(struct tg3 *tp)
{
        static struct mem_entry {
                u32 offset;
                u32 len;
        } mem_tbl_570x[] = {
                { 0x00000000, 0x00b50},
                { 0x00002000, 0x1c000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5705[] = {
                { 0x00000100, 0x0000c},
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x01000},
                { 0x00008000, 0x02000},
                { 0x00010000, 0x0e000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5755[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x00800},
                { 0x00008000, 0x02000},
                { 0x00010000, 0x0c000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5906[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00400},
                { 0x00006000, 0x00400},
                { 0x00008000, 0x01000},
                { 0x00010000, 0x01000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5717[] = {
                { 0x00000200, 0x00008},
                { 0x00010000, 0x0a000},
                { 0x00020000, 0x13c00},
                { 0xffffffff, 0x00000}
        }, mem_tbl_57765[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x09800},
                { 0x00010000, 0x0a000},
                { 0xffffffff, 0x00000}
        };
        struct mem_entry *mem_tbl;
        int err = 0;
        int i;

        /* Select the memory map for this chip.  The order of these
         * checks matters: newer, more specific families are matched
         * before the broader _PLUS flags they also carry.
         */
        if (tg3_flag(tp, 5717_PLUS))
                mem_tbl = mem_tbl_5717;
        else if (tg3_flag(tp, 57765_CLASS) ||
                 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
                mem_tbl = mem_tbl_57765;
        else if (tg3_flag(tp, 5755_PLUS))
                mem_tbl = mem_tbl_5755;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                mem_tbl = mem_tbl_5906;
        else if (tg3_flag(tp, 5705_PLUS))
                mem_tbl = mem_tbl_5705;
        else
                mem_tbl = mem_tbl_570x;

        for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
                err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
                if (err)
                        break;
        }

        return err;
}
12501
/* Parameters and canned packet template used by the TSO loopback test. */
#define TG3_TSO_MSS             500

#define TG3_TSO_IP_HDR_LEN      20
#define TG3_TSO_TCP_HDR_LEN     20
#define TG3_TSO_TCP_OPT_LEN     12

/* Template for the TSO loopback frame, starting at the ethertype:
 * IPv4 ethertype (0x0800), a 20-byte IPv4 header (10.0.0.1 -> 10.0.0.2,
 * protocol TCP), then a TCP header with 12 bytes of options.  Fields
 * such as the IP total length are filled in at runtime by
 * tg3_run_loopback().
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
12524
12525 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12526 {
12527         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12528         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12529         u32 budget;
12530         struct sk_buff *skb;
12531         u8 *tx_data, *rx_data;
12532         dma_addr_t map;
12533         int num_pkts, tx_len, rx_len, i, err;
12534         struct tg3_rx_buffer_desc *desc;
12535         struct tg3_napi *tnapi, *rnapi;
12536         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12537
12538         tnapi = &tp->napi[0];
12539         rnapi = &tp->napi[0];
12540         if (tp->irq_cnt > 1) {
12541                 if (tg3_flag(tp, ENABLE_RSS))
12542                         rnapi = &tp->napi[1];
12543                 if (tg3_flag(tp, ENABLE_TSS))
12544                         tnapi = &tp->napi[1];
12545         }
12546         coal_now = tnapi->coal_now | rnapi->coal_now;
12547
12548         err = -EIO;
12549
12550         tx_len = pktsz;
12551         skb = netdev_alloc_skb(tp->dev, tx_len);
12552         if (!skb)
12553                 return -ENOMEM;
12554
12555         tx_data = skb_put(skb, tx_len);
12556         memcpy(tx_data, tp->dev->dev_addr, 6);
12557         memset(tx_data + 6, 0x0, 8);
12558
12559         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12560
12561         if (tso_loopback) {
12562                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12563
12564                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12565                               TG3_TSO_TCP_OPT_LEN;
12566
12567                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12568                        sizeof(tg3_tso_header));
12569                 mss = TG3_TSO_MSS;
12570
12571                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12572                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12573
12574                 /* Set the total length field in the IP header */
12575                 iph->tot_len = htons((u16)(mss + hdr_len));
12576
12577                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12578                               TXD_FLAG_CPU_POST_DMA);
12579
12580                 if (tg3_flag(tp, HW_TSO_1) ||
12581                     tg3_flag(tp, HW_TSO_2) ||
12582                     tg3_flag(tp, HW_TSO_3)) {
12583                         struct tcphdr *th;
12584                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12585                         th = (struct tcphdr *)&tx_data[val];
12586                         th->check = 0;
12587                 } else
12588                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
12589
12590                 if (tg3_flag(tp, HW_TSO_3)) {
12591                         mss |= (hdr_len & 0xc) << 12;
12592                         if (hdr_len & 0x10)
12593                                 base_flags |= 0x00000010;
12594                         base_flags |= (hdr_len & 0x3e0) << 5;
12595                 } else if (tg3_flag(tp, HW_TSO_2))
12596                         mss |= hdr_len << 9;
12597                 else if (tg3_flag(tp, HW_TSO_1) ||
12598                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12599                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12600                 } else {
12601                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12602                 }
12603
12604                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12605         } else {
12606                 num_pkts = 1;
12607                 data_off = ETH_HLEN;
12608
12609                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12610                     tx_len > VLAN_ETH_FRAME_LEN)
12611                         base_flags |= TXD_FLAG_JMB_PKT;
12612         }
12613
12614         for (i = data_off; i < tx_len; i++)
12615                 tx_data[i] = (u8) (i & 0xff);
12616
12617         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12618         if (pci_dma_mapping_error(tp->pdev, map)) {
12619                 dev_kfree_skb(skb);
12620                 return -EIO;
12621         }
12622
12623         val = tnapi->tx_prod;
12624         tnapi->tx_buffers[val].skb = skb;
12625         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12626
12627         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12628                rnapi->coal_now);
12629
12630         udelay(10);
12631
12632         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12633
12634         budget = tg3_tx_avail(tnapi);
12635         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12636                             base_flags | TXD_FLAG_END, mss, 0)) {
12637                 tnapi->tx_buffers[val].skb = NULL;
12638                 dev_kfree_skb(skb);
12639                 return -EIO;
12640         }
12641
12642         tnapi->tx_prod++;
12643
12644         /* Sync BD data before updating mailbox */
12645         wmb();
12646
12647         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12648         tr32_mailbox(tnapi->prodmbox);
12649
12650         udelay(10);
12651
12652         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
12653         for (i = 0; i < 35; i++) {
12654                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12655                        coal_now);
12656
12657                 udelay(10);
12658
12659                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12660                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12661                 if ((tx_idx == tnapi->tx_prod) &&
12662                     (rx_idx == (rx_start_idx + num_pkts)))
12663                         break;
12664         }
12665
12666         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12667         dev_kfree_skb(skb);
12668
12669         if (tx_idx != tnapi->tx_prod)
12670                 goto out;
12671
12672         if (rx_idx != rx_start_idx + num_pkts)
12673                 goto out;
12674
12675         val = data_off;
12676         while (rx_idx != rx_start_idx) {
12677                 desc = &rnapi->rx_rcb[rx_start_idx++];
12678                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12679                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12680
12681                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12682                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12683                         goto out;
12684
12685                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12686                          - ETH_FCS_LEN;
12687
12688                 if (!tso_loopback) {
12689                         if (rx_len != tx_len)
12690                                 goto out;
12691
12692                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12693                                 if (opaque_key != RXD_OPAQUE_RING_STD)
12694                                         goto out;
12695                         } else {
12696                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12697                                         goto out;
12698                         }
12699                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12700                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12701                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
12702                         goto out;
12703                 }
12704
12705                 if (opaque_key == RXD_OPAQUE_RING_STD) {
12706                         rx_data = tpr->rx_std_buffers[desc_idx].data;
12707                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12708                                              mapping);
12709                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12710                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12711                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12712                                              mapping);
12713                 } else
12714                         goto out;
12715
12716                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12717                                             PCI_DMA_FROMDEVICE);
12718
12719                 rx_data += TG3_RX_OFFSET(tp);
12720                 for (i = data_off; i < rx_len; i++, val++) {
12721                         if (*(rx_data + i) != (u8) (val & 0xff))
12722                                 goto out;
12723                 }
12724         }
12725
12726         err = 0;
12727
12728         /* tg3_free_rings will unmap and free the rx_data */
12729 out:
12730         return err;
12731 }
12732
12733 #define TG3_STD_LOOPBACK_FAILED         1
12734 #define TG3_JMB_LOOPBACK_FAILED         2
12735 #define TG3_TSO_LOOPBACK_FAILED         4
12736 #define TG3_LOOPBACK_FAILED \
12737         (TG3_STD_LOOPBACK_FAILED | \
12738          TG3_JMB_LOOPBACK_FAILED | \
12739          TG3_TSO_LOOPBACK_FAILED)
12740
/* Run the MAC-, PHY- and (optionally) external-cable loopback tests.
 *
 * @tp:         device instance
 * @data:       ethtool self-test result array; the TG3_MAC_LOOPB_TEST,
 *              TG3_PHY_LOOPB_TEST and TG3_EXT_LOOPB_TEST slots receive a
 *              bitmask of TG3_{STD,JMB,TSO}_LOOPBACK_FAILED flags
 * @do_extlpbk: also run the external loopback pass
 *
 * Returns 0 if every loopback that was executed passed, -EIO otherwise.
 * Caller must hold the device quiesced (called from tg3_self_test with
 * tg3_full_lock held and the chip halted).
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	/* Cap the jumbo test frame to what the DMA engine can handle. */
	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	/* Temporarily drop the EEE capability flag so energy-efficient
	 * ethernet state does not interfere with loopback; restored at
	 * "done" below.
	 */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		/* Interface down: report every sub-test as failed. */
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, 1);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	/* PHY/external loopback only applies to copper PHYs driven by
	 * this driver (not phylib-managed or SERDES links).
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	/* Any failure bit recorded above makes the overall result -EIO. */
	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
12855
/* ethtool self-test entry point (ETHTOOL_TEST).
 *
 * Fills data[] (TG3_NUM_TEST slots) with per-test fail indicators and
 * sets ETH_TEST_FL_FAILED in etest->flags on any failure.  The offline
 * tests halt the chip and restart it afterwards, so traffic is
 * interrupted while they run.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	/* Power the chip up first; if that fails, mark everything failed. */
	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	/* External loopback takes the link down, so skip the link test. */
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		/* Hold the NVRAM arbitration lock while both on-chip CPUs
		 * are being halted so firmware cannot race with us.
		 */
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* Interrupt test runs without tg3_full_lock held. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Restore normal operating state. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
12942
12943 static int tg3_hwtstamp_ioctl(struct net_device *dev,
12944                               struct ifreq *ifr, int cmd)
12945 {
12946         struct tg3 *tp = netdev_priv(dev);
12947         struct hwtstamp_config stmpconf;
12948
12949         if (!tg3_flag(tp, PTP_CAPABLE))
12950                 return -EINVAL;
12951
12952         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
12953                 return -EFAULT;
12954
12955         if (stmpconf.flags)
12956                 return -EINVAL;
12957
12958         switch (stmpconf.tx_type) {
12959         case HWTSTAMP_TX_ON:
12960                 tg3_flag_set(tp, TX_TSTAMP_EN);
12961                 break;
12962         case HWTSTAMP_TX_OFF:
12963                 tg3_flag_clear(tp, TX_TSTAMP_EN);
12964                 break;
12965         default:
12966                 return -ERANGE;
12967         }
12968
12969         switch (stmpconf.rx_filter) {
12970         case HWTSTAMP_FILTER_NONE:
12971                 tp->rxptpctl = 0;
12972                 break;
12973         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
12974                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12975                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
12976                 break;
12977         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
12978                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12979                                TG3_RX_PTP_CTL_SYNC_EVNT;
12980                 break;
12981         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
12982                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12983                                TG3_RX_PTP_CTL_DELAY_REQ;
12984                 break;
12985         case HWTSTAMP_FILTER_PTP_V2_EVENT:
12986                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12987                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12988                 break;
12989         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
12990                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12991                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12992                 break;
12993         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
12994                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12995                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12996                 break;
12997         case HWTSTAMP_FILTER_PTP_V2_SYNC:
12998                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12999                                TG3_RX_PTP_CTL_SYNC_EVNT;
13000                 break;
13001         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13002                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13003                                TG3_RX_PTP_CTL_SYNC_EVNT;
13004                 break;
13005         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13006                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13007                                TG3_RX_PTP_CTL_SYNC_EVNT;
13008                 break;
13009         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13010                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13011                                TG3_RX_PTP_CTL_DELAY_REQ;
13012                 break;
13013         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13014                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13015                                TG3_RX_PTP_CTL_DELAY_REQ;
13016                 break;
13017         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13018                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13019                                TG3_RX_PTP_CTL_DELAY_REQ;
13020                 break;
13021         default:
13022                 return -ERANGE;
13023         }
13024
13025         if (netif_running(dev) && tp->rxptpctl)
13026                 tw32(TG3_RX_PTP_CTL,
13027                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13028
13029         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13030                 -EFAULT : 0;
13031 }
13032
13033 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13034 {
13035         struct mii_ioctl_data *data = if_mii(ifr);
13036         struct tg3 *tp = netdev_priv(dev);
13037         int err;
13038
13039         if (tg3_flag(tp, USE_PHYLIB)) {
13040                 struct phy_device *phydev;
13041                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13042                         return -EAGAIN;
13043                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13044                 return phy_mii_ioctl(phydev, ifr, cmd);
13045         }
13046
13047         switch (cmd) {
13048         case SIOCGMIIPHY:
13049                 data->phy_id = tp->phy_addr;
13050
13051                 /* fallthru */
13052         case SIOCGMIIREG: {
13053                 u32 mii_regval;
13054
13055                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13056                         break;                  /* We have no PHY */
13057
13058                 if (!netif_running(dev))
13059                         return -EAGAIN;
13060
13061                 spin_lock_bh(&tp->lock);
13062                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13063                                     data->reg_num & 0x1f, &mii_regval);
13064                 spin_unlock_bh(&tp->lock);
13065
13066                 data->val_out = mii_regval;
13067
13068                 return err;
13069         }
13070
13071         case SIOCSMIIREG:
13072                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13073                         break;                  /* We have no PHY */
13074
13075                 if (!netif_running(dev))
13076                         return -EAGAIN;
13077
13078                 spin_lock_bh(&tp->lock);
13079                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13080                                      data->reg_num & 0x1f, data->val_in);
13081                 spin_unlock_bh(&tp->lock);
13082
13083                 return err;
13084
13085         case SIOCSHWTSTAMP:
13086                 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13087
13088         default:
13089                 /* do nothing */
13090                 break;
13091         }
13092         return -EOPNOTSUPP;
13093 }
13094
13095 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13096 {
13097         struct tg3 *tp = netdev_priv(dev);
13098
13099         memcpy(ec, &tp->coal, sizeof(*ec));
13100         return 0;
13101 }
13102
13103 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13104 {
13105         struct tg3 *tp = netdev_priv(dev);
13106         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13107         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13108
13109         if (!tg3_flag(tp, 5705_PLUS)) {
13110                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13111                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13112                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13113                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13114         }
13115
13116         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13117             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13118             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13119             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13120             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13121             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13122             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13123             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13124             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13125             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13126                 return -EINVAL;
13127
13128         /* No rx interrupts will be generated if both are zero */
13129         if ((ec->rx_coalesce_usecs == 0) &&
13130             (ec->rx_max_coalesced_frames == 0))
13131                 return -EINVAL;
13132
13133         /* No tx interrupts will be generated if both are zero */
13134         if ((ec->tx_coalesce_usecs == 0) &&
13135             (ec->tx_max_coalesced_frames == 0))
13136                 return -EINVAL;
13137
13138         /* Only copy relevant parameters, ignore all others. */
13139         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13140         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13141         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13142         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13143         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13144         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13145         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13146         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13147         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13148
13149         if (netif_running(dev)) {
13150                 tg3_full_lock(tp, 0);
13151                 __tg3_set_coalesce(tp, &tp->coal);
13152                 tg3_full_unlock(tp);
13153         }
13154         return 0;
13155 }
13156
/* ethtool operations exported through dev->ethtool_ops. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
};
13191
13192 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13193                                                 struct rtnl_link_stats64 *stats)
13194 {
13195         struct tg3 *tp = netdev_priv(dev);
13196
13197         spin_lock_bh(&tp->lock);
13198         if (!tp->hw_stats) {
13199                 spin_unlock_bh(&tp->lock);
13200                 return &tp->net_stats_prev;
13201         }
13202
13203         tg3_get_nstats(tp, stats);
13204         spin_unlock_bh(&tp->lock);
13205
13206         return stats;
13207 }
13208
/* ndo_set_rx_mode: push the new filter settings to hardware, but only
 * while the interface is up (the settings are re-applied on open).
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
13220
13221 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13222                                int new_mtu)
13223 {
13224         dev->mtu = new_mtu;
13225
13226         if (new_mtu > ETH_DATA_LEN) {
13227                 if (tg3_flag(tp, 5780_CLASS)) {
13228                         netdev_update_features(dev);
13229                         tg3_flag_clear(tp, TSO_CAPABLE);
13230                 } else {
13231                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
13232                 }
13233         } else {
13234                 if (tg3_flag(tp, 5780_CLASS)) {
13235                         tg3_flag_set(tp, TSO_CAPABLE);
13236                         netdev_update_features(dev);
13237                 }
13238                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13239         }
13240 }
13241
/* ndo_change_mtu handler.
 *
 * Validates the requested MTU, then, if the interface is up, performs a
 * full chip halt / reconfigure / restart so the ring setup matches the
 * new frame size.  Returns 0 on success or a negative errno.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err, reset_phy = 0;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* Chip must be halted before the ring configuration can change. */
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		reset_phy = 1;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	/* tg3_phy_start must run without tg3_full_lock held. */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
13286
/* Netdevice operations exported through dev->netdev_ops. */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
13304
13305 static void tg3_get_eeprom_size(struct tg3 *tp)
13306 {
13307         u32 cursize, val, magic;
13308
13309         tp->nvram_size = EEPROM_CHIP_SIZE;
13310
13311         if (tg3_nvram_read(tp, 0, &magic) != 0)
13312                 return;
13313
13314         if ((magic != TG3_EEPROM_MAGIC) &&
13315             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13316             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13317                 return;
13318
13319         /*
13320          * Size the chip by reading offsets at increasing powers of two.
13321          * When we encounter our validation signature, we know the addressing
13322          * has wrapped around, and thus have our chip size.
13323          */
13324         cursize = 0x10;
13325
13326         while (cursize < tp->nvram_size) {
13327                 if (tg3_nvram_read(tp, cursize, &val) != 0)
13328                         return;
13329
13330                 if (val == magic)
13331                         break;
13332
13333                 cursize <<= 1;
13334         }
13335
13336         tp->nvram_size = cursize;
13337 }
13338
13339 static void tg3_get_nvram_size(struct tg3 *tp)
13340 {
13341         u32 val;
13342
13343         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13344                 return;
13345
13346         /* Selfboot format */
13347         if (val != TG3_EEPROM_MAGIC) {
13348                 tg3_get_eeprom_size(tp);
13349                 return;
13350         }
13351
13352         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13353                 if (val != 0) {
13354                         /* This is confusing.  We want to operate on the
13355                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
13356                          * call will read from NVRAM and byteswap the data
13357                          * according to the byteswapping settings for all
13358                          * other register accesses.  This ensures the data we
13359                          * want will always reside in the lower 16-bits.
13360                          * However, the data in NVRAM is in LE format, which
13361                          * means the data from the NVRAM read will always be
13362                          * opposite the endianness of the CPU.  The 16-bit
13363                          * byteswap then brings the data to CPU endianness.
13364                          */
13365                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13366                         return;
13367                 }
13368         }
13369         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13370 }
13371
/* Decode NVRAM_CFG1 for 5750-era and older devices: note whether a flash
 * interface is strapped in, then translate the vendor field into a JEDEC
 * vendor id, page size, and buffered/unbuffered mode.  On non-5750/5780
 * parts the vendor field is not decoded and a buffered Atmel AT45DB0X1B
 * is assumed.
 */
static void tg3_get_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);
        if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
                tg3_flag_set(tp, FLASH);
        } else {
                /* No flash interface: clear the compatibility bypass so
                 * EEPROM-style accesses go through — NOTE(review): presumed
                 * from the write-back below; confirm against chip docs.
                 */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
            tg3_flag(tp, 5780_CLASS)) {
                switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
                case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
                        break;
                case FLASH_VENDOR_ATMEL_EEPROM:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_ST:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_SAIFUN:
                        tp->nvram_jedecnum = JEDEC_SAIFUN;
                        tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
                        break;
                case FLASH_VENDOR_SST_SMALL:
                case FLASH_VENDOR_SST_LARGE:
                        tp->nvram_jedecnum = JEDEC_SST;
                        tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
                        break;
                }
        } else {
                /* Older chips: assume a buffered Atmel AT45DB0X1B part. */
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                tg3_flag_set(tp, NVRAM_BUFFERED);
        }
}
13422
13423 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13424 {
13425         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13426         case FLASH_5752PAGE_SIZE_256:
13427                 tp->nvram_pagesize = 256;
13428                 break;
13429         case FLASH_5752PAGE_SIZE_512:
13430                 tp->nvram_pagesize = 512;
13431                 break;
13432         case FLASH_5752PAGE_SIZE_1K:
13433                 tp->nvram_pagesize = 1024;
13434                 break;
13435         case FLASH_5752PAGE_SIZE_2K:
13436                 tp->nvram_pagesize = 2048;
13437                 break;
13438         case FLASH_5752PAGE_SIZE_4K:
13439                 tp->nvram_pagesize = 4096;
13440                 break;
13441         case FLASH_5752PAGE_SIZE_264:
13442                 tp->nvram_pagesize = 264;
13443                 break;
13444         case FLASH_5752PAGE_SIZE_528:
13445                 tp->nvram_pagesize = 528;
13446                 break;
13447         }
13448 }
13449
13450 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13451 {
13452         u32 nvcfg1;
13453
13454         nvcfg1 = tr32(NVRAM_CFG1);
13455
13456         /* NVRAM protection for TPM */
13457         if (nvcfg1 & (1 << 27))
13458                 tg3_flag_set(tp, PROTECTED_NVRAM);
13459
13460         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13461         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13462         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13463                 tp->nvram_jedecnum = JEDEC_ATMEL;
13464                 tg3_flag_set(tp, NVRAM_BUFFERED);
13465                 break;
13466         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13467                 tp->nvram_jedecnum = JEDEC_ATMEL;
13468                 tg3_flag_set(tp, NVRAM_BUFFERED);
13469                 tg3_flag_set(tp, FLASH);
13470                 break;
13471         case FLASH_5752VENDOR_ST_M45PE10:
13472         case FLASH_5752VENDOR_ST_M45PE20:
13473         case FLASH_5752VENDOR_ST_M45PE40:
13474                 tp->nvram_jedecnum = JEDEC_ST;
13475                 tg3_flag_set(tp, NVRAM_BUFFERED);
13476                 tg3_flag_set(tp, FLASH);
13477                 break;
13478         }
13479
13480         if (tg3_flag(tp, FLASH)) {
13481                 tg3_nvram_get_pagesize(tp, nvcfg1);
13482         } else {
13483                 /* For eeprom, set pagesize to maximum eeprom size */
13484                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13485
13486                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13487                 tw32(NVRAM_CFG1, nvcfg1);
13488         }
13489 }
13490
13491 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13492 {
13493         u32 nvcfg1, protect = 0;
13494
13495         nvcfg1 = tr32(NVRAM_CFG1);
13496
13497         /* NVRAM protection for TPM */
13498         if (nvcfg1 & (1 << 27)) {
13499                 tg3_flag_set(tp, PROTECTED_NVRAM);
13500                 protect = 1;
13501         }
13502
13503         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13504         switch (nvcfg1) {
13505         case FLASH_5755VENDOR_ATMEL_FLASH_1:
13506         case FLASH_5755VENDOR_ATMEL_FLASH_2:
13507         case FLASH_5755VENDOR_ATMEL_FLASH_3:
13508         case FLASH_5755VENDOR_ATMEL_FLASH_5:
13509                 tp->nvram_jedecnum = JEDEC_ATMEL;
13510                 tg3_flag_set(tp, NVRAM_BUFFERED);
13511                 tg3_flag_set(tp, FLASH);
13512                 tp->nvram_pagesize = 264;
13513                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13514                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13515                         tp->nvram_size = (protect ? 0x3e200 :
13516                                           TG3_NVRAM_SIZE_512KB);
13517                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13518                         tp->nvram_size = (protect ? 0x1f200 :
13519                                           TG3_NVRAM_SIZE_256KB);
13520                 else
13521                         tp->nvram_size = (protect ? 0x1f200 :
13522                                           TG3_NVRAM_SIZE_128KB);
13523                 break;
13524         case FLASH_5752VENDOR_ST_M45PE10:
13525         case FLASH_5752VENDOR_ST_M45PE20:
13526         case FLASH_5752VENDOR_ST_M45PE40:
13527                 tp->nvram_jedecnum = JEDEC_ST;
13528                 tg3_flag_set(tp, NVRAM_BUFFERED);
13529                 tg3_flag_set(tp, FLASH);
13530                 tp->nvram_pagesize = 256;
13531                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13532                         tp->nvram_size = (protect ?
13533                                           TG3_NVRAM_SIZE_64KB :
13534                                           TG3_NVRAM_SIZE_128KB);
13535                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13536                         tp->nvram_size = (protect ?
13537                                           TG3_NVRAM_SIZE_64KB :
13538                                           TG3_NVRAM_SIZE_256KB);
13539                 else
13540                         tp->nvram_size = (protect ?
13541                                           TG3_NVRAM_SIZE_128KB :
13542                                           TG3_NVRAM_SIZE_512KB);
13543                 break;
13544         }
13545 }
13546
13547 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13548 {
13549         u32 nvcfg1;
13550
13551         nvcfg1 = tr32(NVRAM_CFG1);
13552
13553         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13554         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13555         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13556         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13557         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13558                 tp->nvram_jedecnum = JEDEC_ATMEL;
13559                 tg3_flag_set(tp, NVRAM_BUFFERED);
13560                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13561
13562                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13563                 tw32(NVRAM_CFG1, nvcfg1);
13564                 break;
13565         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13566         case FLASH_5755VENDOR_ATMEL_FLASH_1:
13567         case FLASH_5755VENDOR_ATMEL_FLASH_2:
13568         case FLASH_5755VENDOR_ATMEL_FLASH_3:
13569                 tp->nvram_jedecnum = JEDEC_ATMEL;
13570                 tg3_flag_set(tp, NVRAM_BUFFERED);
13571                 tg3_flag_set(tp, FLASH);
13572                 tp->nvram_pagesize = 264;
13573                 break;
13574         case FLASH_5752VENDOR_ST_M45PE10:
13575         case FLASH_5752VENDOR_ST_M45PE20:
13576         case FLASH_5752VENDOR_ST_M45PE40:
13577                 tp->nvram_jedecnum = JEDEC_ST;
13578                 tg3_flag_set(tp, NVRAM_BUFFERED);
13579                 tg3_flag_set(tp, FLASH);
13580                 tp->nvram_pagesize = 256;
13581                 break;
13582         }
13583 }
13584
13585 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13586 {
13587         u32 nvcfg1, protect = 0;
13588
13589         nvcfg1 = tr32(NVRAM_CFG1);
13590
13591         /* NVRAM protection for TPM */
13592         if (nvcfg1 & (1 << 27)) {
13593                 tg3_flag_set(tp, PROTECTED_NVRAM);
13594                 protect = 1;
13595         }
13596
13597         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13598         switch (nvcfg1) {
13599         case FLASH_5761VENDOR_ATMEL_ADB021D:
13600         case FLASH_5761VENDOR_ATMEL_ADB041D:
13601         case FLASH_5761VENDOR_ATMEL_ADB081D:
13602         case FLASH_5761VENDOR_ATMEL_ADB161D:
13603         case FLASH_5761VENDOR_ATMEL_MDB021D:
13604         case FLASH_5761VENDOR_ATMEL_MDB041D:
13605         case FLASH_5761VENDOR_ATMEL_MDB081D:
13606         case FLASH_5761VENDOR_ATMEL_MDB161D:
13607                 tp->nvram_jedecnum = JEDEC_ATMEL;
13608                 tg3_flag_set(tp, NVRAM_BUFFERED);
13609                 tg3_flag_set(tp, FLASH);
13610                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13611                 tp->nvram_pagesize = 256;
13612                 break;
13613         case FLASH_5761VENDOR_ST_A_M45PE20:
13614         case FLASH_5761VENDOR_ST_A_M45PE40:
13615         case FLASH_5761VENDOR_ST_A_M45PE80:
13616         case FLASH_5761VENDOR_ST_A_M45PE16:
13617         case FLASH_5761VENDOR_ST_M_M45PE20:
13618         case FLASH_5761VENDOR_ST_M_M45PE40:
13619         case FLASH_5761VENDOR_ST_M_M45PE80:
13620         case FLASH_5761VENDOR_ST_M_M45PE16:
13621                 tp->nvram_jedecnum = JEDEC_ST;
13622                 tg3_flag_set(tp, NVRAM_BUFFERED);
13623                 tg3_flag_set(tp, FLASH);
13624                 tp->nvram_pagesize = 256;
13625                 break;
13626         }
13627
13628         if (protect) {
13629                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13630         } else {
13631                 switch (nvcfg1) {
13632                 case FLASH_5761VENDOR_ATMEL_ADB161D:
13633                 case FLASH_5761VENDOR_ATMEL_MDB161D:
13634                 case FLASH_5761VENDOR_ST_A_M45PE16:
13635                 case FLASH_5761VENDOR_ST_M_M45PE16:
13636                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13637                         break;
13638                 case FLASH_5761VENDOR_ATMEL_ADB081D:
13639                 case FLASH_5761VENDOR_ATMEL_MDB081D:
13640                 case FLASH_5761VENDOR_ST_A_M45PE80:
13641                 case FLASH_5761VENDOR_ST_M_M45PE80:
13642                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13643                         break;
13644                 case FLASH_5761VENDOR_ATMEL_ADB041D:
13645                 case FLASH_5761VENDOR_ATMEL_MDB041D:
13646                 case FLASH_5761VENDOR_ST_A_M45PE40:
13647                 case FLASH_5761VENDOR_ST_M_M45PE40:
13648                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13649                         break;
13650                 case FLASH_5761VENDOR_ATMEL_ADB021D:
13651                 case FLASH_5761VENDOR_ATMEL_MDB021D:
13652                 case FLASH_5761VENDOR_ST_A_M45PE20:
13653                 case FLASH_5761VENDOR_ST_M_M45PE20:
13654                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13655                         break;
13656                 }
13657         }
13658 }
13659
13660 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13661 {
13662         tp->nvram_jedecnum = JEDEC_ATMEL;
13663         tg3_flag_set(tp, NVRAM_BUFFERED);
13664         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13665 }
13666
/* Decode NVRAM_CFG1 vendor straps for 57780/57765-class devices.
 * EEPROM parts return early after disabling the compatibility bypass;
 * flash parts fall through to the page-size decode at the bottom; an
 * unrecognized vendor strap means the device has no usable NVRAM.
 */
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                /* For an EEPROM the "page" spans the whole chip. */
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                return;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
        case FLASH_57780VENDOR_ATMEL_AT45DB011D:
        case FLASH_57780VENDOR_ATMEL_AT45DB011B:
        case FLASH_57780VENDOR_ATMEL_AT45DB021D:
        case FLASH_57780VENDOR_ATMEL_AT45DB021B:
        case FLASH_57780VENDOR_ATMEL_AT45DB041D:
        case FLASH_57780VENDOR_ATMEL_AT45DB041B:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                /* Inner switch picks the capacity for the Atmel part. */
                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                case FLASH_57780VENDOR_ATMEL_AT45DB011D:
                case FLASH_57780VENDOR_ATMEL_AT45DB011B:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                case FLASH_57780VENDOR_ATMEL_AT45DB021D:
                case FLASH_57780VENDOR_ATMEL_AT45DB021B:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_57780VENDOR_ATMEL_AT45DB041D:
                case FLASH_57780VENDOR_ATMEL_AT45DB041B:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                }
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                /* Inner switch picks the capacity for the ST part. */
                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ST_M45PE10:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                case FLASH_5752VENDOR_ST_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        /* 264/528-byte pages use translated addressing; other page sizes
         * are addressed linearly — NOTE(review): presumed Atmel-DataFlash
         * behavior; confirm against the addressing code.
         */
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13738
13739
/* Decode NVRAM_CFG1 vendor straps for 5717/5719 devices.  Structure
 * mirrors tg3_get_57780_nvram_info(): EEPROM parts return early, flash
 * parts fall through to page-size decode, unknown straps mean no NVRAM.
 * Sizes left unset here are probed later via tg3_nvram_get_size().
 */
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5717VENDOR_ATMEL_EEPROM:
        case FLASH_5717VENDOR_MICRO_EEPROM:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                /* For an EEPROM the "page" spans the whole chip. */
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                return;
        case FLASH_5717VENDOR_ATMEL_MDB011D:
        case FLASH_5717VENDOR_ATMEL_ADB011B:
        case FLASH_5717VENDOR_ATMEL_ADB011D:
        case FLASH_5717VENDOR_ATMEL_MDB021D:
        case FLASH_5717VENDOR_ATMEL_ADB021B:
        case FLASH_5717VENDOR_ATMEL_ADB021D:
        case FLASH_5717VENDOR_ATMEL_45USPT:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5717VENDOR_ATMEL_MDB021D:
                        /* Detect size with tg3_nvram_get_size() */
                        break;
                case FLASH_5717VENDOR_ATMEL_ADB021B:
                case FLASH_5717VENDOR_ATMEL_ADB021D:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        case FLASH_5717VENDOR_ST_M_M25PE10:
        case FLASH_5717VENDOR_ST_A_M25PE10:
        case FLASH_5717VENDOR_ST_M_M45PE10:
        case FLASH_5717VENDOR_ST_A_M45PE10:
        case FLASH_5717VENDOR_ST_M_M25PE20:
        case FLASH_5717VENDOR_ST_A_M25PE20:
        case FLASH_5717VENDOR_ST_M_M45PE20:
        case FLASH_5717VENDOR_ST_A_M45PE20:
        case FLASH_5717VENDOR_ST_25USPT:
        case FLASH_5717VENDOR_ST_45USPT:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5717VENDOR_ST_M_M25PE20:
                case FLASH_5717VENDOR_ST_M_M45PE20:
                        /* Detect size with tg3_nvram_get_size() */
                        break;
                case FLASH_5717VENDOR_ST_A_M25PE20:
                case FLASH_5717VENDOR_ST_A_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        /* 264/528-byte pages use translated addressing; others are
         * linear — NOTE(review): presumed; confirm against access code.
         */
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13817
/* Decode NVRAM_CFG1 vendor straps for 5720 and 5762 devices.  On 5762
 * the EEPROM strap codes are first remapped onto their 5720 equivalents
 * so the common decode below can be shared; a 5762 whose vendor field
 * is all-zero has no NVRAM fitted.  After decode, a 5762 additionally
 * validates the first NVRAM word against the expected magic numbers.
 */
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, nvmpinstrp;

        nvcfg1 = tr32(NVRAM_CFG1);
        nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
                /* Zero vendor strap on 5762 means no NVRAM present. */
                if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
                        tg3_flag_set(tp, NO_NVRAM);
                        return;
                }

                /* Remap 5762-specific EEPROM codes to 5720 ones so the
                 * shared decode below handles them.
                 */
                switch (nvmpinstrp) {
                case FLASH_5762_EEPROM_HD:
                        nvmpinstrp = FLASH_5720_EEPROM_HD;
                        break;
                case FLASH_5762_EEPROM_LD:
                        nvmpinstrp = FLASH_5720_EEPROM_LD;
                        break;
                }
        }

        switch (nvmpinstrp) {
        case FLASH_5720_EEPROM_HD:
        case FLASH_5720_EEPROM_LD:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);

                /* EEPROM part: disable the compatibility bypass and use
                 * the whole chip as the page.
                 */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                if (nvmpinstrp == FLASH_5720_EEPROM_HD)
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                else
                        tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
                return;
        case FLASH_5720VENDOR_M_ATMEL_DB011D:
        case FLASH_5720VENDOR_A_ATMEL_DB011B:
        case FLASH_5720VENDOR_A_ATMEL_DB011D:
        case FLASH_5720VENDOR_M_ATMEL_DB021D:
        case FLASH_5720VENDOR_A_ATMEL_DB021B:
        case FLASH_5720VENDOR_A_ATMEL_DB021D:
        case FLASH_5720VENDOR_M_ATMEL_DB041D:
        case FLASH_5720VENDOR_A_ATMEL_DB041B:
        case FLASH_5720VENDOR_A_ATMEL_DB041D:
        case FLASH_5720VENDOR_M_ATMEL_DB081D:
        case FLASH_5720VENDOR_A_ATMEL_DB081D:
        case FLASH_5720VENDOR_ATMEL_45USPT:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                /* Inner switch picks the capacity of the Atmel part. */
                switch (nvmpinstrp) {
                case FLASH_5720VENDOR_M_ATMEL_DB021D:
                case FLASH_5720VENDOR_A_ATMEL_DB021B:
                case FLASH_5720VENDOR_A_ATMEL_DB021D:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5720VENDOR_M_ATMEL_DB041D:
                case FLASH_5720VENDOR_A_ATMEL_DB041B:
                case FLASH_5720VENDOR_A_ATMEL_DB041D:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5720VENDOR_M_ATMEL_DB081D:
                case FLASH_5720VENDOR_A_ATMEL_DB081D:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        case FLASH_5720VENDOR_M_ST_M25PE10:
        case FLASH_5720VENDOR_M_ST_M45PE10:
        case FLASH_5720VENDOR_A_ST_M25PE10:
        case FLASH_5720VENDOR_A_ST_M45PE10:
        case FLASH_5720VENDOR_M_ST_M25PE20:
        case FLASH_5720VENDOR_M_ST_M45PE20:
        case FLASH_5720VENDOR_A_ST_M25PE20:
        case FLASH_5720VENDOR_A_ST_M45PE20:
        case FLASH_5720VENDOR_M_ST_M25PE40:
        case FLASH_5720VENDOR_M_ST_M45PE40:
        case FLASH_5720VENDOR_A_ST_M25PE40:
        case FLASH_5720VENDOR_A_ST_M45PE40:
        case FLASH_5720VENDOR_M_ST_M25PE80:
        case FLASH_5720VENDOR_M_ST_M45PE80:
        case FLASH_5720VENDOR_A_ST_M25PE80:
        case FLASH_5720VENDOR_A_ST_M45PE80:
        case FLASH_5720VENDOR_ST_25USPT:
        case FLASH_5720VENDOR_ST_45USPT:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                /* Inner switch picks the capacity of the ST part. */
                switch (nvmpinstrp) {
                case FLASH_5720VENDOR_M_ST_M25PE20:
                case FLASH_5720VENDOR_M_ST_M45PE20:
                case FLASH_5720VENDOR_A_ST_M25PE20:
                case FLASH_5720VENDOR_A_ST_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5720VENDOR_M_ST_M25PE40:
                case FLASH_5720VENDOR_M_ST_M45PE40:
                case FLASH_5720VENDOR_A_ST_M25PE40:
                case FLASH_5720VENDOR_A_ST_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5720VENDOR_M_ST_M25PE80:
                case FLASH_5720VENDOR_M_ST_M45PE80:
                case FLASH_5720VENDOR_A_ST_M25PE80:
                case FLASH_5720VENDOR_A_ST_M45PE80:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        /* 264/528-byte pages use translated addressing; others are
         * linear — NOTE(review): presumed; confirm against access code.
         */
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
                u32 val;

                if (tg3_nvram_read(tp, 0, &val))
                        return;

                /* Word 0 must carry one of the known magic signatures,
                 * otherwise treat the contents as absent/unusable.
                 */
                if (val != TG3_EEPROM_MAGIC &&
                    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
                        tg3_flag_set(tp, NO_NVRAM);
        }
}
13956
/* Chips other than 5700/5701 use the NVRAM for fetching info.
 *
 * Resets the EEPROM state machine, enables serial-EEPROM access, and
 * dispatches to the per-ASIC NVRAM_CFG1 decoder.  If the decoder did
 * not determine a size, it is probed afterwards; 5700/5701 (and SSB
 * cores, which have no NVRAM at all) fall back to plain EEPROM sizing.
 */
static void tg3_nvram_init(struct tg3 *tp)
{
        if (tg3_flag(tp, IS_SSB_CORE)) {
                /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
                tg3_flag_clear(tp, NVRAM);
                tg3_flag_clear(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        /* Reset the EEPROM state machine and program its clock period. */
        tw32_f(GRC_EEPROM_ADDR,
             (EEPROM_ADDR_FSM_RESET |
              (EEPROM_DEFAULT_CLOCK_PERIOD <<
               EEPROM_ADDR_CLKPERD_SHIFT)));

        msleep(1);

        /* Enable seeprom accesses. */
        tw32_f(GRC_LOCAL_CTRL,
             tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
        udelay(100);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
                tg3_flag_set(tp, NVRAM);

                /* NVRAM is shared with firmware; take the hardware lock
                 * before touching it.
                 */
                if (tg3_nvram_lock(tp)) {
                        netdev_warn(tp->dev,
                                    "Cannot get nvram lock, %s failed\n",
                                    __func__);
                        return;
                }
                tg3_enable_nvram_access(tp);

                /* Decoders set this when they know the size; zero means
                 * "probe it below".
                 */
                tp->nvram_size = 0;

                /* Dispatch to the ASIC-specific NVRAM_CFG1 decoder. */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
                        tg3_get_5752_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
                        tg3_get_5755_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                        tg3_get_5787_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        tg3_get_5761_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                        tg3_get_5906_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
                         tg3_flag(tp, 57765_CLASS))
                        tg3_get_57780_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
                        tg3_get_5717_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
                        tg3_get_5720_nvram_info(tp);
                else
                        tg3_get_nvram_info(tp);

                if (tp->nvram_size == 0)
                        tg3_get_nvram_size(tp);

                tg3_disable_nvram_access(tp);
                tg3_nvram_unlock(tp);

        } else {
                tg3_flag_clear(tp, NVRAM);
                tg3_flag_clear(tp, NVRAM_BUFFERED);

                tg3_get_eeprom_size(tp);
        }
}
14031
/* One entry mapping a board's PCI subsystem (vendor, device) pair to
 * the PHY id fitted on that board; consumed by tg3_lookup_by_subsys().
 */
struct subsys_tbl_ent {
        u16 subsys_vendor, subsys_devid;        /* PCI subsystem IDs */
        u32 phy_id;                             /* TG3_PHY_ID_* or 0 */
};
14036
/* Per-board PHY overrides keyed by PCI subsystem IDs.  A phy_id of 0
 * records no copper PHY id for that board — NOTE(review): these look
 * like fiber/serdes boards (e.g. the 3C996SX); confirm before relying
 * on that interpretation.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
        /* Broadcom boards. */
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

        /* 3com boards. */
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

        /* DELL boards. */
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

        /* Compaq boards. */
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

        /* IBM boards. */
        { TG3PCI_SUBVENDOR_ID_IBM,
          TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
14100
14101 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14102 {
14103         int i;
14104
14105         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14106                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14107                      tp->pdev->subsystem_vendor) &&
14108                     (subsys_id_to_phy_id[i].subsys_devid ==
14109                      tp->pdev->subsystem_device))
14110                         return &subsys_id_to_phy_id[i];
14111         }
14112         return NULL;
14113 }
14114
/* Read the configuration the bootcode left behind — PHY id, LED mode,
 * WOL/ASF/APE enables and assorted workaround flags — from NIC SRAM
 * (or, for 5906, the VCPU shadow register) and translate it into
 * tp->phy_id, tp->led_ctrl, tp->phy_flags and the tg3 flag bits.
 * Safe defaults are kept when no valid NIC SRAM signature is found.
 */
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906 exposes its config through the VCPU shadow
		 * register rather than the NIC SRAM area below.
		 */
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		/* CFG_2 only exists on newer ASICs with a sane version. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			/* Repack the SRAM encoding into the same layout
			 * tg3_phy_probe() builds from MII_PHYSID1/2.
			 */
			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* Arima subsystem IDs 0x205a/0x2063 are exempted
			 * from the EEPROM write-protect bit.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	/* Propagate the final WOL capability/enable state to the PM core. */
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
14314
/* Read one 32-bit word from the APE OTP region into *val.  Takes the
 * NVRAM lock around the whole transaction, issues a read command and
 * polls up to ~1 ms for completion.  Returns 0 on success, the
 * tg3_nvram_lock() error, or -EBUSY on timeout.
 */
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;	/* offset scaled by 8 per APE OTP addressing — TODO confirm units */

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	/* Read back before delaying — presumably flushes the posted write. */
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(10);

	/* Poll for completion: up to 100 x 10us. */
	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	/* val2 holds the last status read; the loop always ran at least once. */
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
14347
14348 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14349 {
14350         int i;
14351         u32 val;
14352
14353         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14354         tw32(OTP_CTRL, cmd);
14355
14356         /* Wait for up to 1 ms for command to execute. */
14357         for (i = 0; i < 100; i++) {
14358                 val = tr32(OTP_STATUS);
14359                 if (val & OTP_STATUS_CMD_DONE)
14360                         break;
14361                 udelay(10);
14362         }
14363
14364         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14365 }
14366
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP access through the GRC registers. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	/* A return of 0 anywhere below means "no usable config". */
	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	/* Merge: low half of the first word becomes the high half of
	 * the result; high half of the second word becomes the low half.
	 */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
14396
14397 static void tg3_phy_init_link_config(struct tg3 *tp)
14398 {
14399         u32 adv = ADVERTISED_Autoneg;
14400
14401         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14402                 adv |= ADVERTISED_1000baseT_Half |
14403                        ADVERTISED_1000baseT_Full;
14404
14405         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14406                 adv |= ADVERTISED_100baseT_Half |
14407                        ADVERTISED_100baseT_Full |
14408                        ADVERTISED_10baseT_Half |
14409                        ADVERTISED_10baseT_Full |
14410                        ADVERTISED_TP;
14411         else
14412                 adv |= ADVERTISED_FIBRE;
14413
14414         tp->link_config.advertising = adv;
14415         tp->link_config.speed = SPEED_UNKNOWN;
14416         tp->link_config.duplex = DUPLEX_UNKNOWN;
14417         tp->link_config.autoneg = AUTONEG_ENABLE;
14418         tp->link_config.active_speed = SPEED_UNKNOWN;
14419         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14420
14421         tp->old_link = -1;
14422 }
14423
/* Identify the attached PHY.  Prefers the ID read directly over MDIO;
 * falls back to the ID recorded by tg3_get_eeprom_hw_cfg() and then to
 * the hard-coded subsystem-ID table.  Also selects the per-function
 * APE PHY lock, seeds the default link configuration, and (when no
 * firmware owns the PHY) restarts autonegotiation if the current
 * advertisement is wrong.  Returns 0 or a negative errno.
 */
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Each PCI function uses its own APE PHY lock. */
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Same packed layout tg3_get_eeprom_hw_cfg() builds. */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Mark the chip revisions that get TG3_PHYFLG_EEE_CAP. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* BMSR link status is latched-low; read twice so the
		 * second read reflects the current link state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
14561
/* Extract the board part number (and, on Dell boards, a bootcode
 * firmware-version prefix) from the PCI VPD area into
 * tp->board_part_number / tp->fw_ver.  Falls back to per-device
 * hard-coded part number strings when no usable VPD is present.
 */
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i = 0;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	/* Locate the read-only VPD section and bound-check it. */
	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	/* Only when MFR_ID is exactly "1028" (Dell's PCI vendor ID)
	 * is the VENDOR0 keyword treated as a firmware version.
	 */
	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		/* NOTE(review): this bound tracks vpdlen, not the size
		 * of tp->fw_ver; assumes vpdlen - len - 1 never exceeds
		 * the remaining fw_ver space — verify against
		 * TG3_VER_SIZE like the other version helpers.
		 */
		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	/* No VPD part number: synthesize one from the PCI device ID. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
14682
14683 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14684 {
14685         u32 val;
14686
14687         if (tg3_nvram_read(tp, offset, &val) ||
14688             (val & 0xfc000000) != 0x0c000000 ||
14689             tg3_nvram_read(tp, offset + 4, &val) ||
14690             val != 0)
14691                 return 0;
14692
14693         return 1;
14694 }
14695
/* Append the bootcode version to tp->fw_ver.  Newer images embed a
 * 16-byte version string; older ones only provide major/minor fields
 * in the NVM pointer area.  Silently returns on any NVRAM error or
 * if the version would not fit.
 */
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* Same header test as tg3_fw_img_is_valid(): signature word
	 * followed by a zero word marks a new-style image.
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* Copy the embedded 16-byte version string verbatim,
		 * 4 bytes at a time, preserving NVRAM byte order.
		 */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		/* Old-style: decode major/minor from the pointer area. */
		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
14747
14748 static void tg3_read_hwsb_ver(struct tg3 *tp)
14749 {
14750         u32 val, major, minor;
14751
14752         /* Use native endian representation */
14753         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14754                 return;
14755
14756         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14757                 TG3_NVM_HWSB_CFG1_MAJSFT;
14758         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14759                 TG3_NVM_HWSB_CFG1_MINSFT;
14760
14761         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
14762 }
14763
/* Append the selfboot (format 1) version to tp->fw_ver, decoded from
 * the revision-specific EDH word in the EEPROM.  @val is the already
 * read signature/format word.  On unknown format/revision or any read
 * failure, only the "sb" marker is left in tp->fw_ver.
 */
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* The EDH word lives at a revision-dependent offset. */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity limits: minor is two digits, build maps to 'a'..'z'. */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	/* Non-zero build numbers appear as a trailing letter ('a'...). */
	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
14818
/* Append the management (ASF) firmware version, located through the
 * NVM directory, to tp->fw_ver as ", <up to 16 bytes>".  Silently
 * returns on any NVRAM error or if no ASF entry exists.
 */
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	/* Scan the NVM directory for the ASF-init entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 parts use a fixed load base; newer parts store it
	 * in the word preceding the directory entry.
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	/* NOTE(review): no room check before writing these two bytes;
	 * assumes vlen <= TG3_VER_SIZE - 2 here — verify the callers'
	 * accumulated version length cannot exceed that.
	 */
	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	/* Copy up to 16 version bytes, truncating at the buffer end. */
	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
14870
14871 static void tg3_probe_ncsi(struct tg3 *tp)
14872 {
14873         u32 apedata;
14874
14875         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14876         if (apedata != APE_SEG_SIG_MAGIC)
14877                 return;
14878
14879         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14880         if (!(apedata & APE_FW_STATUS_READY))
14881                 return;
14882
14883         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14884                 tg3_flag_set(tp, APE_HAS_NCSI);
14885 }
14886
14887 static void tg3_read_dash_ver(struct tg3 *tp)
14888 {
14889         int vlen;
14890         u32 apedata;
14891         char *fwtype;
14892
14893         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14894
14895         if (tg3_flag(tp, APE_HAS_NCSI))
14896                 fwtype = "NCSI";
14897         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
14898                 fwtype = "SMASH";
14899         else
14900                 fwtype = "DASH";
14901
14902         vlen = strlen(tp->fw_ver);
14903
14904         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14905                  fwtype,
14906                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14907                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14908                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14909                  (apedata & APE_FW_VERSION_BLDMSK));
14910 }
14911
14912 static void tg3_read_otp_ver(struct tg3 *tp)
14913 {
14914         u32 val, val2;
14915
14916         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5762)
14917                 return;
14918
14919         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
14920             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
14921             TG3_OTP_MAGIC0_VALID(val)) {
14922                 u64 val64 = (u64) val << 32 | val2;
14923                 u32 ver = 0;
14924                 int i, vlen;
14925
14926                 for (i = 0; i < 7; i++) {
14927                         if ((val64 & 0xff) == 0)
14928                                 break;
14929                         ver = val64 & 0xff;
14930                         val64 >>= 8;
14931                 }
14932                 vlen = strlen(tp->fw_ver);
14933                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
14934         }
14935 }
14936
14937 static void tg3_read_fw_ver(struct tg3 *tp)
14938 {
14939         u32 val;
14940         bool vpd_vers = false;
14941
14942         if (tp->fw_ver[0] != 0)
14943                 vpd_vers = true;
14944
14945         if (tg3_flag(tp, NO_NVRAM)) {
14946                 strcat(tp->fw_ver, "sb");
14947                 tg3_read_otp_ver(tp);
14948                 return;
14949         }
14950
14951         if (tg3_nvram_read(tp, 0, &val))
14952                 return;
14953
14954         if (val == TG3_EEPROM_MAGIC)
14955                 tg3_read_bc_ver(tp);
14956         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14957                 tg3_read_sb_ver(tp, val);
14958         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14959                 tg3_read_hwsb_ver(tp);
14960
14961         if (tg3_flag(tp, ENABLE_ASF)) {
14962                 if (tg3_flag(tp, ENABLE_APE)) {
14963                         tg3_probe_ncsi(tp);
14964                         if (!vpd_vers)
14965                                 tg3_read_dash_ver(tp);
14966                 } else if (!vpd_vers) {
14967                         tg3_read_mgmtfw_ver(tp);
14968                 }
14969         }
14970
14971         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
14972 }
14973
14974 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14975 {
14976         if (tg3_flag(tp, LRG_PROD_RING_CAP))
14977                 return TG3_RX_RET_MAX_SIZE_5717;
14978         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14979                 return TG3_RX_RET_MAX_SIZE_5700;
14980         else
14981                 return TG3_RX_RET_MAX_SIZE_5705;
14982 }
14983
/* Host bridges/chipsets known to reorder posted writes to the mailbox
 * registers (AMD 762, AMD 8131, VIA K8T800).  If one of these is
 * present and the NIC is not PCIe, tg3_get_invariants() enables the
 * MBOX_WRITE_REORDER workaround (read back after every mailbox write).
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
14990
/* Locate the sibling PCI function of a dual-port device by scanning
 * all eight functions of this device's slot for a pci_dev other than
 * tp->pdev.  Returns the peer, or tp->pdev itself when no sibling
 * function exists (single-port configuration).
 *
 * NOTE(review): the returned pointer is deliberately NOT
 * reference-counted -- see the comment before the final pci_dev_put().
 */
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	/* Mask off the function bits to get the slot's base devfn. */
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* Drop the reference taken by pci_get_slot() for
		 * non-matches; pci_dev_put(NULL) is a no-op.
		 */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
15018
15019 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15020 {
15021         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15022         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
15023                 u32 reg;
15024
15025                 /* All devices that use the alternate
15026                  * ASIC REV location have a CPMU.
15027                  */
15028                 tg3_flag_set(tp, CPMU_PRESENT);
15029
15030                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15031                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15032                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15033                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15034                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15035                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15036                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15037                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15038                         reg = TG3PCI_GEN2_PRODID_ASICREV;
15039                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15040                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15041                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15042                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15043                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15044                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15045                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15046                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15047                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15048                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15049                         reg = TG3PCI_GEN15_PRODID_ASICREV;
15050                 else
15051                         reg = TG3PCI_PRODID_ASICREV;
15052
15053                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15054         }
15055
15056         /* Wrong chip ID in 5752 A0. This code can be removed later
15057          * as A0 is not in production.
15058          */
15059         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
15060                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15061
15062         if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
15063                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15064
15065         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15066             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15067             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15068                 tg3_flag_set(tp, 5717_PLUS);
15069
15070         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
15071             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
15072                 tg3_flag_set(tp, 57765_CLASS);
15073
15074         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15075              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15076                 tg3_flag_set(tp, 57765_PLUS);
15077
15078         /* Intentionally exclude ASIC_REV_5906 */
15079         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15080             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
15081             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15082             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15083             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15084             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
15085             tg3_flag(tp, 57765_PLUS))
15086                 tg3_flag_set(tp, 5755_PLUS);
15087
15088         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
15089             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
15090                 tg3_flag_set(tp, 5780_CLASS);
15091
15092         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15093             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15094             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
15095             tg3_flag(tp, 5755_PLUS) ||
15096             tg3_flag(tp, 5780_CLASS))
15097                 tg3_flag_set(tp, 5750_PLUS);
15098
15099         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15100             tg3_flag(tp, 5750_PLUS))
15101                 tg3_flag_set(tp, 5705_PLUS);
15102 }
15103
15104 static bool tg3_10_100_only_device(struct tg3 *tp,
15105                                    const struct pci_device_id *ent)
15106 {
15107         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15108
15109         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15110             (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15111             (tp->phy_flags & TG3_PHYFLG_IS_FET))
15112                 return true;
15113
15114         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15115                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
15116                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15117                                 return true;
15118                 } else {
15119                         return true;
15120                 }
15121         }
15122
15123         return false;
15124 }
15125
15126 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15127 {
15128         u32 misc_ctrl_reg;
15129         u32 pci_state_reg, grc_misc_cfg;
15130         u32 val;
15131         u16 pci_cmd;
15132         int err;
15133
15134         /* Force memory write invalidate off.  If we leave it on,
15135          * then on 5700_BX chips we have to enable a workaround.
15136          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15137          * to match the cacheline size.  The Broadcom driver have this
15138          * workaround but turns MWI off all the times so never uses
15139          * it.  This seems to suggest that the workaround is insufficient.
15140          */
15141         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15142         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15143         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15144
15145         /* Important! -- Make sure register accesses are byteswapped
15146          * correctly.  Also, for those chips that require it, make
15147          * sure that indirect register accesses are enabled before
15148          * the first operation.
15149          */
15150         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15151                               &misc_ctrl_reg);
15152         tp->misc_host_ctrl |= (misc_ctrl_reg &
15153                                MISC_HOST_CTRL_CHIPREV);
15154         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15155                                tp->misc_host_ctrl);
15156
15157         tg3_detect_asic_rev(tp, misc_ctrl_reg);
15158
15159         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15160          * we need to disable memory and use config. cycles
15161          * only to access all registers. The 5702/03 chips
15162          * can mistakenly decode the special cycles from the
15163          * ICH chipsets as memory write cycles, causing corruption
15164          * of register and memory space. Only certain ICH bridges
15165          * will drive special cycles with non-zero data during the
15166          * address phase which can fall within the 5703's address
15167          * range. This is not an ICH bug as the PCI spec allows
15168          * non-zero address during special cycles. However, only
15169          * these ICH bridges are known to drive non-zero addresses
15170          * during special cycles.
15171          *
15172          * Since special cycles do not cross PCI bridges, we only
15173          * enable this workaround if the 5703 is on the secondary
15174          * bus of these ICH bridges.
15175          */
15176         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
15177             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
15178                 static struct tg3_dev_id {
15179                         u32     vendor;
15180                         u32     device;
15181                         u32     rev;
15182                 } ich_chipsets[] = {
15183                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15184                           PCI_ANY_ID },
15185                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15186                           PCI_ANY_ID },
15187                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15188                           0xa },
15189                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15190                           PCI_ANY_ID },
15191                         { },
15192                 };
15193                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15194                 struct pci_dev *bridge = NULL;
15195
15196                 while (pci_id->vendor != 0) {
15197                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
15198                                                 bridge);
15199                         if (!bridge) {
15200                                 pci_id++;
15201                                 continue;
15202                         }
15203                         if (pci_id->rev != PCI_ANY_ID) {
15204                                 if (bridge->revision > pci_id->rev)
15205                                         continue;
15206                         }
15207                         if (bridge->subordinate &&
15208                             (bridge->subordinate->number ==
15209                              tp->pdev->bus->number)) {
15210                                 tg3_flag_set(tp, ICH_WORKAROUND);
15211                                 pci_dev_put(bridge);
15212                                 break;
15213                         }
15214                 }
15215         }
15216
15217         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15218                 static struct tg3_dev_id {
15219                         u32     vendor;
15220                         u32     device;
15221                 } bridge_chipsets[] = {
15222                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15223                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15224                         { },
15225                 };
15226                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15227                 struct pci_dev *bridge = NULL;
15228
15229                 while (pci_id->vendor != 0) {
15230                         bridge = pci_get_device(pci_id->vendor,
15231                                                 pci_id->device,
15232                                                 bridge);
15233                         if (!bridge) {
15234                                 pci_id++;
15235                                 continue;
15236                         }
15237                         if (bridge->subordinate &&
15238                             (bridge->subordinate->number <=
15239                              tp->pdev->bus->number) &&
15240                             (bridge->subordinate->busn_res.end >=
15241                              tp->pdev->bus->number)) {
15242                                 tg3_flag_set(tp, 5701_DMA_BUG);
15243                                 pci_dev_put(bridge);
15244                                 break;
15245                         }
15246                 }
15247         }
15248
15249         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15250          * DMA addresses > 40-bit. This bridge may have other additional
15251          * 57xx devices behind it in some 4-port NIC designs for example.
15252          * Any tg3 device found behind the bridge will also need the 40-bit
15253          * DMA workaround.
15254          */
15255         if (tg3_flag(tp, 5780_CLASS)) {
15256                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15257                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15258         } else {
15259                 struct pci_dev *bridge = NULL;
15260
15261                 do {
15262                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15263                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
15264                                                 bridge);
15265                         if (bridge && bridge->subordinate &&
15266                             (bridge->subordinate->number <=
15267                              tp->pdev->bus->number) &&
15268                             (bridge->subordinate->busn_res.end >=
15269                              tp->pdev->bus->number)) {
15270                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15271                                 pci_dev_put(bridge);
15272                                 break;
15273                         }
15274                 } while (bridge);
15275         }
15276
15277         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15278             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
15279                 tp->pdev_peer = tg3_find_peer(tp);
15280
15281         /* Determine TSO capabilities */
15282         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
15283                 ; /* Do nothing. HW bug. */
15284         else if (tg3_flag(tp, 57765_PLUS))
15285                 tg3_flag_set(tp, HW_TSO_3);
15286         else if (tg3_flag(tp, 5755_PLUS) ||
15287                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15288                 tg3_flag_set(tp, HW_TSO_2);
15289         else if (tg3_flag(tp, 5750_PLUS)) {
15290                 tg3_flag_set(tp, HW_TSO_1);
15291                 tg3_flag_set(tp, TSO_BUG);
15292                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
15293                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
15294                         tg3_flag_clear(tp, TSO_BUG);
15295         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15296                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15297                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
15298                         tg3_flag_set(tp, TSO_BUG);
15299                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
15300                         tp->fw_needed = FIRMWARE_TG3TSO5;
15301                 else
15302                         tp->fw_needed = FIRMWARE_TG3TSO;
15303         }
15304
15305         /* Selectively allow TSO based on operating conditions */
15306         if (tg3_flag(tp, HW_TSO_1) ||
15307             tg3_flag(tp, HW_TSO_2) ||
15308             tg3_flag(tp, HW_TSO_3) ||
15309             tp->fw_needed) {
15310                 /* For firmware TSO, assume ASF is disabled.
15311                  * We'll disable TSO later if we discover ASF
15312                  * is enabled in tg3_get_eeprom_hw_cfg().
15313                  */
15314                 tg3_flag_set(tp, TSO_CAPABLE);
15315         } else {
15316                 tg3_flag_clear(tp, TSO_CAPABLE);
15317                 tg3_flag_clear(tp, TSO_BUG);
15318                 tp->fw_needed = NULL;
15319         }
15320
15321         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
15322                 tp->fw_needed = FIRMWARE_TG3;
15323
15324         tp->irq_max = 1;
15325
15326         if (tg3_flag(tp, 5750_PLUS)) {
15327                 tg3_flag_set(tp, SUPPORT_MSI);
15328                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
15329                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
15330                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
15331                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
15332                      tp->pdev_peer == tp->pdev))
15333                         tg3_flag_clear(tp, SUPPORT_MSI);
15334
15335                 if (tg3_flag(tp, 5755_PLUS) ||
15336                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15337                         tg3_flag_set(tp, 1SHOT_MSI);
15338                 }
15339
15340                 if (tg3_flag(tp, 57765_PLUS)) {
15341                         tg3_flag_set(tp, SUPPORT_MSIX);
15342                         tp->irq_max = TG3_IRQ_MAX_VECS;
15343                 }
15344         }
15345
15346         tp->txq_max = 1;
15347         tp->rxq_max = 1;
15348         if (tp->irq_max > 1) {
15349                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15350                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15351
15352                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15353                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15354                         tp->txq_max = tp->irq_max - 1;
15355         }
15356
15357         if (tg3_flag(tp, 5755_PLUS) ||
15358             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15359                 tg3_flag_set(tp, SHORT_DMA_BUG);
15360
15361         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
15362                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15363
15364         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15365             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15366             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
15367             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15368                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15369
15370         if (tg3_flag(tp, 57765_PLUS) &&
15371             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
15372                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15373
15374         if (!tg3_flag(tp, 5705_PLUS) ||
15375             tg3_flag(tp, 5780_CLASS) ||
15376             tg3_flag(tp, USE_JUMBO_BDFLAG))
15377                 tg3_flag_set(tp, JUMBO_CAPABLE);
15378
15379         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15380                               &pci_state_reg);
15381
15382         if (pci_is_pcie(tp->pdev)) {
15383                 u16 lnkctl;
15384
15385                 tg3_flag_set(tp, PCI_EXPRESS);
15386
15387                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15388                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15389                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
15390                             ASIC_REV_5906) {
15391                                 tg3_flag_clear(tp, HW_TSO_2);
15392                                 tg3_flag_clear(tp, TSO_CAPABLE);
15393                         }
15394                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15395                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15396                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
15397                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
15398                                 tg3_flag_set(tp, CLKREQ_BUG);
15399                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
15400                         tg3_flag_set(tp, L1PLLPD_EN);
15401                 }
15402         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
15403                 /* BCM5785 devices are effectively PCIe devices, and should
15404                  * follow PCIe codepaths, but do not have a PCIe capabilities
15405                  * section.
15406                  */
15407                 tg3_flag_set(tp, PCI_EXPRESS);
15408         } else if (!tg3_flag(tp, 5705_PLUS) ||
15409                    tg3_flag(tp, 5780_CLASS)) {
15410                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15411                 if (!tp->pcix_cap) {
15412                         dev_err(&tp->pdev->dev,
15413                                 "Cannot find PCI-X capability, aborting\n");
15414                         return -EIO;
15415                 }
15416
15417                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15418                         tg3_flag_set(tp, PCIX_MODE);
15419         }
15420
15421         /* If we have an AMD 762 or VIA K8T800 chipset, write
15422          * reordering to the mailbox registers done by the host
15423          * controller can cause major troubles.  We read back from
15424          * every mailbox register write to force the writes to be
15425          * posted to the chip in order.
15426          */
15427         if (pci_dev_present(tg3_write_reorder_chipsets) &&
15428             !tg3_flag(tp, PCI_EXPRESS))
15429                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15430
15431         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15432                              &tp->pci_cacheline_sz);
15433         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15434                              &tp->pci_lat_timer);
15435         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15436             tp->pci_lat_timer < 64) {
15437                 tp->pci_lat_timer = 64;
15438                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15439                                       tp->pci_lat_timer);
15440         }
15441
15442         /* Important! -- It is critical that the PCI-X hw workaround
15443          * situation is decided before the first MMIO register access.
15444          */
15445         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
15446                 /* 5700 BX chips need to have their TX producer index
15447                  * mailboxes written twice to workaround a bug.
15448                  */
15449                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15450
15451                 /* If we are in PCI-X mode, enable register write workaround.
15452                  *
15453                  * The workaround is to use indirect register accesses
15454                  * for all chip writes not to mailbox registers.
15455                  */
15456                 if (tg3_flag(tp, PCIX_MODE)) {
15457                         u32 pm_reg;
15458
15459                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15460
15461                         /* The chip can have it's power management PCI config
15462                          * space registers clobbered due to this bug.
15463                          * So explicitly force the chip into D0 here.
15464                          */
15465                         pci_read_config_dword(tp->pdev,
15466                                               tp->pm_cap + PCI_PM_CTRL,
15467                                               &pm_reg);
15468                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15469                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15470                         pci_write_config_dword(tp->pdev,
15471                                                tp->pm_cap + PCI_PM_CTRL,
15472                                                pm_reg);
15473
15474                         /* Also, force SERR#/PERR# in PCI command. */
15475                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15476                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15477                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15478                 }
15479         }
15480
15481         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15482                 tg3_flag_set(tp, PCI_HIGH_SPEED);
15483         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15484                 tg3_flag_set(tp, PCI_32BIT);
15485
15486         /* Chip-specific fixup from Broadcom driver */
15487         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
15488             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15489                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15490                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15491         }
15492
15493         /* Default fast path register access methods */
15494         tp->read32 = tg3_read32;
15495         tp->write32 = tg3_write32;
15496         tp->read32_mbox = tg3_read32;
15497         tp->write32_mbox = tg3_write32;
15498         tp->write32_tx_mbox = tg3_write32;
15499         tp->write32_rx_mbox = tg3_write32;
15500
15501         /* Various workaround register access methods */
15502         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15503                 tp->write32 = tg3_write_indirect_reg32;
15504         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
15505                  (tg3_flag(tp, PCI_EXPRESS) &&
15506                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
15507                 /*
15508                  * Back to back register writes can cause problems on these
15509                  * chips, the workaround is to read back all reg writes
15510                  * except those to mailbox regs.
15511                  *
15512                  * See tg3_write_indirect_reg32().
15513                  */
15514                 tp->write32 = tg3_write_flush_reg32;
15515         }
15516
15517         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15518                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15519                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15520                         tp->write32_rx_mbox = tg3_write_flush_reg32;
15521         }
15522
15523         if (tg3_flag(tp, ICH_WORKAROUND)) {
15524                 tp->read32 = tg3_read_indirect_reg32;
15525                 tp->write32 = tg3_write_indirect_reg32;
15526                 tp->read32_mbox = tg3_read_indirect_mbox;
15527                 tp->write32_mbox = tg3_write_indirect_mbox;
15528                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15529                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15530
15531                 iounmap(tp->regs);
15532                 tp->regs = NULL;
15533
15534                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15535                 pci_cmd &= ~PCI_COMMAND_MEMORY;
15536                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15537         }
15538         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15539                 tp->read32_mbox = tg3_read32_mbox_5906;
15540                 tp->write32_mbox = tg3_write32_mbox_5906;
15541                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15542                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15543         }
15544
15545         if (tp->write32 == tg3_write_indirect_reg32 ||
15546             (tg3_flag(tp, PCIX_MODE) &&
15547              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15548               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
15549                 tg3_flag_set(tp, SRAM_USE_CONFIG);
15550
15551         /* The memory arbiter has to be enabled in order for SRAM accesses
15552          * to succeed.  Normally on powerup the tg3 chip firmware will make
15553          * sure it is enabled, but other entities such as system netboot
15554          * code might disable it.
15555          */
15556         val = tr32(MEMARB_MODE);
15557         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15558
15559         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15560         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15561             tg3_flag(tp, 5780_CLASS)) {
15562                 if (tg3_flag(tp, PCIX_MODE)) {
15563                         pci_read_config_dword(tp->pdev,
15564                                               tp->pcix_cap + PCI_X_STATUS,
15565                                               &val);
15566                         tp->pci_fn = val & 0x7;
15567                 }
15568         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15569                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15570                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
15571                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15572                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
15573                         val = tr32(TG3_CPMU_STATUS);
15574
15575                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
15576                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
15577                 else
15578                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15579                                      TG3_CPMU_STATUS_FSHFT_5719;
15580         }
15581
15582         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
15583                 tp->write32_tx_mbox = tg3_write_flush_reg32;
15584                 tp->write32_rx_mbox = tg3_write_flush_reg32;
15585         }
15586
15587         /* Get eeprom hw config before calling tg3_set_power_state().
15588          * In particular, the TG3_FLAG_IS_NIC flag must be
15589          * determined before calling tg3_set_power_state() so that
15590          * we know whether or not to switch out of Vaux power.
15591          * When the flag is set, it means that GPIO1 is used for eeprom
15592          * write protect and also implies that it is a LOM where GPIOs
15593          * are not used to switch power.
15594          */
15595         tg3_get_eeprom_hw_cfg(tp);
15596
15597         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
15598                 tg3_flag_clear(tp, TSO_CAPABLE);
15599                 tg3_flag_clear(tp, TSO_BUG);
15600                 tp->fw_needed = NULL;
15601         }
15602
15603         if (tg3_flag(tp, ENABLE_APE)) {
15604                 /* Allow reads and writes to the
15605                  * APE register and memory space.
15606                  */
15607                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15608                                  PCISTATE_ALLOW_APE_SHMEM_WR |
15609                                  PCISTATE_ALLOW_APE_PSPACE_WR;
15610                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15611                                        pci_state_reg);
15612
15613                 tg3_ape_lock_init(tp);
15614         }
15615
15616         /* Set up tp->grc_local_ctrl before calling
15617          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
15618          * will bring 5700's external PHY out of reset.
15619          * It is also used as eeprom write protect on LOMs.
15620          */
15621         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15622         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15623             tg3_flag(tp, EEPROM_WRITE_PROT))
15624                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15625                                        GRC_LCLCTRL_GPIO_OUTPUT1);
15626         /* Unused GPIO3 must be driven as output on 5752 because there
15627          * are no pull-up resistors on unused GPIO pins.
15628          */
15629         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
15630                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15631
15632         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15633             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
15634             tg3_flag(tp, 57765_CLASS))
15635                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15636
15637         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15638             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15639                 /* Turn off the debug UART. */
15640                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15641                 if (tg3_flag(tp, IS_NIC))
15642                         /* Keep VMain power. */
15643                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15644                                               GRC_LCLCTRL_GPIO_OUTPUT0;
15645         }
15646
15647         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15648                 tp->grc_local_ctrl |=
15649                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
15650
15651         /* Switch out of Vaux if it is a NIC */
15652         tg3_pwrsrc_switch_to_vmain(tp);
15653
15654         /* Derive initial jumbo mode from MTU assigned in
15655          * ether_setup() via the alloc_etherdev() call
15656          */
15657         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15658                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15659
15660         /* Determine WakeOnLan speed to use. */
15661         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15662             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
15663             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
15664             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
15665                 tg3_flag_clear(tp, WOL_SPEED_100MB);
15666         } else {
15667                 tg3_flag_set(tp, WOL_SPEED_100MB);
15668         }
15669
15670         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15671                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15672
15673         /* A few boards don't want Ethernet@WireSpeed phy feature */
15674         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15675             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15676              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
15677              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
15678             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15679             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15680                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15681
15682         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
15683             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
15684                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15685         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
15686                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15687
15688         if (tg3_flag(tp, 5705_PLUS) &&
15689             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15690             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
15691             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
15692             !tg3_flag(tp, 57765_PLUS)) {
15693                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15694                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
15695                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15696                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
15697                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15698                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15699                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15700                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15701                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15702                 } else
15703                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15704         }
15705
15706         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15707             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
15708                 tp->phy_otp = tg3_read_otp_phycfg(tp);
15709                 if (tp->phy_otp == 0)
15710                         tp->phy_otp = TG3_OTP_DEFAULT;
15711         }
15712
15713         if (tg3_flag(tp, CPMU_PRESENT))
15714                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15715         else
15716                 tp->mi_mode = MAC_MI_MODE_BASE;
15717
15718         tp->coalesce_mode = 0;
15719         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
15720             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
15721                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15722
15723         /* Set these bits to enable statistics workaround. */
15724         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15725             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
15726             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
15727                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15728                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15729         }
15730
15731         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15732             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15733                 tg3_flag_set(tp, USE_PHYLIB);
15734
15735         err = tg3_mdio_init(tp);
15736         if (err)
15737                 return err;
15738
15739         /* Initialize data/descriptor byte/word swapping. */
15740         val = tr32(GRC_MODE);
15741         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
15742             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15743                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15744                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
15745                         GRC_MODE_B2HRX_ENABLE |
15746                         GRC_MODE_HTX2B_ENABLE |
15747                         GRC_MODE_HOST_STACKUP);
15748         else
15749                 val &= GRC_MODE_HOST_STACKUP;
15750
15751         tw32(GRC_MODE, val | tp->grc_mode);
15752
15753         tg3_switch_clocks(tp);
15754
15755         /* Clear this out for sanity. */
15756         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15757
15758         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15759                               &pci_state_reg);
15760         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15761             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15762                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
15763
15764                 if (chiprevid == CHIPREV_ID_5701_A0 ||
15765                     chiprevid == CHIPREV_ID_5701_B0 ||
15766                     chiprevid == CHIPREV_ID_5701_B2 ||
15767                     chiprevid == CHIPREV_ID_5701_B5) {
15768                         void __iomem *sram_base;
15769
15770                         /* Write some dummy words into the SRAM status block
15771                          * area, see if it reads back correctly.  If the return
15772                          * value is bad, force enable the PCIX workaround.
15773                          */
15774                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15775
15776                         writel(0x00000000, sram_base);
15777                         writel(0x00000000, sram_base + 4);
15778                         writel(0xffffffff, sram_base + 4);
15779                         if (readl(sram_base) != 0x00000000)
15780                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15781                 }
15782         }
15783
15784         udelay(50);
15785         tg3_nvram_init(tp);
15786
15787         grc_misc_cfg = tr32(GRC_MISC_CFG);
15788         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15789
15790         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15791             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15792              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15793                 tg3_flag_set(tp, IS_5788);
15794
15795         if (!tg3_flag(tp, IS_5788) &&
15796             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
15797                 tg3_flag_set(tp, TAGGED_STATUS);
15798         if (tg3_flag(tp, TAGGED_STATUS)) {
15799                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15800                                       HOSTCC_MODE_CLRTICK_TXBD);
15801
15802                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15803                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15804                                        tp->misc_host_ctrl);
15805         }
15806
15807         /* Preserve the APE MAC_MODE bits */
15808         if (tg3_flag(tp, ENABLE_APE))
15809                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15810         else
15811                 tp->mac_mode = 0;
15812
15813         if (tg3_10_100_only_device(tp, ent))
15814                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15815
15816         err = tg3_phy_probe(tp);
15817         if (err) {
15818                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15819                 /* ... but do not return immediately ... */
15820                 tg3_mdio_fini(tp);
15821         }
15822
15823         tg3_read_vpd(tp);
15824         tg3_read_fw_ver(tp);
15825
15826         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15827                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15828         } else {
15829                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15830                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15831                 else
15832                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15833         }
15834
15835         /* 5700 {AX,BX} chips have a broken status block link
15836          * change bit implementation, so we must use the
15837          * status register in those cases.
15838          */
15839         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15840                 tg3_flag_set(tp, USE_LINKCHG_REG);
15841         else
15842                 tg3_flag_clear(tp, USE_LINKCHG_REG);
15843
15844         /* The led_ctrl is set during tg3_phy_probe, here we might
15845          * have to force the link status polling mechanism based
15846          * upon subsystem IDs.
15847          */
15848         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15849             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15850             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15851                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15852                 tg3_flag_set(tp, USE_LINKCHG_REG);
15853         }
15854
15855         /* For all SERDES we poll the MAC status register. */
15856         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15857                 tg3_flag_set(tp, POLL_SERDES);
15858         else
15859                 tg3_flag_clear(tp, POLL_SERDES);
15860
15861         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15862         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15863         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15864             tg3_flag(tp, PCIX_MODE)) {
15865                 tp->rx_offset = NET_SKB_PAD;
15866 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15867                 tp->rx_copy_thresh = ~(u16)0;
15868 #endif
15869         }
15870
15871         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15872         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15873         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15874
15875         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15876
15877         /* Increment the rx prod index on the rx std ring by at most
15878          * 8 for these chips to workaround hw errata.
15879          */
15880         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15881             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15882             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15883                 tp->rx_std_max_post = 8;
15884
15885         if (tg3_flag(tp, ASPM_WORKAROUND))
15886                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15887                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
15888
15889         return err;
15890 }
15891
15892 #ifdef CONFIG_SPARC
15893 static int tg3_get_macaddr_sparc(struct tg3 *tp)
15894 {
15895         struct net_device *dev = tp->dev;
15896         struct pci_dev *pdev = tp->pdev;
15897         struct device_node *dp = pci_device_to_OF_node(pdev);
15898         const unsigned char *addr;
15899         int len;
15900
15901         addr = of_get_property(dp, "local-mac-address", &len);
15902         if (addr && len == 6) {
15903                 memcpy(dev->dev_addr, addr, 6);
15904                 return 0;
15905         }
15906         return -ENODEV;
15907 }
15908
15909 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
15910 {
15911         struct net_device *dev = tp->dev;
15912
15913         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15914         return 0;
15915 }
15916 #endif
15917
/* Determine the device MAC address and store it in tp->dev->dev_addr.
 *
 * Sources are tried in order until one yields a valid address:
 *   1. OpenFirmware property (sparc only),
 *   2. the SSB host core (SSB-attached parts),
 *   3. the NIC SRAM MAC address mailbox,
 *   4. NVRAM at a chip/function-dependent offset,
 *   5. the live MAC_ADDR_0_{HIGH,LOW} registers,
 *   6. the system IDPROM (sparc only).
 *
 * Returns 0 on success, -EINVAL if no valid address could be found.
 */
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	/* Pick the NVRAM offset of the MAC address for this chip/function. */
	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		/* Dual-MAC parts: the second MAC's address lives at 0xcc. */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* If the NVRAM arbitration lock cannot be taken, reset the
		 * NVRAM state machine instead of leaving it wedged.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		/* Per-function offsets on 5717+ multi-function devices. */
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {	/* 0x484b == ASCII "HK" signature */
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* hi holds the last 2 of its 4 big-endian bytes as
			 * the first two address octets; lo holds the rest.
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
15999
/* DMA burst-boundary goals consumed by tg3_calc_dma_bndry(): stop bursts
 * at every cache line, or allow them to span multiple cache lines.
 */
#define BOUNDARY_SINGLE_CACHELINE       1
#define BOUNDARY_MULTI_CACHELINE        2
16002
16003 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16004 {
16005         int cacheline_size;
16006         u8 byte;
16007         int goal;
16008
16009         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16010         if (byte == 0)
16011                 cacheline_size = 1024;
16012         else
16013                 cacheline_size = (int) byte * 4;
16014
16015         /* On 5703 and later chips, the boundary bits have no
16016          * effect.
16017          */
16018         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
16019             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
16020             !tg3_flag(tp, PCI_EXPRESS))
16021                 goto out;
16022
16023 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16024         goal = BOUNDARY_MULTI_CACHELINE;
16025 #else
16026 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16027         goal = BOUNDARY_SINGLE_CACHELINE;
16028 #else
16029         goal = 0;
16030 #endif
16031 #endif
16032
16033         if (tg3_flag(tp, 57765_PLUS)) {
16034                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16035                 goto out;
16036         }
16037
16038         if (!goal)
16039                 goto out;
16040
16041         /* PCI controllers on most RISC systems tend to disconnect
16042          * when a device tries to burst across a cache-line boundary.
16043          * Therefore, letting tg3 do so just wastes PCI bandwidth.
16044          *
16045          * Unfortunately, for PCI-E there are only limited
16046          * write-side controls for this, and thus for reads
16047          * we will still get the disconnects.  We'll also waste
16048          * these PCI cycles for both read and write for chips
16049          * other than 5700 and 5701 which do not implement the
16050          * boundary bits.
16051          */
16052         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16053                 switch (cacheline_size) {
16054                 case 16:
16055                 case 32:
16056                 case 64:
16057                 case 128:
16058                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16059                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16060                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16061                         } else {
16062                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16063                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16064                         }
16065                         break;
16066
16067                 case 256:
16068                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16069                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16070                         break;
16071
16072                 default:
16073                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16074                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16075                         break;
16076                 }
16077         } else if (tg3_flag(tp, PCI_EXPRESS)) {
16078                 switch (cacheline_size) {
16079                 case 16:
16080                 case 32:
16081                 case 64:
16082                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16083                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16084                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16085                                 break;
16086                         }
16087                         /* fallthrough */
16088                 case 128:
16089                 default:
16090                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16091                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16092                         break;
16093                 }
16094         } else {
16095                 switch (cacheline_size) {
16096                 case 16:
16097                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16098                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16099                                         DMA_RWCTRL_WRITE_BNDRY_16);
16100                                 break;
16101                         }
16102                         /* fallthrough */
16103                 case 32:
16104                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16105                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16106                                         DMA_RWCTRL_WRITE_BNDRY_32);
16107                                 break;
16108                         }
16109                         /* fallthrough */
16110                 case 64:
16111                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16112                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16113                                         DMA_RWCTRL_WRITE_BNDRY_64);
16114                                 break;
16115                         }
16116                         /* fallthrough */
16117                 case 128:
16118                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16119                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16120                                         DMA_RWCTRL_WRITE_BNDRY_128);
16121                                 break;
16122                         }
16123                         /* fallthrough */
16124                 case 256:
16125                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
16126                                 DMA_RWCTRL_WRITE_BNDRY_256);
16127                         break;
16128                 case 512:
16129                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
16130                                 DMA_RWCTRL_WRITE_BNDRY_512);
16131                         break;
16132                 case 1024:
16133                 default:
16134                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16135                                 DMA_RWCTRL_WRITE_BNDRY_1024);
16136                         break;
16137                 }
16138         }
16139
16140 out:
16141         return val;
16142 }
16143
/* Run one test DMA transaction of @size bytes against the coherent buffer
 * @buf/@buf_dma.  An internal buffer descriptor is written into NIC SRAM
 * through the PCI memory window, then handed to the read-DMA engine
 * (@to_device != 0: device reads host memory) or the write-DMA engine
 * (@to_device == 0: device writes host memory).
 *
 * Returns 0 when the descriptor shows up in the matching completion FIFO
 * within 40 polls of 100us (~4ms), -ENODEV on timeout.
 */
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the DMA machinery before the test: clear the completion
	 * FIFOs, both DMA status registers, the buffer manager and the FTQs.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Describe the host buffer to the chip. */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* NOTE(review): cqid_sqid values are chip-internal queue ids
		 * from Broadcom documentation not visible here.
		 */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one word at a time via the
	 * PCI memory window, then restore the window base to zero.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the DMA by enqueueing the descriptor. */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll the completion FIFO for our descriptor address. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
16224
16225 #define TEST_BUFFER_SIZE        0x2000
16226
16227 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16228         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16229         { },
16230 };
16231
/* Choose and program a safe DMA read/write control (TG3PCI_DMA_RW_CTRL)
 * configuration.  Watermarks and workaround bits are first derived from
 * the bus type (PCIe / PCI / PCI-X) and ASIC revision; on 5700/5701
 * chips a real write/read-back DMA loop is then run against a host
 * buffer to detect the known write-DMA corruption bug and, if it
 * triggers, fall back to a 16-byte write boundary.
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be
 * allocated, or -ENODEV if DMA transfers fail or corrupt data.
 */
static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	/* Coherent host buffer for the write/read-back exercise below. */
	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Baseline PCI write/read command codes, then let
	 * tg3_calc_dma_bndry() fold in the boundary bits.
	 */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* 57765+ chips need no further tuning and no DMA exercise. */
	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		/* Conventional PCI: watermarks depend on ASIC revision. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		/* PCI-X mode. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}
	/* SSB GigE cores may require one-DMA-at-a-time operation
	 * (flag set from ssb_gige_one_dma_at_once() at probe time).
	 */
	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the actual DMA write/read-back exercise. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known pattern (its word index). */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			/* Corruption: retry once with the 16-byte write
			 * boundary workaround; if that was already in
			 * effect, the hardware is unusable.
			 */
			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
16423
16424 static void tg3_init_bufmgr_config(struct tg3 *tp)
16425 {
16426         if (tg3_flag(tp, 57765_PLUS)) {
16427                 tp->bufmgr_config.mbuf_read_dma_low_water =
16428                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16429                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16430                         DEFAULT_MB_MACRX_LOW_WATER_57765;
16431                 tp->bufmgr_config.mbuf_high_water =
16432                         DEFAULT_MB_HIGH_WATER_57765;
16433
16434                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16435                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16436                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16437                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16438                 tp->bufmgr_config.mbuf_high_water_jumbo =
16439                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16440         } else if (tg3_flag(tp, 5705_PLUS)) {
16441                 tp->bufmgr_config.mbuf_read_dma_low_water =
16442                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16443                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16444                         DEFAULT_MB_MACRX_LOW_WATER_5705;
16445                 tp->bufmgr_config.mbuf_high_water =
16446                         DEFAULT_MB_HIGH_WATER_5705;
16447                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
16448                         tp->bufmgr_config.mbuf_mac_rx_low_water =
16449                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
16450                         tp->bufmgr_config.mbuf_high_water =
16451                                 DEFAULT_MB_HIGH_WATER_5906;
16452                 }
16453
16454                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16455                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16456                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16457                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16458                 tp->bufmgr_config.mbuf_high_water_jumbo =
16459                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16460         } else {
16461                 tp->bufmgr_config.mbuf_read_dma_low_water =
16462                         DEFAULT_MB_RDMA_LOW_WATER;
16463                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16464                         DEFAULT_MB_MACRX_LOW_WATER;
16465                 tp->bufmgr_config.mbuf_high_water =
16466                         DEFAULT_MB_HIGH_WATER;
16467
16468                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16469                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16470                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16471                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16472                 tp->bufmgr_config.mbuf_high_water_jumbo =
16473                         DEFAULT_MB_HIGH_WATER_JUMBO;
16474         }
16475
16476         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16477         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16478 }
16479
16480 static char *tg3_phy_string(struct tg3 *tp)
16481 {
16482         switch (tp->phy_id & TG3_PHY_ID_MASK) {
16483         case TG3_PHY_ID_BCM5400:        return "5400";
16484         case TG3_PHY_ID_BCM5401:        return "5401";
16485         case TG3_PHY_ID_BCM5411:        return "5411";
16486         case TG3_PHY_ID_BCM5701:        return "5701";
16487         case TG3_PHY_ID_BCM5703:        return "5703";
16488         case TG3_PHY_ID_BCM5704:        return "5704";
16489         case TG3_PHY_ID_BCM5705:        return "5705";
16490         case TG3_PHY_ID_BCM5750:        return "5750";
16491         case TG3_PHY_ID_BCM5752:        return "5752";
16492         case TG3_PHY_ID_BCM5714:        return "5714";
16493         case TG3_PHY_ID_BCM5780:        return "5780";
16494         case TG3_PHY_ID_BCM5755:        return "5755";
16495         case TG3_PHY_ID_BCM5787:        return "5787";
16496         case TG3_PHY_ID_BCM5784:        return "5784";
16497         case TG3_PHY_ID_BCM5756:        return "5722/5756";
16498         case TG3_PHY_ID_BCM5906:        return "5906";
16499         case TG3_PHY_ID_BCM5761:        return "5761";
16500         case TG3_PHY_ID_BCM5718C:       return "5718C";
16501         case TG3_PHY_ID_BCM5718S:       return "5718S";
16502         case TG3_PHY_ID_BCM57765:       return "57765";
16503         case TG3_PHY_ID_BCM5719C:       return "5719C";
16504         case TG3_PHY_ID_BCM5720C:       return "5720C";
16505         case TG3_PHY_ID_BCM5762:        return "5762C";
16506         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
16507         case 0:                 return "serdes";
16508         default:                return "unknown";
16509         }
16510 }
16511
16512 static char *tg3_bus_string(struct tg3 *tp, char *str)
16513 {
16514         if (tg3_flag(tp, PCI_EXPRESS)) {
16515                 strcpy(str, "PCI Express");
16516                 return str;
16517         } else if (tg3_flag(tp, PCIX_MODE)) {
16518                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16519
16520                 strcpy(str, "PCIX:");
16521
16522                 if ((clock_ctrl == 7) ||
16523                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16524                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16525                         strcat(str, "133MHz");
16526                 else if (clock_ctrl == 0)
16527                         strcat(str, "33MHz");
16528                 else if (clock_ctrl == 2)
16529                         strcat(str, "50MHz");
16530                 else if (clock_ctrl == 4)
16531                         strcat(str, "66MHz");
16532                 else if (clock_ctrl == 6)
16533                         strcat(str, "100MHz");
16534         } else {
16535                 strcpy(str, "PCI:");
16536                 if (tg3_flag(tp, PCI_HIGH_SPEED))
16537                         strcat(str, "66MHz");
16538                 else
16539                         strcat(str, "33MHz");
16540         }
16541         if (tg3_flag(tp, PCI_32BIT))
16542                 strcat(str, ":32-bit");
16543         else
16544                 strcat(str, ":64-bit");
16545         return str;
16546 }
16547
16548 static void tg3_init_coal(struct tg3 *tp)
16549 {
16550         struct ethtool_coalesce *ec = &tp->coal;
16551
16552         memset(ec, 0, sizeof(*ec));
16553         ec->cmd = ETHTOOL_GCOALESCE;
16554         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16555         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16556         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16557         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16558         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16559         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16560         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16561         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16562         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16563
16564         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16565                                  HOSTCC_MODE_CLRTICK_TXBD)) {
16566                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16567                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16568                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16569                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16570         }
16571
16572         if (tg3_flag(tp, 5705_PLUS)) {
16573                 ec->rx_coalesce_usecs_irq = 0;
16574                 ec->tx_coalesce_usecs_irq = 0;
16575                 ec->stats_block_coalesce_usecs = 0;
16576         }
16577 }
16578
16579 static int tg3_init_one(struct pci_dev *pdev,
16580                                   const struct pci_device_id *ent)
16581 {
16582         struct net_device *dev;
16583         struct tg3 *tp;
16584         int i, err, pm_cap;
16585         u32 sndmbx, rcvmbx, intmbx;
16586         char str[40];
16587         u64 dma_mask, persist_dma_mask;
16588         netdev_features_t features = 0;
16589
16590         printk_once(KERN_INFO "%s\n", version);
16591
16592         err = pci_enable_device(pdev);
16593         if (err) {
16594                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16595                 return err;
16596         }
16597
16598         err = pci_request_regions(pdev, DRV_MODULE_NAME);
16599         if (err) {
16600                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16601                 goto err_out_disable_pdev;
16602         }
16603
16604         pci_set_master(pdev);
16605
16606         /* Find power-management capability. */
16607         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16608         if (pm_cap == 0) {
16609                 dev_err(&pdev->dev,
16610                         "Cannot find Power Management capability, aborting\n");
16611                 err = -EIO;
16612                 goto err_out_free_res;
16613         }
16614
16615         err = pci_set_power_state(pdev, PCI_D0);
16616         if (err) {
16617                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16618                 goto err_out_free_res;
16619         }
16620
16621         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16622         if (!dev) {
16623                 err = -ENOMEM;
16624                 goto err_out_power_down;
16625         }
16626
16627         SET_NETDEV_DEV(dev, &pdev->dev);
16628
16629         tp = netdev_priv(dev);
16630         tp->pdev = pdev;
16631         tp->dev = dev;
16632         tp->pm_cap = pm_cap;
16633         tp->rx_mode = TG3_DEF_RX_MODE;
16634         tp->tx_mode = TG3_DEF_TX_MODE;
16635         tp->irq_sync = 1;
16636
16637         if (tg3_debug > 0)
16638                 tp->msg_enable = tg3_debug;
16639         else
16640                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16641
16642         if (pdev_is_ssb_gige_core(pdev)) {
16643                 tg3_flag_set(tp, IS_SSB_CORE);
16644                 if (ssb_gige_must_flush_posted_writes(pdev))
16645                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
16646                 if (ssb_gige_one_dma_at_once(pdev))
16647                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
16648                 if (ssb_gige_have_roboswitch(pdev))
16649                         tg3_flag_set(tp, ROBOSWITCH);
16650                 if (ssb_gige_is_rgmii(pdev))
16651                         tg3_flag_set(tp, RGMII_MODE);
16652         }
16653
16654         /* The word/byte swap controls here control register access byte
16655          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
16656          * setting below.
16657          */
16658         tp->misc_host_ctrl =
16659                 MISC_HOST_CTRL_MASK_PCI_INT |
16660                 MISC_HOST_CTRL_WORD_SWAP |
16661                 MISC_HOST_CTRL_INDIR_ACCESS |
16662                 MISC_HOST_CTRL_PCISTATE_RW;
16663
16664         /* The NONFRM (non-frame) byte/word swap controls take effect
16665          * on descriptor entries, anything which isn't packet data.
16666          *
16667          * The StrongARM chips on the board (one for tx, one for rx)
16668          * are running in big-endian mode.
16669          */
16670         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16671                         GRC_MODE_WSWAP_NONFRM_DATA);
16672 #ifdef __BIG_ENDIAN
16673         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16674 #endif
16675         spin_lock_init(&tp->lock);
16676         spin_lock_init(&tp->indirect_lock);
16677         INIT_WORK(&tp->reset_task, tg3_reset_task);
16678
16679         tp->regs = pci_ioremap_bar(pdev, BAR_0);
16680         if (!tp->regs) {
16681                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16682                 err = -ENOMEM;
16683                 goto err_out_free_dev;
16684         }
16685
16686         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16687             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16688             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16689             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16690             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16691             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16692             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16693             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16694             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16695             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16696             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16697             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
16698                 tg3_flag_set(tp, ENABLE_APE);
16699                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16700                 if (!tp->aperegs) {
16701                         dev_err(&pdev->dev,
16702                                 "Cannot map APE registers, aborting\n");
16703                         err = -ENOMEM;
16704                         goto err_out_iounmap;
16705                 }
16706         }
16707
16708         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16709         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16710
16711         dev->ethtool_ops = &tg3_ethtool_ops;
16712         dev->watchdog_timeo = TG3_TX_TIMEOUT;
16713         dev->netdev_ops = &tg3_netdev_ops;
16714         dev->irq = pdev->irq;
16715
16716         err = tg3_get_invariants(tp, ent);
16717         if (err) {
16718                 dev_err(&pdev->dev,
16719                         "Problem fetching invariants of chip, aborting\n");
16720                 goto err_out_apeunmap;
16721         }
16722
16723         /* The EPB bridge inside 5714, 5715, and 5780 and any
16724          * device behind the EPB cannot support DMA addresses > 40-bit.
16725          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16726          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16727          * do DMA address check in tg3_start_xmit().
16728          */
16729         if (tg3_flag(tp, IS_5788))
16730                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16731         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16732                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16733 #ifdef CONFIG_HIGHMEM
16734                 dma_mask = DMA_BIT_MASK(64);
16735 #endif
16736         } else
16737                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16738
16739         /* Configure DMA attributes. */
16740         if (dma_mask > DMA_BIT_MASK(32)) {
16741                 err = pci_set_dma_mask(pdev, dma_mask);
16742                 if (!err) {
16743                         features |= NETIF_F_HIGHDMA;
16744                         err = pci_set_consistent_dma_mask(pdev,
16745                                                           persist_dma_mask);
16746                         if (err < 0) {
16747                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16748                                         "DMA for consistent allocations\n");
16749                                 goto err_out_apeunmap;
16750                         }
16751                 }
16752         }
16753         if (err || dma_mask == DMA_BIT_MASK(32)) {
16754                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16755                 if (err) {
16756                         dev_err(&pdev->dev,
16757                                 "No usable DMA configuration, aborting\n");
16758                         goto err_out_apeunmap;
16759                 }
16760         }
16761
16762         tg3_init_bufmgr_config(tp);
16763
16764         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16765
16766         /* 5700 B0 chips do not support checksumming correctly due
16767          * to hardware bugs.
16768          */
16769         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
16770                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16771
16772                 if (tg3_flag(tp, 5755_PLUS))
16773                         features |= NETIF_F_IPV6_CSUM;
16774         }
16775
16776         /* TSO is on by default on chips that support hardware TSO.
16777          * Firmware TSO on older chips gives lower performance, so it
16778          * is off by default, but can be enabled using ethtool.
16779          */
16780         if ((tg3_flag(tp, HW_TSO_1) ||
16781              tg3_flag(tp, HW_TSO_2) ||
16782              tg3_flag(tp, HW_TSO_3)) &&
16783             (features & NETIF_F_IP_CSUM))
16784                 features |= NETIF_F_TSO;
16785         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16786                 if (features & NETIF_F_IPV6_CSUM)
16787                         features |= NETIF_F_TSO6;
16788                 if (tg3_flag(tp, HW_TSO_3) ||
16789                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
16790                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
16791                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
16792                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
16793                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
16794                         features |= NETIF_F_TSO_ECN;
16795         }
16796
16797         dev->features |= features;
16798         dev->vlan_features |= features;
16799
16800         /*
16801          * Add loopback capability only for a subset of devices that support
16802          * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
16803          * loopback for the remaining devices.
16804          */
16805         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
16806             !tg3_flag(tp, CPMU_PRESENT))
16807                 /* Add the loopback capability */
16808                 features |= NETIF_F_LOOPBACK;
16809
16810         dev->hw_features |= features;
16811
16812         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
16813             !tg3_flag(tp, TSO_CAPABLE) &&
16814             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16815                 tg3_flag_set(tp, MAX_RXPEND_64);
16816                 tp->rx_pending = 63;
16817         }
16818
16819         err = tg3_get_device_address(tp);
16820         if (err) {
16821                 dev_err(&pdev->dev,
16822                         "Could not obtain valid ethernet address, aborting\n");
16823                 goto err_out_apeunmap;
16824         }
16825
16826         /*
16827          * Reset chip in case UNDI or EFI driver did not shutdown
16828          * DMA self test will enable WDMAC and we'll see (spurious)
16829          * pending DMA on the PCI bus at that point.
16830          */
16831         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16832             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16833                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16834                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16835         }
16836
16837         err = tg3_test_dma(tp);
16838         if (err) {
16839                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
16840                 goto err_out_apeunmap;
16841         }
16842
16843         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
16844         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
16845         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
16846         for (i = 0; i < tp->irq_max; i++) {
16847                 struct tg3_napi *tnapi = &tp->napi[i];
16848
16849                 tnapi->tp = tp;
16850                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
16851
16852                 tnapi->int_mbox = intmbx;
16853                 if (i <= 4)
16854                         intmbx += 0x8;
16855                 else
16856                         intmbx += 0x4;
16857
16858                 tnapi->consmbox = rcvmbx;
16859                 tnapi->prodmbox = sndmbx;
16860
16861                 if (i)
16862                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
16863                 else
16864                         tnapi->coal_now = HOSTCC_MODE_NOW;
16865
16866                 if (!tg3_flag(tp, SUPPORT_MSIX))
16867                         break;
16868
16869                 /*
16870                  * If we support MSIX, we'll be using RSS.  If we're using
16871                  * RSS, the first vector only handles link interrupts and the
16872                  * remaining vectors handle rx and tx interrupts.  Reuse the
16873                  * mailbox values for the next iteration.  The values we setup
16874                  * above are still useful for the single vectored mode.
16875                  */
16876                 if (!i)
16877                         continue;
16878
16879                 rcvmbx += 0x8;
16880
16881                 if (sndmbx & 0x4)
16882                         sndmbx -= 0x4;
16883                 else
16884                         sndmbx += 0xc;
16885         }
16886
16887         tg3_init_coal(tp);
16888
16889         pci_set_drvdata(pdev, dev);
16890
16891         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
16892             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
16893             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
16894                 tg3_flag_set(tp, PTP_CAPABLE);
16895
16896         if (tg3_flag(tp, 5717_PLUS)) {
16897                 /* Resume a low-power mode */
16898                 tg3_frob_aux_power(tp, false);
16899         }
16900
16901         tg3_timer_init(tp);
16902
16903         err = register_netdev(dev);
16904         if (err) {
16905                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
16906                 goto err_out_apeunmap;
16907         }
16908
16909         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16910                     tp->board_part_number,
16911                     tp->pci_chip_rev_id,
16912                     tg3_bus_string(tp, str),
16913                     dev->dev_addr);
16914
16915         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
16916                 struct phy_device *phydev;
16917                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
16918                 netdev_info(dev,
16919                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16920                             phydev->drv->name, dev_name(&phydev->dev));
16921         } else {
16922                 char *ethtype;
16923
16924                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16925                         ethtype = "10/100Base-TX";
16926                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16927                         ethtype = "1000Base-SX";
16928                 else
16929                         ethtype = "10/100/1000Base-T";
16930
16931                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16932                             "(WireSpeed[%d], EEE[%d])\n",
16933                             tg3_phy_string(tp), ethtype,
16934                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16935                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
16936         }
16937
16938         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16939                     (dev->features & NETIF_F_RXCSUM) != 0,
16940                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
16941                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
16942                     tg3_flag(tp, ENABLE_ASF) != 0,
16943                     tg3_flag(tp, TSO_CAPABLE) != 0);
16944         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16945                     tp->dma_rwctrl,
16946                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16947                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
16948
16949         pci_save_state(pdev);
16950
16951         return 0;
16952
16953 err_out_apeunmap:
16954         if (tp->aperegs) {
16955                 iounmap(tp->aperegs);
16956                 tp->aperegs = NULL;
16957         }
16958
16959 err_out_iounmap:
16960         if (tp->regs) {
16961                 iounmap(tp->regs);
16962                 tp->regs = NULL;
16963         }
16964
16965 err_out_free_dev:
16966         free_netdev(dev);
16967
16968 err_out_power_down:
16969         pci_set_power_state(pdev, PCI_D3hot);
16970
16971 err_out_free_res:
16972         pci_release_regions(pdev);
16973
16974 err_out_disable_pdev:
16975         pci_disable_device(pdev);
16976         pci_set_drvdata(pdev, NULL);
16977         return err;
16978 }
16979
16980 static void tg3_remove_one(struct pci_dev *pdev)
16981 {
16982         struct net_device *dev = pci_get_drvdata(pdev);
16983
16984         if (dev) {
16985                 struct tg3 *tp = netdev_priv(dev);
16986
16987                 release_firmware(tp->fw);
16988
16989                 tg3_reset_task_cancel(tp);
16990
16991                 if (tg3_flag(tp, USE_PHYLIB)) {
16992                         tg3_phy_fini(tp);
16993                         tg3_mdio_fini(tp);
16994                 }
16995
16996                 unregister_netdev(dev);
16997                 if (tp->aperegs) {
16998                         iounmap(tp->aperegs);
16999                         tp->aperegs = NULL;
17000                 }
17001                 if (tp->regs) {
17002                         iounmap(tp->regs);
17003                         tp->regs = NULL;
17004                 }
17005                 free_netdev(dev);
17006                 pci_release_regions(pdev);
17007                 pci_disable_device(pdev);
17008                 pci_set_drvdata(pdev, NULL);
17009         }
17010 }
17011
17012 #ifdef CONFIG_PM_SLEEP
/* PM sleep callback: quiesce the device before system suspend.
 *
 * Cancels the reset task, stops the PHY, data path and timer, disables
 * interrupts, detaches the net device, halts the hardware, and then
 * prepares it for low power.  If tg3_power_down_prepare() fails, the
 * hardware is restarted so the device stays usable, but the original
 * error is still returned to the PM core.
 */
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* An interface that is down has nothing to quiesce. */
	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		/* Power-down failed: restart the hardware so the device
		 * remains functional, then report the original failure.
		 */
		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		/* Restart the PHY only if the hardware came back cleanly. */
		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
17065
17066 static int tg3_resume(struct device *device)
17067 {
17068         struct pci_dev *pdev = to_pci_dev(device);
17069         struct net_device *dev = pci_get_drvdata(pdev);
17070         struct tg3 *tp = netdev_priv(dev);
17071         int err;
17072
17073         if (!netif_running(dev))
17074                 return 0;
17075
17076         netif_device_attach(dev);
17077
17078         tg3_full_lock(tp, 0);
17079
17080         tg3_flag_set(tp, INIT_COMPLETE);
17081         err = tg3_restart_hw(tp, 1);
17082         if (err)
17083                 goto out;
17084
17085         tg3_timer_start(tp);
17086
17087         tg3_netif_start(tp);
17088
17089 out:
17090         tg3_full_unlock(tp);
17091
17092         if (!err)
17093                 tg3_phy_start(tp);
17094
17095         return err;
17096 }
17097
/* Wire the suspend/resume handlers into a dev_pm_ops table.  When
 * CONFIG_PM_SLEEP is not set, the driver registers no PM callbacks.
 */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
17106
17107 /**
17108  * tg3_io_error_detected - called when PCI error is detected
17109  * @pdev: Pointer to PCI device
17110  * @state: The current pci connection state
17111  *
17112  * This function is called after a PCI bus error affecting
17113  * this device has been detected.
17114  */
17115 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17116                                               pci_channel_state_t state)
17117 {
17118         struct net_device *netdev = pci_get_drvdata(pdev);
17119         struct tg3 *tp = netdev_priv(netdev);
17120         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17121
17122         netdev_info(netdev, "PCI I/O error detected\n");
17123
17124         rtnl_lock();
17125
17126         if (!netif_running(netdev))
17127                 goto done;
17128
17129         tg3_phy_stop(tp);
17130
17131         tg3_netif_stop(tp);
17132
17133         tg3_timer_stop(tp);
17134
17135         /* Want to make sure that the reset task doesn't run */
17136         tg3_reset_task_cancel(tp);
17137
17138         netif_device_detach(netdev);
17139
17140         /* Clean up software state, even if MMIO is blocked */
17141         tg3_full_lock(tp, 0);
17142         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17143         tg3_full_unlock(tp);
17144
17145 done:
17146         if (state == pci_channel_io_perm_failure)
17147                 err = PCI_ERS_RESULT_DISCONNECT;
17148         else
17149                 pci_disable_device(pdev);
17150
17151         rtnl_unlock();
17152
17153         return err;
17154 }
17155
17156 /**
17157  * tg3_io_slot_reset - called after the pci bus has been reset.
17158  * @pdev: Pointer to PCI device
17159  *
17160  * Restart the card from scratch, as if from a cold-boot.
17161  * At this point, the card has exprienced a hard reset,
17162  * followed by fixups by BIOS, and has its config space
17163  * set up identically to what it was at cold boot.
17164  */
17165 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17166 {
17167         struct net_device *netdev = pci_get_drvdata(pdev);
17168         struct tg3 *tp = netdev_priv(netdev);
17169         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17170         int err;
17171
17172         rtnl_lock();
17173
17174         if (pci_enable_device(pdev)) {
17175                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17176                 goto done;
17177         }
17178
17179         pci_set_master(pdev);
17180         pci_restore_state(pdev);
17181         pci_save_state(pdev);
17182
17183         if (!netif_running(netdev)) {
17184                 rc = PCI_ERS_RESULT_RECOVERED;
17185                 goto done;
17186         }
17187
17188         err = tg3_power_up(tp);
17189         if (err)
17190                 goto done;
17191
17192         rc = PCI_ERS_RESULT_RECOVERED;
17193
17194 done:
17195         rtnl_unlock();
17196
17197         return rc;
17198 }
17199
17200 /**
17201  * tg3_io_resume - called when traffic can start flowing again.
17202  * @pdev: Pointer to PCI device
17203  *
17204  * This callback is called when the error recovery driver tells
17205  * us that its OK to resume normal operation.
17206  */
17207 static void tg3_io_resume(struct pci_dev *pdev)
17208 {
17209         struct net_device *netdev = pci_get_drvdata(pdev);
17210         struct tg3 *tp = netdev_priv(netdev);
17211         int err;
17212
17213         rtnl_lock();
17214
17215         if (!netif_running(netdev))
17216                 goto done;
17217
17218         tg3_full_lock(tp, 0);
17219         tg3_flag_set(tp, INIT_COMPLETE);
17220         err = tg3_restart_hw(tp, 1);
17221         if (err) {
17222                 tg3_full_unlock(tp);
17223                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17224                 goto done;
17225         }
17226
17227         netif_device_attach(netdev);
17228
17229         tg3_timer_start(tp);
17230
17231         tg3_netif_start(tp);
17232
17233         tg3_full_unlock(tp);
17234
17235         tg3_phy_start(tp);
17236
17237 done:
17238         rtnl_unlock();
17239 }
17240
/* PCI error-recovery callbacks: detect the error, reset the slot,
 * then resume traffic.
 */
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected = tg3_io_error_detected,
	.slot_reset     = tg3_io_slot_reset,
	.resume         = tg3_io_resume
};
17246
/* PCI driver description: probe/remove entry points, supported device
 * IDs, error-recovery handlers and power-management callbacks.
 */
static struct pci_driver tg3_driver = {
	.name           = DRV_MODULE_NAME,
	.id_table       = tg3_pci_tbl,
	.probe          = tg3_init_one,
	.remove         = tg3_remove_one,
	.err_handler    = &tg3_err_handler,
	.driver.pm      = TG3_PM_OPS,
};
17255
/* Module entry point: register the driver with the PCI core. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
17260
/* Module exit point: unregister the driver from the PCI core. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
17265
/* Hook the module load/unload entry points. */
module_init(tg3_init);
module_exit(tg3_cleanup);