/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
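
/* For example, tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags) and simply tests that
 * bit in the flag bitmap; the set/clear variants work the same way.
 */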

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     135
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "Nov 14, 2013"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
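
/* Because TG3_TX_RING_SIZE is a power of two, the mask in NEXT_TX() is
 * equivalent to a modulo: for the 512-entry ring, NEXT_TX(511) is
 * (511 + 1) & 511 == 0, exactly what (511 + 1) % 512 would give.
 */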

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG357766       "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};
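
/* ethtool -S reports these keys in array order, so the order here must
 * stay in sync with the hardware statistics structure in tg3.h.
 */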

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
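
/* Typical usage: tw32() for plain posted writes, tw32_f() when the
 * write must be flushed before continuing, and tw32_wait_f() when a
 * settling delay is also needed, e.g.
 * tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40) in tg3_switch_clocks().
 */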

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
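                /* fall through */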
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}
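
/* Callers pair tg3_ape_lock() with tg3_ape_unlock() around APE shared
 * memory accesses, e.g.:
 *
 *      if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *              return -EBUSY;
 *      ...touch APE shared memory...
 *      tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *
 * as tg3_ape_event_lock() below does.
 */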

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
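                /* fall through */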
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

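        /* frame_val now holds a complete MDIO read frame: PHY address,
         * register number, and the READ/START opcodes.  Writing it to
         * MAC_MI_COM starts the transaction; the loop below polls
         * MI_COM_BUSY until the shuttle completes or we give up.
         */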
        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
        u32 val;
        int err;

        err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

        if (err)
                return err;

        if (enable)
                val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
        else
                val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

        err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

        return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
        return tg3_writephy(tp, MII_TG3_MISC_SHDW,
                            reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (__tg3_readphy(tp, mii_id, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (__tg3_writephy(tp, mii_id, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

1406 static void tg3_mdio_config_5785(struct tg3 *tp)
1407 {
1408         u32 val;
1409         struct phy_device *phydev;
1410
1411         phydev = tp->mdio_bus->phy_map[tp->phy_addr];
1412         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1413         case PHY_ID_BCM50610:
1414         case PHY_ID_BCM50610M:
1415                 val = MAC_PHYCFG2_50610_LED_MODES;
1416                 break;
1417         case PHY_ID_BCMAC131:
1418                 val = MAC_PHYCFG2_AC131_LED_MODES;
1419                 break;
1420         case PHY_ID_RTL8211C:
1421                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1422                 break;
1423         case PHY_ID_RTL8201E:
1424                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1425                 break;
1426         default:
1427                 return;
1428         }
1429
1430         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1431                 tw32(MAC_PHYCFG2, val);
1432
1433                 val = tr32(MAC_PHYCFG1);
1434                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1435                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1436                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1437                 tw32(MAC_PHYCFG1, val);
1438
1439                 return;
1440         }
1441
1442         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1443                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1444                        MAC_PHYCFG2_FMODE_MASK_MASK |
1445                        MAC_PHYCFG2_GMODE_MASK_MASK |
1446                        MAC_PHYCFG2_ACT_MASK_MASK   |
1447                        MAC_PHYCFG2_QUAL_MASK_MASK |
1448                        MAC_PHYCFG2_INBAND_ENABLE;
1449
1450         tw32(MAC_PHYCFG2, val);
1451
1452         val = tr32(MAC_PHYCFG1);
1453         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1454                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1455         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1456                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1457                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1458                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1459                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1460         }
1461         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1462                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1463         tw32(MAC_PHYCFG1, val);
1464
1465         val = tr32(MAC_EXT_RGMII_MODE);
1466         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1467                  MAC_RGMII_MODE_RX_QUALITY |
1468                  MAC_RGMII_MODE_RX_ACTIVITY |
1469                  MAC_RGMII_MODE_RX_ENG_DET |
1470                  MAC_RGMII_MODE_TX_ENABLE |
1471                  MAC_RGMII_MODE_TX_LOWPWR |
1472                  MAC_RGMII_MODE_TX_RESET);
1473         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1474                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1475                         val |= MAC_RGMII_MODE_RX_INT_B |
1476                                MAC_RGMII_MODE_RX_QUALITY |
1477                                MAC_RGMII_MODE_RX_ACTIVITY |
1478                                MAC_RGMII_MODE_RX_ENG_DET;
1479                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1480                         val |= MAC_RGMII_MODE_TX_ENABLE |
1481                                MAC_RGMII_MODE_TX_LOWPWR |
1482                                MAC_RGMII_MODE_TX_RESET;
1483         }
1484         tw32(MAC_EXT_RGMII_MODE, val);
1485 }
1486
1487 static void tg3_mdio_start(struct tg3 *tp)
1488 {
1489         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1490         tw32_f(MAC_MI_MODE, tp->mi_mode);
1491         udelay(80);
1492
1493         if (tg3_flag(tp, MDIOBUS_INITED) &&
1494             tg3_asic_rev(tp) == ASIC_REV_5785)
1495                 tg3_mdio_config_5785(tp);
1496 }
1497
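     /* Work out the PHY address for this device and, when phylib is in
      * use, allocate and register an MDIO bus for it.  On 5717-class
      * parts the address is derived from the PCI function number (with
      * an offset of 7 for serdes); SSB cores behind a roboswitch query
      * the switch instead.
      */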
1498 static int tg3_mdio_init(struct tg3 *tp)
1499 {
1500         int i;
1501         u32 reg;
1502         struct phy_device *phydev;
1503
1504         if (tg3_flag(tp, 5717_PLUS)) {
1505                 u32 is_serdes;
1506
1507                 tp->phy_addr = tp->pci_fn + 1;
1508
1509                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1510                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1511                 else
1512                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1513                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1514                 if (is_serdes)
1515                         tp->phy_addr += 7;
1516         } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1517                 int addr;
1518
1519                 addr = ssb_gige_get_phyaddr(tp->pdev);
1520                 if (addr < 0)
1521                         return addr;
1522                 tp->phy_addr = addr;
1523         } else
1524                 tp->phy_addr = TG3_PHY_MII_ADDR;
1525
1526         tg3_mdio_start(tp);
1527
1528         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1529                 return 0;
1530
1531         tp->mdio_bus = mdiobus_alloc();
1532         if (tp->mdio_bus == NULL)
1533                 return -ENOMEM;
1534
1535         tp->mdio_bus->name     = "tg3 mdio bus";
1536         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1537                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1538         tp->mdio_bus->priv     = tp;
1539         tp->mdio_bus->parent   = &tp->pdev->dev;
1540         tp->mdio_bus->read     = &tg3_mdio_read;
1541         tp->mdio_bus->write    = &tg3_mdio_write;
1542         tp->mdio_bus->reset    = &tg3_mdio_reset;
1543         tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1544         tp->mdio_bus->irq      = &tp->mdio_irq[0];
1545
1546         for (i = 0; i < PHY_MAX_ADDR; i++)
1547                 tp->mdio_bus->irq[i] = PHY_POLL;
1548
1549         /* The bus registration will look for all the PHYs on the mdio bus.
1550          * Unfortunately, it does not ensure the PHY is powered up before
1551          * accessing the PHY ID registers.  A chip reset is the
1552          * quickest way to bring the device back to an operational state.
1553          */
1554         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1555                 tg3_bmcr_reset(tp);
1556
1557         i = mdiobus_register(tp->mdio_bus);
1558         if (i) {
1559                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1560                 mdiobus_free(tp->mdio_bus);
1561                 return i;
1562         }
1563
1564         phydev = tp->mdio_bus->phy_map[tp->phy_addr];
1565
1566         if (!phydev || !phydev->drv) {
1567                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1568                 mdiobus_unregister(tp->mdio_bus);
1569                 mdiobus_free(tp->mdio_bus);
1570                 return -ENODEV;
1571         }
1572
1573         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1574         case PHY_ID_BCM57780:
1575                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1576                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1577                 break;
1578         case PHY_ID_BCM50610:
1579         case PHY_ID_BCM50610M:
1580                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1581                                      PHY_BRCM_RX_REFCLK_UNUSED |
1582                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1583                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1584                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1585                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1586                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1587                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1588                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1589                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1590                 /* fallthru */
1591         case PHY_ID_RTL8211C:
1592                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1593                 break;
1594         case PHY_ID_RTL8201E:
1595         case PHY_ID_BCMAC131:
1596                 phydev->interface = PHY_INTERFACE_MODE_MII;
1597                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1598                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1599                 break;
1600         }
1601
1602         tg3_flag_set(tp, MDIOBUS_INITED);
1603
1604         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1605                 tg3_mdio_config_5785(tp);
1606
1607         return 0;
1608 }
1609
1610 static void tg3_mdio_fini(struct tg3 *tp)
1611 {
1612         if (tg3_flag(tp, MDIOBUS_INITED)) {
1613                 tg3_flag_clear(tp, MDIOBUS_INITED);
1614                 mdiobus_unregister(tp->mdio_bus);
1615                 mdiobus_free(tp->mdio_bus);
1616         }
1617 }
1618
1619 /* tp->lock is held. */
1620 static inline void tg3_generate_fw_event(struct tg3 *tp)
1621 {
1622         u32 val;
1623
1624         val = tr32(GRC_RX_CPU_EVENT);
1625         val |= GRC_RX_CPU_DRIVER_EVENT;
1626         tw32_f(GRC_RX_CPU_EVENT, val);
1627
1628         tp->last_event_jiffies = jiffies;
1629 }
1630
1631 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1632
1633 /* tp->lock is held. */
1634 static void tg3_wait_for_event_ack(struct tg3 *tp)
1635 {
1636         int i;
1637         unsigned int delay_cnt;
1638         long time_remain;
1639
1640         /* If enough time has passed, no wait is necessary. */
1641         time_remain = (long)(tp->last_event_jiffies + 1 +
1642                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1643                       (long)jiffies;
1644         if (time_remain < 0)
1645                 return;
1646
1647         /* Check if we can shorten the wait time. */
1648         delay_cnt = jiffies_to_usecs(time_remain);
1649         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1650                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1651         delay_cnt = (delay_cnt >> 3) + 1;
1652
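             /* Poll in 8 usec steps; delay_cnt above converts the
              * remaining timeout into a matching iteration count.
              */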
1653         for (i = 0; i < delay_cnt; i++) {
1654                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1655                         break;
1656                 if (pci_channel_offline(tp->pdev))
1657                         break;
1658
1659                 udelay(8);
1660         }
1661 }
1662
1663 /* tp->lock is held. */
1664 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1665 {
1666         u32 reg, val;
1667
1668         val = 0;
1669         if (!tg3_readphy(tp, MII_BMCR, &reg))
1670                 val = reg << 16;
1671         if (!tg3_readphy(tp, MII_BMSR, &reg))
1672                 val |= (reg & 0xffff);
1673         *data++ = val;
1674
1675         val = 0;
1676         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1677                 val = reg << 16;
1678         if (!tg3_readphy(tp, MII_LPA, &reg))
1679                 val |= (reg & 0xffff);
1680         *data++ = val;
1681
1682         val = 0;
1683         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1684                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1685                         val = reg << 16;
1686                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1687                         val |= (reg & 0xffff);
1688         }
1689         *data++ = val;
1690
1691         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1692                 val = reg << 16;
1693         else
1694                 val = 0;
1695         *data++ = val;
1696 }
1697
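     /* Send the current PHY state to the management firmware on
      * ASF-enabled 5780-class devices via the firmware command mailbox.
      */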
1698 /* tp->lock is held. */
1699 static void tg3_ump_link_report(struct tg3 *tp)
1700 {
1701         u32 data[4];
1702
1703         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1704                 return;
1705
1706         tg3_phy_gather_ump_data(tp, data);
1707
1708         tg3_wait_for_event_ack(tp);
1709
1710         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1711         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1712         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1713         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1714         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1715         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1716
1717         tg3_generate_fw_event(tp);
1718 }
1719
1720 /* tp->lock is held. */
1721 static void tg3_stop_fw(struct tg3 *tp)
1722 {
1723         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1724                 /* Wait for RX cpu to ACK the previous event. */
1725                 tg3_wait_for_event_ack(tp);
1726
1727                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1728
1729                 tg3_generate_fw_event(tp);
1730
1731                 /* Wait for RX cpu to ACK this event. */
1732                 tg3_wait_for_event_ack(tp);
1733         }
1734 }
1735
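     /* Post the pre-reset firmware signature: the magic value that the
      * bootcode later complements (see tg3_poll_fw()), plus the driver
      * state when the new-style ASF handshake is in effect.
      */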
1736 /* tp->lock is held. */
1737 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1738 {
1739         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1740                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1741
1742         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1743                 switch (kind) {
1744                 case RESET_KIND_INIT:
1745                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1746                                       DRV_STATE_START);
1747                         break;
1748
1749                 case RESET_KIND_SHUTDOWN:
1750                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1751                                       DRV_STATE_UNLOAD);
1752                         break;
1753
1754                 case RESET_KIND_SUSPEND:
1755                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1756                                       DRV_STATE_SUSPEND);
1757                         break;
1758
1759                 default:
1760                         break;
1761                 }
1762         }
1763 }
1764
1765 /* tp->lock is held. */
1766 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1767 {
1768         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1769                 switch (kind) {
1770                 case RESET_KIND_INIT:
1771                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1772                                       DRV_STATE_START_DONE);
1773                         break;
1774
1775                 case RESET_KIND_SHUTDOWN:
1776                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1777                                       DRV_STATE_UNLOAD_DONE);
1778                         break;
1779
1780                 default:
1781                         break;
1782                 }
1783         }
1784 }
1785
1786 /* tp->lock is held. */
1787 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1788 {
1789         if (tg3_flag(tp, ENABLE_ASF)) {
1790                 switch (kind) {
1791                 case RESET_KIND_INIT:
1792                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1793                                       DRV_STATE_START);
1794                         break;
1795
1796                 case RESET_KIND_SHUTDOWN:
1797                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1798                                       DRV_STATE_UNLOAD);
1799                         break;
1800
1801                 case RESET_KIND_SUSPEND:
1802                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1803                                       DRV_STATE_SUSPEND);
1804                         break;
1805
1806                 default:
1807                         break;
1808                 }
1809         }
1810 }
1811
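     /* Wait for the bootcode to finish initializing.  5906 parts poll
      * VCPU_STATUS; everything else polls the firmware mailbox for the
      * one's complement of the pre-reset magic value.  A timeout is not
      * fatal, since some Sun onboard parts ship without firmware.
      */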
1812 static int tg3_poll_fw(struct tg3 *tp)
1813 {
1814         int i;
1815         u32 val;
1816
1817         if (tg3_flag(tp, NO_FWARE_REPORTED))
1818                 return 0;
1819
1820         if (tg3_flag(tp, IS_SSB_CORE)) {
1821                 /* We don't use firmware. */
1822                 return 0;
1823         }
1824
1825         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1826                 /* Wait up to 20ms for init done. */
1827                 for (i = 0; i < 200; i++) {
1828                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1829                                 return 0;
1830                         if (pci_channel_offline(tp->pdev))
1831                                 return -ENODEV;
1832
1833                         udelay(100);
1834                 }
1835                 return -ENODEV;
1836         }
1837
1838         /* Wait for firmware initialization to complete. */
1839         for (i = 0; i < 100000; i++) {
1840                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1841                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1842                         break;
1843                 if (pci_channel_offline(tp->pdev)) {
1844                         if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1845                                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1846                                 netdev_info(tp->dev, "No firmware running\n");
1847                         }
1848
1849                         break;
1850                 }
1851
1852                 udelay(10);
1853         }
1854
1855         /* Chip might not be fitted with firmware.  Some Sun onboard
1856          * parts are configured like that.  So don't signal the timeout
1857          * of the above loop as an error, but do report the lack of
1858          * running firmware once.
1859          */
1860         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1861                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1862
1863                 netdev_info(tp->dev, "No firmware running\n");
1864         }
1865
1866         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1867                 /* The 57765 A0 needs a little more
1868                  * time to do some important work.
1869                  */
1870                 mdelay(10);
1871         }
1872
1873         return 0;
1874 }
1875
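     /* Log the link state (speed, duplex, flow control, and EEE) and
      * forward a link update to the management firmware.
      */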
1876 static void tg3_link_report(struct tg3 *tp)
1877 {
1878         if (!netif_carrier_ok(tp->dev)) {
1879                 netif_info(tp, link, tp->dev, "Link is down\n");
1880                 tg3_ump_link_report(tp);
1881         } else if (netif_msg_link(tp)) {
1882                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1883                             (tp->link_config.active_speed == SPEED_1000 ?
1884                              1000 :
1885                              (tp->link_config.active_speed == SPEED_100 ?
1886                               100 : 10)),
1887                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1888                              "full" : "half"));
1889
1890                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1891                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1892                             "on" : "off",
1893                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1894                             "on" : "off");
1895
1896                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1897                         netdev_info(tp->dev, "EEE is %s\n",
1898                                     tp->setlpicnt ? "enabled" : "disabled");
1899
1900                 tg3_ump_link_report(tp);
1901         }
1902
1903         tp->link_up = netif_carrier_ok(tp->dev);
1904 }
1905
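     /* Translate 802.3 pause advertisement bits into FLOW_CTRL_TX/RX
      * flags: PAUSE_CAP alone means symmetric pause, PAUSE_ASYM alone
      * means TX-only, and both together mean RX-only.
      */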
1906 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1907 {
1908         u32 flowctrl = 0;
1909
1910         if (adv & ADVERTISE_PAUSE_CAP) {
1911                 flowctrl |= FLOW_CTRL_RX;
1912                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1913                         flowctrl |= FLOW_CTRL_TX;
1914         } else if (adv & ADVERTISE_PAUSE_ASYM)
1915                 flowctrl |= FLOW_CTRL_TX;
1916
1917         return flowctrl;
1918 }
1919
1920 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1921 {
1922         u16 miireg;
1923
1924         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1925                 miireg = ADVERTISE_1000XPAUSE;
1926         else if (flow_ctrl & FLOW_CTRL_TX)
1927                 miireg = ADVERTISE_1000XPSE_ASYM;
1928         else if (flow_ctrl & FLOW_CTRL_RX)
1929                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1930         else
1931                 miireg = 0;
1932
1933         return miireg;
1934 }
1935
1936 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1937 {
1938         u32 flowctrl = 0;
1939
1940         if (adv & ADVERTISE_1000XPAUSE) {
1941                 flowctrl |= FLOW_CTRL_RX;
1942                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1943                         flowctrl |= FLOW_CTRL_TX;
1944         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1945                 flowctrl |= FLOW_CTRL_TX;
1946
1947         return flowctrl;
1948 }
1949
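     /* Resolve the local and remote 1000BASE-X pause advertisements
      * into the flow control configuration this end should apply.
      */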
1950 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1951 {
1952         u8 cap = 0;
1953
1954         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1955                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1956         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1957                 if (lcladv & ADVERTISE_1000XPAUSE)
1958                         cap = FLOW_CTRL_RX;
1959                 if (rmtadv & ADVERTISE_1000XPAUSE)
1960                         cap = FLOW_CTRL_TX;
1961         }
1962
1963         return cap;
1964 }
1965
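     /* Apply the negotiated (or forced) pause configuration to the MAC
      * via RX_MODE/TX_MODE.  Called with tp->lock held.
      */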
1966 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1967 {
1968         u8 autoneg;
1969         u8 flowctrl = 0;
1970         u32 old_rx_mode = tp->rx_mode;
1971         u32 old_tx_mode = tp->tx_mode;
1972
1973         if (tg3_flag(tp, USE_PHYLIB))
1974                 autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
1975         else
1976                 autoneg = tp->link_config.autoneg;
1977
1978         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1979                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1980                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1981                 else
1982                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1983         } else
1984                 flowctrl = tp->link_config.flowctrl;
1985
1986         tp->link_config.active_flowctrl = flowctrl;
1987
1988         if (flowctrl & FLOW_CTRL_RX)
1989                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1990         else
1991                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1992
1993         if (old_rx_mode != tp->rx_mode)
1994                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1995
1996         if (flowctrl & FLOW_CTRL_TX)
1997                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1998         else
1999                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
2000
2001         if (old_tx_mode != tp->tx_mode)
2002                 tw32_f(MAC_TX_MODE, tp->tx_mode);
2003 }
2004
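     /* phylib link-change callback: re-sync the MAC mode, flow control,
      * and transmit IPG/slot time with the PHY's new state, and report
      * any link change.
      */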
2005 static void tg3_adjust_link(struct net_device *dev)
2006 {
2007         u8 oldflowctrl, linkmesg = 0;
2008         u32 mac_mode, lcl_adv, rmt_adv;
2009         struct tg3 *tp = netdev_priv(dev);
2010         struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2011
2012         spin_lock_bh(&tp->lock);
2013
2014         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2015                                     MAC_MODE_HALF_DUPLEX);
2016
2017         oldflowctrl = tp->link_config.active_flowctrl;
2018
2019         if (phydev->link) {
2020                 lcl_adv = 0;
2021                 rmt_adv = 0;
2022
2023                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2024                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2025                 else if (phydev->speed == SPEED_1000 ||
2026                          tg3_asic_rev(tp) != ASIC_REV_5785)
2027                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
2028                 else
2029                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2030
2031                 if (phydev->duplex == DUPLEX_HALF)
2032                         mac_mode |= MAC_MODE_HALF_DUPLEX;
2033                 else {
2034                         lcl_adv = mii_advertise_flowctrl(
2035                                   tp->link_config.flowctrl);
2036
2037                         if (phydev->pause)
2038                                 rmt_adv = LPA_PAUSE_CAP;
2039                         if (phydev->asym_pause)
2040                                 rmt_adv |= LPA_PAUSE_ASYM;
2041                 }
2042
2043                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2044         } else
2045                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2046
2047         if (mac_mode != tp->mac_mode) {
2048                 tp->mac_mode = mac_mode;
2049                 tw32_f(MAC_MODE, tp->mac_mode);
2050                 udelay(40);
2051         }
2052
2053         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2054                 if (phydev->speed == SPEED_10)
2055                         tw32(MAC_MI_STAT,
2056                              MAC_MI_STAT_10MBPS_MODE |
2057                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2058                 else
2059                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2060         }
2061
2062         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2063                 tw32(MAC_TX_LENGTHS,
2064                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2065                       (6 << TX_LENGTHS_IPG_SHIFT) |
2066                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2067         else
2068                 tw32(MAC_TX_LENGTHS,
2069                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2070                       (6 << TX_LENGTHS_IPG_SHIFT) |
2071                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2072
2073         if (phydev->link != tp->old_link ||
2074             phydev->speed != tp->link_config.active_speed ||
2075             phydev->duplex != tp->link_config.active_duplex ||
2076             oldflowctrl != tp->link_config.active_flowctrl)
2077                 linkmesg = 1;
2078
2079         tp->old_link = phydev->link;
2080         tp->link_config.active_speed = phydev->speed;
2081         tp->link_config.active_duplex = phydev->duplex;
2082
2083         spin_unlock_bh(&tp->lock);
2084
2085         if (linkmesg)
2086                 tg3_link_report(tp);
2087 }
2088
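     /* Connect the MAC to its phylib PHY and trim the PHY's supported
      * feature set down to what the MAC can actually do.
      */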
2089 static int tg3_phy_init(struct tg3 *tp)
2090 {
2091         struct phy_device *phydev;
2092
2093         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2094                 return 0;
2095
2096         /* Bring the PHY back to a known state. */
2097         tg3_bmcr_reset(tp);
2098
2099         phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2100
2101         /* Attach the MAC to the PHY. */
2102         phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2103                              tg3_adjust_link, phydev->interface);
2104         if (IS_ERR(phydev)) {
2105                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2106                 return PTR_ERR(phydev);
2107         }
2108
2109         /* Mask with MAC supported features. */
2110         switch (phydev->interface) {
2111         case PHY_INTERFACE_MODE_GMII:
2112         case PHY_INTERFACE_MODE_RGMII:
2113                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2114                         phydev->supported &= (PHY_GBIT_FEATURES |
2115                                               SUPPORTED_Pause |
2116                                               SUPPORTED_Asym_Pause);
2117                         break;
2118                 }
2119                 /* fallthru */
2120         case PHY_INTERFACE_MODE_MII:
2121                 phydev->supported &= (PHY_BASIC_FEATURES |
2122                                       SUPPORTED_Pause |
2123                                       SUPPORTED_Asym_Pause);
2124                 break;
2125         default:
2126                 phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
2127                 return -EINVAL;
2128         }
2129
2130         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2131
2132         phydev->advertising = phydev->supported;
2133
2134         return 0;
2135 }
2136
2137 static void tg3_phy_start(struct tg3 *tp)
2138 {
2139         struct phy_device *phydev;
2140
2141         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2142                 return;
2143
2144         phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2145
2146         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2147                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2148                 phydev->speed = tp->link_config.speed;
2149                 phydev->duplex = tp->link_config.duplex;
2150                 phydev->autoneg = tp->link_config.autoneg;
2151                 phydev->advertising = tp->link_config.advertising;
2152         }
2153
2154         phy_start(phydev);
2155
2156         phy_start_aneg(phydev);
2157 }
2158
2159 static void tg3_phy_stop(struct tg3 *tp)
2160 {
2161         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2162                 return;
2163
2164         phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
2165 }
2166
2167 static void tg3_phy_fini(struct tg3 *tp)
2168 {
2169         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2170                 phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
2171                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2172         }
2173 }
2174
2175 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2176 {
2177         int err;
2178         u32 val;
2179
2180         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2181                 return 0;
2182
2183         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2184                 /* Cannot do read-modify-write on 5401 */
2185                 err = tg3_phy_auxctl_write(tp,
2186                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2187                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2188                                            0x4c20);
2189                 goto done;
2190         }
2191
2192         err = tg3_phy_auxctl_read(tp,
2193                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2194         if (err)
2195                 return err;
2196
2197         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2198         err = tg3_phy_auxctl_write(tp,
2199                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2200
2201 done:
2202         return err;
2203 }
2204
2205 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2206 {
2207         u32 phytest;
2208
2209         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2210                 u32 phy;
2211
2212                 tg3_writephy(tp, MII_TG3_FET_TEST,
2213                              phytest | MII_TG3_FET_SHADOW_EN);
2214                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2215                         if (enable)
2216                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2217                         else
2218                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2219                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2220                 }
2221                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2222         }
2223 }
2224
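     /* Enable or disable the PHY's auto power-down (APD) feature, via
      * the FET shadow registers on FET PHYs and the MISC shadow
      * registers otherwise.
      */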
2225 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2226 {
2227         u32 reg;
2228
2229         if (!tg3_flag(tp, 5705_PLUS) ||
2230             (tg3_flag(tp, 5717_PLUS) &&
2231              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2232                 return;
2233
2234         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2235                 tg3_phy_fet_toggle_apd(tp, enable);
2236                 return;
2237         }
2238
2239         reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2240               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2241               MII_TG3_MISC_SHDW_SCR5_SDTL |
2242               MII_TG3_MISC_SHDW_SCR5_C125OE;
2243         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2244                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2245
2246         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2247
2249         reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2250         if (enable)
2251                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2252
2253         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2254 }
2255
2256 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2257 {
2258         u32 phy;
2259
2260         if (!tg3_flag(tp, 5705_PLUS) ||
2261             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2262                 return;
2263
2264         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2265                 u32 ephy;
2266
2267                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2268                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2269
2270                         tg3_writephy(tp, MII_TG3_FET_TEST,
2271                                      ephy | MII_TG3_FET_SHADOW_EN);
2272                         if (!tg3_readphy(tp, reg, &phy)) {
2273                                 if (enable)
2274                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2275                                 else
2276                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2277                                 tg3_writephy(tp, reg, phy);
2278                         }
2279                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2280                 }
2281         } else {
2282                 int ret;
2283
2284                 ret = tg3_phy_auxctl_read(tp,
2285                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2286                 if (!ret) {
2287                         if (enable)
2288                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2289                         else
2290                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2291                         tg3_phy_auxctl_write(tp,
2292                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2293                 }
2294         }
2295 }
2296
2297 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2298 {
2299         int ret;
2300         u32 val;
2301
2302         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2303                 return;
2304
2305         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2306         if (!ret)
2307                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2308                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2309 }
2310
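     /* Program the PHY DSP coefficients recovered from the chip's
      * one-time-programmable (OTP) storage.
      */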
2311 static void tg3_phy_apply_otp(struct tg3 *tp)
2312 {
2313         u32 otp, phy;
2314
2315         if (!tp->phy_otp)
2316                 return;
2317
2318         otp = tp->phy_otp;
2319
2320         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2321                 return;
2322
2323         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2324         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2325         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2326
2327         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2328               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2329         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2330
2331         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2332         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2333         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2334
2335         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2336         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2337
2338         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2339         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2340
2341         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2342               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2343         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2344
2345         tg3_phy_toggle_auxctl_smdsp(tp, false);
2346 }
2347
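     /* Snapshot the device's current EEE state (active/enabled status,
      * advertisements, LPI settings, and timer) into @eee, or into
      * tp->eee when @eee is NULL.
      */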
2348 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2349 {
2350         u32 val;
2351         struct ethtool_eee *dest = &tp->eee;
2352
2353         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2354                 return;
2355
2356         if (eee)
2357                 dest = eee;
2358
2359         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2360                 return;
2361
2362         /* Pull eee_active */
2363         if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2364             val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2365                 dest->eee_active = 1;
2366         } else
2367                 dest->eee_active = 0;
2368
2369         /* Pull lp advertised settings */
2370         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2371                 return;
2372         dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2373
2374         /* Pull advertised and eee_enabled settings */
2375         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2376                 return;
2377         dest->eee_enabled = !!val;
2378         dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2379
2380         /* Pull tx_lpi_enabled */
2381         val = tr32(TG3_CPMU_EEE_MODE);
2382         dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2383
2384         /* Pull lpi timer value */
2385         dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2386 }
2387
2388 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2389 {
2390         u32 val;
2391
2392         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2393                 return;
2394
2395         tp->setlpicnt = 0;
2396
2397         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2398             current_link_up &&
2399             tp->link_config.active_duplex == DUPLEX_FULL &&
2400             (tp->link_config.active_speed == SPEED_100 ||
2401              tp->link_config.active_speed == SPEED_1000)) {
2402                 u32 eeectl;
2403
2404                 if (tp->link_config.active_speed == SPEED_1000)
2405                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2406                 else
2407                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2408
2409                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2410
2411                 tg3_eee_pull_config(tp, NULL);
2412                 if (tp->eee.eee_active)
2413                         tp->setlpicnt = 2;
2414         }
2415
2416         if (!tp->setlpicnt) {
2417                 if (current_link_up &&
2418                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2419                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2420                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2421                 }
2422
2423                 val = tr32(TG3_CPMU_EEE_MODE);
2424                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2425         }
2426 }
2427
2428 static void tg3_phy_eee_enable(struct tg3 *tp)
2429 {
2430         u32 val;
2431
2432         if (tp->link_config.active_speed == SPEED_1000 &&
2433             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2434              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2435              tg3_flag(tp, 57765_CLASS)) &&
2436             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2437                 val = MII_TG3_DSP_TAP26_ALNOKO |
2438                       MII_TG3_DSP_TAP26_RMRXSTO;
2439                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2440                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2441         }
2442
2443         val = tr32(TG3_CPMU_EEE_MODE);
2444         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2445 }
2446
2447 static int tg3_wait_macro_done(struct tg3 *tp)
2448 {
2449         int limit = 100;
2450
2451         while (limit--) {
2452                 u32 tmp32;
2453
2454                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2455                         if ((tmp32 & 0x1000) == 0)
2456                                 break;
2457                 }
2458         }
2459         if (limit < 0)
2460                 return -EBUSY;
2461
2462         return 0;
2463 }
2464
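     /* Write a known test pattern to each of the four DSP channels and
      * verify that it reads back correctly.  A macro timeout asks the
      * caller (via @resetp) to reset the PHY before retrying.
      */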
2465 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2466 {
2467         static const u32 test_pat[4][6] = {
2468         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2469         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2470         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2471         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2472         };
2473         int chan;
2474
2475         for (chan = 0; chan < 4; chan++) {
2476                 int i;
2477
2478                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2479                              (chan * 0x2000) | 0x0200);
2480                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2481
2482                 for (i = 0; i < 6; i++)
2483                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2484                                      test_pat[chan][i]);
2485
2486                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2487                 if (tg3_wait_macro_done(tp)) {
2488                         *resetp = 1;
2489                         return -EBUSY;
2490                 }
2491
2492                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2493                              (chan * 0x2000) | 0x0200);
2494                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2495                 if (tg3_wait_macro_done(tp)) {
2496                         *resetp = 1;
2497                         return -EBUSY;
2498                 }
2499
2500                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2501                 if (tg3_wait_macro_done(tp)) {
2502                         *resetp = 1;
2503                         return -EBUSY;
2504                 }
2505
2506                 for (i = 0; i < 6; i += 2) {
2507                         u32 low, high;
2508
2509                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2510                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2511                             tg3_wait_macro_done(tp)) {
2512                                 *resetp = 1;
2513                                 return -EBUSY;
2514                         }
2515                         low &= 0x7fff;
2516                         high &= 0x000f;
2517                         if (low != test_pat[chan][i] ||
2518                             high != test_pat[chan][i+1]) {
2519                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2520                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2521                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2522
2523                                 return -EBUSY;
2524                         }
2525                 }
2526         }
2527
2528         return 0;
2529 }
2530
2531 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2532 {
2533         int chan;
2534
2535         for (chan = 0; chan < 4; chan++) {
2536                 int i;
2537
2538                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2539                              (chan * 0x2000) | 0x0200);
2540                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2541                 for (i = 0; i < 6; i++)
2542                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2543                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2544                 if (tg3_wait_macro_done(tp))
2545                         return -EBUSY;
2546         }
2547
2548         return 0;
2549 }
2550
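     /* PHY reset workaround for the 5703/5704/5705: force 1000 Mbps
      * full-duplex master mode, rerun the DSP test-pattern check until
      * it passes, then restore the original settings.
      */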
2551 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2552 {
2553         u32 reg32, phy9_orig;
2554         int retries, do_phy_reset, err;
2555
2556         retries = 10;
2557         do_phy_reset = 1;
2558         do {
2559                 if (do_phy_reset) {
2560                         err = tg3_bmcr_reset(tp);
2561                         if (err)
2562                                 return err;
2563                         do_phy_reset = 0;
2564                 }
2565
2566                 /* Disable transmitter and interrupt.  */
2567                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2568                         continue;
2569
2570                 reg32 |= 0x3000;
2571                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2572
2573                 /* Set full-duplex, 1000 Mbps.  */
2574                 tg3_writephy(tp, MII_BMCR,
2575                              BMCR_FULLDPLX | BMCR_SPEED1000);
2576
2577                 /* Set to master mode.  */
2578                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2579                         continue;
2580
2581                 tg3_writephy(tp, MII_CTRL1000,
2582                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2583
2584                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2585                 if (err)
2586                         return err;
2587
2588                 /* Block the PHY control access.  */
2589                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2590
2591                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2592                 if (!err)
2593                         break;
2594         } while (--retries);
2595
2596         err = tg3_phy_reset_chanpat(tp);
2597         if (err)
2598                 return err;
2599
2600         tg3_phydsp_write(tp, 0x8005, 0x0000);
2601
2602         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2603         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2604
2605         tg3_phy_toggle_auxctl_smdsp(tp, false);
2606
2607         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2608
2609         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2610                 reg32 &= ~0x3000;
2611                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2612         } else if (!err)
2613                 err = -EBUSY;
2614
2615         return err;
2616 }
2617
2618 static void tg3_carrier_off(struct tg3 *tp)
2619 {
2620         netif_carrier_off(tp->dev);
2621         tp->link_up = false;
2622 }
2623
2624 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2625 {
2626         if (tg3_flag(tp, ENABLE_ASF))
2627                 netdev_warn(tp->dev,
2628                             "Management side-band traffic will be interrupted during phy settings change\n");
2629 }
2630
2631 /* Reset the tigon3 PHY and reapply any chip-specific workarounds
2632  * needed to bring it back to a fully operational state.
2633  */
2634 static int tg3_phy_reset(struct tg3 *tp)
2635 {
2636         u32 val, cpmuctrl;
2637         int err;
2638
2639         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2640                 val = tr32(GRC_MISC_CFG);
2641                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2642                 udelay(40);
2643         }
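             /* Read BMSR twice: the link bit is latched, so the second
              * read reflects the current state (and confirms that the
              * PHY is responding).
              */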
2644         err  = tg3_readphy(tp, MII_BMSR, &val);
2645         err |= tg3_readphy(tp, MII_BMSR, &val);
2646         if (err != 0)
2647                 return -EBUSY;
2648
2649         if (netif_running(tp->dev) && tp->link_up) {
2650                 netif_carrier_off(tp->dev);
2651                 tg3_link_report(tp);
2652         }
2653
2654         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2655             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2656             tg3_asic_rev(tp) == ASIC_REV_5705) {
2657                 err = tg3_phy_reset_5703_4_5(tp);
2658                 if (err)
2659                         return err;
2660                 goto out;
2661         }
2662
2663         cpmuctrl = 0;
2664         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2665             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2666                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2667                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2668                         tw32(TG3_CPMU_CTRL,
2669                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2670         }
2671
2672         err = tg3_bmcr_reset(tp);
2673         if (err)
2674                 return err;
2675
2676         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2677                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2678                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2679
2680                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2681         }
2682
2683         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2684             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2685                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2686                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2687                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2688                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2689                         udelay(40);
2690                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2691                 }
2692         }
2693
2694         if (tg3_flag(tp, 5717_PLUS) &&
2695             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2696                 return 0;
2697
2698         tg3_phy_apply_otp(tp);
2699
2700         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2701                 tg3_phy_toggle_apd(tp, true);
2702         else
2703                 tg3_phy_toggle_apd(tp, false);
2704
2705 out:
2706         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2707             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2708                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2709                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2710                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2711         }
2712
2713         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2714                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2715                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2716         }
2717
2718         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2719                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2720                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2721                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2722                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2723                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2724                 }
2725         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2726                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2727                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2728                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2729                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2730                                 tg3_writephy(tp, MII_TG3_TEST1,
2731                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2732                         } else
2733                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2734
2735                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2736                 }
2737         }
2738
2739         /* Set Extended packet length bit (bit 14) on all chips that
2740          * support jumbo frames. */
2741         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2742                 /* Cannot do read-modify-write on 5401 */
2743                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2744         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2745                 /* Set bit 14 with read-modify-write to preserve other bits */
2746                 err = tg3_phy_auxctl_read(tp,
2747                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2748                 if (!err)
2749                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2750                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2751         }
2752
2753         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2754          * jumbo frame transmission.
2755          */
2756         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2757                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2758                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2759                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2760         }
2761
2762         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2763                 /* adjust output voltage */
2764                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2765         }
2766
2767         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2768                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2769
2770         tg3_phy_toggle_automdix(tp, true);
2771         tg3_phy_set_wirespeed(tp);
2772         return 0;
2773 }
2774
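     /* Power-source handshake between the (up to four) PCI functions of
      * a chip: each function owns a 4-bit field in a shared status word
      * advertising driver presence and whether it needs the auxiliary
      * power source.
      */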
2775 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2776 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2777 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2778                                           TG3_GPIO_MSG_NEED_VAUX)
2779 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2780         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2781          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2782          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2783          (TG3_GPIO_MSG_DRVR_PRES << 12))
2784
2785 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2786         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2787          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2788          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2789          (TG3_GPIO_MSG_NEED_VAUX << 12))
2790
2791 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2792 {
2793         u32 status, shift;
2794
2795         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2796             tg3_asic_rev(tp) == ASIC_REV_5719)
2797                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2798         else
2799                 status = tr32(TG3_CPMU_DRV_STATUS);
2800
2801         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2802         status &= ~(TG3_GPIO_MSG_MASK << shift);
2803         status |= (newstat << shift);
2804
2805         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2806             tg3_asic_rev(tp) == ASIC_REV_5719)
2807                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2808         else
2809                 tw32(TG3_CPMU_DRV_STATUS, status);
2810
2811         return status >> TG3_APE_GPIO_MSG_SHIFT;
2812 }
2813
2814 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2815 {
2816         if (!tg3_flag(tp, IS_NIC))
2817                 return 0;
2818
2819         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2820             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2821             tg3_asic_rev(tp) == ASIC_REV_5720) {
2822                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2823                         return -EIO;
2824
2825                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2826
2827                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2828                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2829
2830                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2831         } else {
2832                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2833                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2834         }
2835
2836         return 0;
2837 }
2838
2839 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2840 {
2841         u32 grc_local_ctrl;
2842
2843         if (!tg3_flag(tp, IS_NIC) ||
2844             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2845             tg3_asic_rev(tp) == ASIC_REV_5701)
2846                 return;
2847
2848         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2849
2850         tw32_wait_f(GRC_LOCAL_CTRL,
2851                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2852                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2853
2854         tw32_wait_f(GRC_LOCAL_CTRL,
2855                     grc_local_ctrl,
2856                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2857
2858         tw32_wait_f(GRC_LOCAL_CTRL,
2859                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2860                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2861 }
2862
2863 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2864 {
2865         if (!tg3_flag(tp, IS_NIC))
2866                 return;
2867
2868         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2869             tg3_asic_rev(tp) == ASIC_REV_5701) {
2870                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2871                             (GRC_LCLCTRL_GPIO_OE0 |
2872                              GRC_LCLCTRL_GPIO_OE1 |
2873                              GRC_LCLCTRL_GPIO_OE2 |
2874                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2875                              GRC_LCLCTRL_GPIO_OUTPUT1),
2876                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2877         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2878                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2879                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2880                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2881                                      GRC_LCLCTRL_GPIO_OE1 |
2882                                      GRC_LCLCTRL_GPIO_OE2 |
2883                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2884                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2885                                      tp->grc_local_ctrl;
2886                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2887                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2888
2889                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2890                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2891                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2892
2893                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2894                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2895                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2896         } else {
2897                 u32 no_gpio2;
2898                 u32 grc_local_ctrl = 0;
2899
2900                 /* Workaround to avoid drawing too much current. */
2901                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2902                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2903                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2904                                     grc_local_ctrl,
2905                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2906                 }
2907
2908                 /* On 5753 and variants, GPIO2 cannot be used. */
2909                 no_gpio2 = tp->nic_sram_data_cfg &
2910                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2911
2912                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2913                                   GRC_LCLCTRL_GPIO_OE1 |
2914                                   GRC_LCLCTRL_GPIO_OE2 |
2915                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2916                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2917                 if (no_gpio2) {
2918                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2919                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2920                 }
2921                 tw32_wait_f(GRC_LOCAL_CTRL,
2922                             tp->grc_local_ctrl | grc_local_ctrl,
2923                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2924
2925                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2926
2927                 tw32_wait_f(GRC_LOCAL_CTRL,
2928                             tp->grc_local_ctrl | grc_local_ctrl,
2929                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2930
2931                 if (!no_gpio2) {
2932                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2933                         tw32_wait_f(GRC_LOCAL_CTRL,
2934                                     tp->grc_local_ctrl | grc_local_ctrl,
2935                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2936                 }
2937         }
2938 }
2939
2940 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2941 {
2942         u32 msg = 0;
2943
2944         /* Serialize power state transitions */
2945         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2946                 return;
2947
2948         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2949                 msg = TG3_GPIO_MSG_NEED_VAUX;
2950
2951         msg = tg3_set_function_status(tp, msg);
2952
2953         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2954                 goto done;
2955
2956         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2957                 tg3_pwrsrc_switch_to_vaux(tp);
2958         else
2959                 tg3_pwrsrc_die_with_vmain(tp);
2960
2961 done:
2962         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2963 }
2964
2965 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2966 {
2967         bool need_vaux = false;
2968
2969         /* The GPIOs do something completely different on 57765. */
2970         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2971                 return;
2972
2973         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2974             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2975             tg3_asic_rev(tp) == ASIC_REV_5720) {
2976                 tg3_frob_aux_power_5717(tp, include_wol ?
2977                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2978                 return;
2979         }
2980
2981         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2982                 struct net_device *dev_peer;
2983
2984                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2985
2986                 /* remove_one() may have been run on the peer. */
2987                 if (dev_peer) {
2988                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2989
2990                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2991                                 return;
2992
2993                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2994                             tg3_flag(tp_peer, ENABLE_ASF))
2995                                 need_vaux = true;
2996                 }
2997         }
2998
2999         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3000             tg3_flag(tp, ENABLE_ASF))
3001                 need_vaux = true;
3002
3003         if (need_vaux)
3004                 tg3_pwrsrc_switch_to_vaux(tp);
3005         else
3006                 tg3_pwrsrc_die_with_vmain(tp);
3007 }
3008
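/* Returns 1 when MAC_MODE_LINK_POLARITY should be set for the given link
 * speed; callers then set or clear that bit in MAC_MODE accordingly (see
 * the WOL path in tg3_power_down_prepare() below).
 */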
3009 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3010 {
3011         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3012                 return 1;
3013         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3014                 if (speed != SPEED_10)
3015                         return 1;
3016         } else if (speed == SPEED_10)
3017                 return 1;
3018
3019         return 0;
3020 }
3021
3022 static bool tg3_phy_power_bug(struct tg3 *tp)
3023 {
3024         switch (tg3_asic_rev(tp)) {
3025         case ASIC_REV_5700:
3026         case ASIC_REV_5704:
3027                 return true;
3028         case ASIC_REV_5780:
3029                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3030                         return true;
3031                 return false;
3032         case ASIC_REV_5717:
3033                 if (!tp->pci_fn)
3034                         return true;
3035                 return false;
3036         case ASIC_REV_5719:
3037         case ASIC_REV_5720:
3038                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3039                     !tp->pci_fn)
3040                         return true;
3041                 return false;
3042         }
3043
3044         return false;
3045 }
3046
3047 static bool tg3_phy_led_bug(struct tg3 *tp)
3048 {
3049         switch (tg3_asic_rev(tp)) {
3050         case ASIC_REV_5719:
3051         case ASIC_REV_5720:
3052                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3053                     !tp->pci_fn)
3054                         return true;
3055                 return false;
3056         }
3057
3058         return false;
3059 }
3060
3061 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3062 {
3063         u32 val;
3064
3065         if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3066                 return;
3067
3068         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3069                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3070                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3071                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3072
3073                         sg_dig_ctrl |=
3074                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3075                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
3076                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3077                 }
3078                 return;
3079         }
3080
3081         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3082                 tg3_bmcr_reset(tp);
3083                 val = tr32(GRC_MISC_CFG);
3084                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3085                 udelay(40);
3086                 return;
3087         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3088                 u32 phytest;
3089                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3090                         u32 phy;
3091
3092                         tg3_writephy(tp, MII_ADVERTISE, 0);
3093                         tg3_writephy(tp, MII_BMCR,
3094                                      BMCR_ANENABLE | BMCR_ANRESTART);
3095
3096                         tg3_writephy(tp, MII_TG3_FET_TEST,
3097                                      phytest | MII_TG3_FET_SHADOW_EN);
3098                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3099                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3100                                 tg3_writephy(tp,
3101                                              MII_TG3_FET_SHDW_AUXMODE4,
3102                                              phy);
3103                         }
3104                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3105                 }
3106                 return;
3107         } else if (do_low_power) {
3108                 if (!tg3_phy_led_bug(tp))
3109                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3110                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3111
3112                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3113                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3114                       MII_TG3_AUXCTL_PCTL_VREG_11V;
3115                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3116         }
3117
3118         /* On some chips the PHY must not be powered down because of
3119          * hardware bugs; see tg3_phy_power_bug() above.
3120          */
3121         if (tg3_phy_power_bug(tp))
3122                 return;
3123
3124         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3125             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3126                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3127                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3128                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3129                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3130         }
3131
3132         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3133 }
3134
3135 /* tp->lock is held. */
3136 static int tg3_nvram_lock(struct tg3 *tp)
3137 {
3138         if (tg3_flag(tp, NVRAM)) {
3139                 int i;
3140
3141                 if (tp->nvram_lock_cnt == 0) {
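                        /* Request arbitration slot 1, then poll for the
                         * grant; worst case this spins 8000 * 20 us = 160 ms
                         * before giving up.
                         */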
3142                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3143                         for (i = 0; i < 8000; i++) {
3144                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3145                                         break;
3146                                 udelay(20);
3147                         }
3148                         if (i == 8000) {
3149                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3150                                 return -ENODEV;
3151                         }
3152                 }
3153                 tp->nvram_lock_cnt++;
3154         }
3155         return 0;
3156 }
3157
3158 /* tp->lock is held. */
3159 static void tg3_nvram_unlock(struct tg3 *tp)
3160 {
3161         if (tg3_flag(tp, NVRAM)) {
3162                 if (tp->nvram_lock_cnt > 0)
3163                         tp->nvram_lock_cnt--;
3164                 if (tp->nvram_lock_cnt == 0)
3165                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3166         }
3167 }
3168
3169 /* tp->lock is held. */
3170 static void tg3_enable_nvram_access(struct tg3 *tp)
3171 {
3172         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3173                 u32 nvaccess = tr32(NVRAM_ACCESS);
3174
3175                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3176         }
3177 }
3178
3179 /* tp->lock is held. */
3180 static void tg3_disable_nvram_access(struct tg3 *tp)
3181 {
3182         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3183                 u32 nvaccess = tr32(NVRAM_ACCESS);
3184
3185                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3186         }
3187 }
3188
3189 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3190                                         u32 offset, u32 *val)
3191 {
3192         u32 tmp;
3193         int i;
3194
3195         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3196                 return -EINVAL;
3197
3198         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3199                                         EEPROM_ADDR_DEVID_MASK |
3200                                         EEPROM_ADDR_READ);
3201         tw32(GRC_EEPROM_ADDR,
3202              tmp |
3203              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3204              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3205               EEPROM_ADDR_ADDR_MASK) |
3206              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3207
3208         for (i = 0; i < 1000; i++) {
3209                 tmp = tr32(GRC_EEPROM_ADDR);
3210
3211                 if (tmp & EEPROM_ADDR_COMPLETE)
3212                         break;
3213                 msleep(1);
3214         }
3215         if (!(tmp & EEPROM_ADDR_COMPLETE))
3216                 return -EBUSY;
3217
3218         tmp = tr32(GRC_EEPROM_DATA);
3219
3220         /*
3221          * The data will always be opposite the native endian
3222          * format.  Perform a blind byteswap to compensate.
3223          */
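        /* swab32() reverses the byte order unconditionally, e.g.
         * swab32(0xAABBCCDD) == 0xDDCCBBAA.
         */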
3224         *val = swab32(tmp);
3225
3226         return 0;
3227 }
3228
3229 #define NVRAM_CMD_TIMEOUT 10000
3230
3231 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3232 {
3233         int i;
3234
3235         tw32(NVRAM_CMD, nvram_cmd);
3236         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3237                 udelay(10);
3238                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3239                         udelay(10);
3240                         break;
3241                 }
3242         }
3243
3244         if (i == NVRAM_CMD_TIMEOUT)
3245                 return -EBUSY;
3246
3247         return 0;
3248 }
3249
3250 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3251 {
3252         if (tg3_flag(tp, NVRAM) &&
3253             tg3_flag(tp, NVRAM_BUFFERED) &&
3254             tg3_flag(tp, FLASH) &&
3255             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3256             (tp->nvram_jedecnum == JEDEC_ATMEL))
3257
3258                 addr = ((addr / tp->nvram_pagesize) <<
3259                         ATMEL_AT45DB0X1B_PAGE_POS) +
3260                        (addr % tp->nvram_pagesize);
3261
3262         return addr;
3263 }
3264
3265 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3266 {
3267         if (tg3_flag(tp, NVRAM) &&
3268             tg3_flag(tp, NVRAM_BUFFERED) &&
3269             tg3_flag(tp, FLASH) &&
3270             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3271             (tp->nvram_jedecnum == JEDEC_ATMEL))
3272
3273                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3274                         tp->nvram_pagesize) +
3275                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3276
3277         return addr;
3278 }
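
/* A worked example of the translation pair above, assuming the 264-byte
 * pages and 9-bit page shift (ATMEL_AT45DB0X1B_PAGE_POS) of the Atmel
 * AT45DB0X1B parts: logical address 0x210 (528 = 2 * 264) is page 2,
 * offset 0, so tg3_nvram_phys_addr() yields (2 << 9) + 0 = 0x400, and
 * tg3_nvram_logical_addr() maps 0x400 back to 0x210.
 */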
3279
3280 /* NOTE: Data read in from NVRAM is byteswapped according to
3281  * the byteswapping settings for all other register accesses.
3282  * tg3 devices are BE devices, so on a BE machine, the data
3283  * returned will be exactly as it is seen in NVRAM.  On a LE
3284  * machine, the 32-bit value will be byteswapped.
3285  */
3286 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3287 {
3288         int ret;
3289
3290         if (!tg3_flag(tp, NVRAM))
3291                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3292
3293         offset = tg3_nvram_phys_addr(tp, offset);
3294
3295         if (offset > NVRAM_ADDR_MSK)
3296                 return -EINVAL;
3297
3298         ret = tg3_nvram_lock(tp);
3299         if (ret)
3300                 return ret;
3301
3302         tg3_enable_nvram_access(tp);
3303
3304         tw32(NVRAM_ADDR, offset);
3305         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3306                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3307
3308         if (ret == 0)
3309                 *val = tr32(NVRAM_RDDATA);
3310
3311         tg3_disable_nvram_access(tp);
3312
3313         tg3_nvram_unlock(tp);
3314
3315         return ret;
3316 }
3317
3318 /* Ensures NVRAM data is in bytestream format. */
3319 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3320 {
3321         u32 v;
3322         int res = tg3_nvram_read(tp, offset, &v);
3323         if (!res)
3324                 *val = cpu_to_be32(v);
3325         return res;
3326 }
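
/* A minimal usage sketch (hypothetical caller): when raw NVRAM bytes are
 * wanted regardless of host endianness, read through the helper above and
 * copy the __be32 out whole:
 *
 *      __be32 word;
 *
 *      if (!tg3_nvram_read_be32(tp, offset, &word))
 *              memcpy(buf, &word, sizeof(word));  // buf gets NVRAM bytes in order
 */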
3327
3328 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3329                                     u32 offset, u32 len, u8 *buf)
3330 {
3331         int i, j, rc = 0;
3332         u32 val;
3333
3334         for (i = 0; i < len; i += 4) {
3335                 u32 addr;
3336                 __be32 data;
3337
3338                 addr = offset + i;
3339
3340                 memcpy(&data, buf + i, 4);
3341
3342                 /*
3343                  * The SEEPROM interface expects the data to always be opposite
3344                  * the native endian format.  We accomplish this by reversing
3345                  * all the operations that would have been performed on the
3346                  * data from a call to tg3_nvram_read_be32().
3347                  */
3348                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3349
3350                 val = tr32(GRC_EEPROM_ADDR);
3351                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3352
3353                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3354                         EEPROM_ADDR_READ);
3355                 tw32(GRC_EEPROM_ADDR, val |
3356                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3357                         (addr & EEPROM_ADDR_ADDR_MASK) |
3358                         EEPROM_ADDR_START |
3359                         EEPROM_ADDR_WRITE);
3360
3361                 for (j = 0; j < 1000; j++) {
3362                         val = tr32(GRC_EEPROM_ADDR);
3363
3364                         if (val & EEPROM_ADDR_COMPLETE)
3365                                 break;
3366                         msleep(1);
3367                 }
3368                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3369                         rc = -EBUSY;
3370                         break;
3371                 }
3372         }
3373
3374         return rc;
3375 }
3376
3377 /* offset and length are dword aligned */
3378 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3379                 u8 *buf)
3380 {
3381         int ret = 0;
3382         u32 pagesize = tp->nvram_pagesize;
3383         u32 pagemask = pagesize - 1;
3384         u32 nvram_cmd;
3385         u8 *tmp;
3386
3387         tmp = kmalloc(pagesize, GFP_KERNEL);
3388         if (tmp == NULL)
3389                 return -ENOMEM;
3390
3391         while (len) {
3392                 int j;
3393                 u32 phy_addr, page_off, size;
3394
3395                 phy_addr = offset & ~pagemask;
3396
3397                 for (j = 0; j < pagesize; j += 4) {
3398                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3399                                                   (__be32 *) (tmp + j));
3400                         if (ret)
3401                                 break;
3402                 }
3403                 if (ret)
3404                         break;
3405
3406                 page_off = offset & pagemask;
3407                 size = pagesize;
3408                 if (len < size)
3409                         size = len;
3410
3411                 len -= size;
3412
3413                 memcpy(tmp + page_off, buf, size);
3414
3415                 offset = offset + (pagesize - page_off);
3416
3417                 tg3_enable_nvram_access(tp);
3418
3419                 /*
3420                  * Before we can erase the flash page, we need
3421                  * to issue a special "write enable" command.
3422                  */
3423                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3424
3425                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3426                         break;
3427
3428                 /* Erase the target page */
3429                 tw32(NVRAM_ADDR, phy_addr);
3430
3431                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3432                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3433
3434                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3435                         break;
3436
3437                 /* Issue another write enable to start the write. */
3438                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3439
3440                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3441                         break;
3442
3443                 for (j = 0; j < pagesize; j += 4) {
3444                         __be32 data;
3445
3446                         data = *((__be32 *) (tmp + j));
3447
3448                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3449
3450                         tw32(NVRAM_ADDR, phy_addr + j);
3451
3452                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3453                                 NVRAM_CMD_WR;
3454
3455                         if (j == 0)
3456                                 nvram_cmd |= NVRAM_CMD_FIRST;
3457                         else if (j == (pagesize - 4))
3458                                 nvram_cmd |= NVRAM_CMD_LAST;
3459
3460                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3461                         if (ret)
3462                                 break;
3463                 }
3464                 if (ret)
3465                         break;
3466         }
3467
3468         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3469         tg3_nvram_exec_cmd(tp, nvram_cmd);
3470
3471         kfree(tmp);
3472
3473         return ret;
3474 }
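
/* The unbuffered path above is a page-granular read-modify-write: read the
 * whole target page, merge in the caller's data, issue a write enable,
 * erase the page, issue another write enable, then program the page one
 * dword at a time with NVRAM_CMD_FIRST/NVRAM_CMD_LAST framing.
 */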
3475
3476 /* offset and length are dword aligned */
3477 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3478                 u8 *buf)
3479 {
3480         int i, ret = 0;
3481
3482         for (i = 0; i < len; i += 4, offset += 4) {
3483                 u32 page_off, phy_addr, nvram_cmd;
3484                 __be32 data;
3485
3486                 memcpy(&data, buf + i, 4);
3487                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3488
3489                 page_off = offset % tp->nvram_pagesize;
3490
3491                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3492
3493                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3494
3495                 if (page_off == 0 || i == 0)
3496                         nvram_cmd |= NVRAM_CMD_FIRST;
3497                 if (page_off == (tp->nvram_pagesize - 4))
3498                         nvram_cmd |= NVRAM_CMD_LAST;
3499
3500                 if (i == (len - 4))
3501                         nvram_cmd |= NVRAM_CMD_LAST;
3502
3503                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3504                     !tg3_flag(tp, FLASH) ||
3505                     !tg3_flag(tp, 57765_PLUS))
3506                         tw32(NVRAM_ADDR, phy_addr);
3507
3508                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3509                     !tg3_flag(tp, 5755_PLUS) &&
3510                     (tp->nvram_jedecnum == JEDEC_ST) &&
3511                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3512                         u32 cmd;
3513
3514                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3515                         ret = tg3_nvram_exec_cmd(tp, cmd);
3516                         if (ret)
3517                                 break;
3518                 }
3519                 if (!tg3_flag(tp, FLASH)) {
3520                         /* We always do complete word writes to eeprom. */
3521                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3522                 }
3523
3524                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3525                 if (ret)
3526                         break;
3527         }
3528         return ret;
3529 }
3530
3531 /* offset and length are dword aligned */
3532 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3533 {
3534         int ret;
3535
3536         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
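                /* On boards with EEPROM_WRITE_PROT set, GPIO OUTPUT1 is
                 * assumed to gate the part's write-protect pin; drop it for
                 * the duration of the write and restore it below.
                 */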
3537                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3538                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3539                 udelay(40);
3540         }
3541
3542         if (!tg3_flag(tp, NVRAM)) {
3543                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3544         } else {
3545                 u32 grc_mode;
3546
3547                 ret = tg3_nvram_lock(tp);
3548                 if (ret)
3549                         return ret;
3550
3551                 tg3_enable_nvram_access(tp);
3552                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3553                         tw32(NVRAM_WRITE1, 0x406);
3554
3555                 grc_mode = tr32(GRC_MODE);
3556                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3557
3558                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3559                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3560                                 buf);
3561                 } else {
3562                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3563                                 buf);
3564                 }
3565
3566                 grc_mode = tr32(GRC_MODE);
3567                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3568
3569                 tg3_disable_nvram_access(tp);
3570                 tg3_nvram_unlock(tp);
3571         }
3572
3573         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3574                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3575                 udelay(40);
3576         }
3577
3578         return ret;
3579 }
3580
3581 #define RX_CPU_SCRATCH_BASE     0x30000
3582 #define RX_CPU_SCRATCH_SIZE     0x04000
3583 #define TX_CPU_SCRATCH_BASE     0x34000
3584 #define TX_CPU_SCRATCH_SIZE     0x04000
3585
3586 /* tp->lock is held. */
3587 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3588 {
3589         int i;
3590         const int iters = 10000;
3591
3592         for (i = 0; i < iters; i++) {
3593                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3594                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3595                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3596                         break;
3597                 if (pci_channel_offline(tp->pdev))
3598                         return -EBUSY;
3599         }
3600
3601         return (i == iters) ? -EBUSY : 0;
3602 }
3603
3604 /* tp->lock is held. */
3605 static int tg3_rxcpu_pause(struct tg3 *tp)
3606 {
3607         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3608
3609         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3610         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3611         udelay(10);
3612
3613         return rc;
3614 }
3615
3616 /* tp->lock is held. */
3617 static int tg3_txcpu_pause(struct tg3 *tp)
3618 {
3619         return tg3_pause_cpu(tp, TX_CPU_BASE);
3620 }
3621
3622 /* tp->lock is held. */
3623 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3624 {
3625         tw32(cpu_base + CPU_STATE, 0xffffffff);
3626         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3627 }
3628
3629 /* tp->lock is held. */
3630 static void tg3_rxcpu_resume(struct tg3 *tp)
3631 {
3632         tg3_resume_cpu(tp, RX_CPU_BASE);
3633 }
3634
3635 /* tp->lock is held. */
3636 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3637 {
3638         int rc;
3639
3640         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3641
3642         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3643                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3644
3645                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3646                 return 0;
3647         }
3648         if (cpu_base == RX_CPU_BASE) {
3649                 rc = tg3_rxcpu_pause(tp);
3650         } else {
3651                 /*
3652                  * There is only an Rx CPU for the 5750 derivative in the
3653                  * BCM4785.
3654                  */
3655                 if (tg3_flag(tp, IS_SSB_CORE))
3656                         return 0;
3657
3658                 rc = tg3_txcpu_pause(tp);
3659         }
3660
3661         if (rc) {
3662                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3663                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3664                 return -ENODEV;
3665         }
3666
3667         /* Clear firmware's nvram arbitration. */
3668         if (tg3_flag(tp, NVRAM))
3669                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3670         return 0;
3671 }
3672
3673 static int tg3_fw_data_len(struct tg3 *tp,
3674                            const struct tg3_firmware_hdr *fw_hdr)
3675 {
3676         int fw_len;
3677
3678         /* Non-fragmented firmware images have one firmware header
3679          * followed by a contiguous chunk of data to be written. The
3680          * length field in that header is not the length of the data but
3681          * the complete length of the bss, so the data length is derived
3682          * from tp->fw->size minus the headers.
3683          *
3684          * Fragmented firmware images have a main header followed by
3685          * multiple fragments. Each fragment is identical to a
3686          * non-fragmented image: a firmware header followed by a
3687          * contiguous chunk of data. In the main header the length field
3688          * is unused and set to 0xffffffff. In each fragment header the
3689          * length is the entire size of that fragment (data plus header),
3690          * so the data length is the header's length field minus TG3_FW_HDR_LEN.
3691          */
3692         if (tp->fw_len == 0xffffffff)
3693                 fw_len = be32_to_cpu(fw_hdr->len);
3694         else
3695                 fw_len = tp->fw->size;
3696
3697         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3698 }
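
/* Illustrative layout, assuming TG3_FW_HDR_LEN == 12 (three 32-bit header
 * words: version, base_addr, len):
 *
 *   non-fragmented: [hdr][data ............... tp->fw->size - 12 bytes]
 *   fragmented:     [main hdr, len = 0xffffffff]
 *                   [frag hdr, len = N][N - 12 bytes of data]
 *                   [frag hdr, len = M][M - 12 bytes of data] ...
 */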
3699
3700 /* tp->lock is held. */
3701 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3702                                  u32 cpu_scratch_base, int cpu_scratch_size,
3703                                  const struct tg3_firmware_hdr *fw_hdr)
3704 {
3705         int err, i;
3706         void (*write_op)(struct tg3 *, u32, u32);
3707         int total_len = tp->fw->size;
3708
3709         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3710                 netdev_err(tp->dev,
3711                            "%s: Trying to load TX cpu firmware which is 5705\n",
3712                            __func__);
3713                 return -EINVAL;
3714         }
3715
3716         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3717                 write_op = tg3_write_mem;
3718         else
3719                 write_op = tg3_write_indirect_reg32;
3720
3721         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3722                 /* It is possible that bootcode is still loading at this point.
3723                  * Get the nvram lock first before halting the cpu.
3724                  */
3725                 int lock_err = tg3_nvram_lock(tp);
3726                 err = tg3_halt_cpu(tp, cpu_base);
3727                 if (!lock_err)
3728                         tg3_nvram_unlock(tp);
3729                 if (err)
3730                         goto out;
3731
3732                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3733                         write_op(tp, cpu_scratch_base + i, 0);
3734                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3735                 tw32(cpu_base + CPU_MODE,
3736                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3737         } else {
3738                 /* Subtract additional main header for fragmented firmware and
3739                  * advance to the first fragment
3740                  */
3741                 total_len -= TG3_FW_HDR_LEN;
3742                 fw_hdr++;
3743         }
3744
3745         do {
3746                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3747                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3748                         write_op(tp, cpu_scratch_base +
3749                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3750                                      (i * sizeof(u32)),
3751                                  be32_to_cpu(fw_data[i]));
3752
3753                 total_len -= be32_to_cpu(fw_hdr->len);
3754
3755                 /* Advance to next fragment */
3756                 fw_hdr = (struct tg3_firmware_hdr *)
3757                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3758         } while (total_len > 0);
3759
3760         err = 0;
3761
3762 out:
3763         return err;
3764 }
3765
3766 /* tp->lock is held. */
3767 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3768 {
3769         int i;
3770         const int iters = 5;
3771
3772         tw32(cpu_base + CPU_STATE, 0xffffffff);
3773         tw32_f(cpu_base + CPU_PC, pc);
3774
3775         for (i = 0; i < iters; i++) {
3776                 if (tr32(cpu_base + CPU_PC) == pc)
3777                         break;
3778                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3779                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3780                 tw32_f(cpu_base + CPU_PC, pc);
3781                 udelay(1000);
3782         }
3783
3784         return (i == iters) ? -EBUSY : 0;
3785 }
3786
3787 /* tp->lock is held. */
3788 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3789 {
3790         const struct tg3_firmware_hdr *fw_hdr;
3791         int err;
3792
3793         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3794
3795         /* The firmware blob starts with version numbers, followed by a
3796          * start address and a length.  The length field holds the complete
3797          * image length (end_address_of_bss - start_address_of_text); the
3798          * remainder of the blob is loaded contiguously from the start
3799          * address. */
3800
3801         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3802                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3803                                     fw_hdr);
3804         if (err)
3805                 return err;
3806
3807         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3808                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3809                                     fw_hdr);
3810         if (err)
3811                 return err;
3812
3813         /* Now start up only the RX CPU. */
3814         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3815                                        be32_to_cpu(fw_hdr->base_addr));
3816         if (err) {
3817                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3818                            "should be %08x\n", __func__,
3819                            tr32(RX_CPU_BASE + CPU_PC),
3820                                 be32_to_cpu(fw_hdr->base_addr));
3821                 return -ENODEV;
3822         }
3823
3824         tg3_rxcpu_resume(tp);
3825
3826         return 0;
3827 }
3828
3829 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3830 {
3831         const int iters = 1000;
3832         int i;
3833         u32 val;
3834
3835         /* Wait for the boot code to complete initialization and enter its
3836          * service loop. It is then safe to download service patches.
3837          */
3838         for (i = 0; i < iters; i++) {
3839                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3840                         break;
3841
3842                 udelay(10);
3843         }
3844
3845         if (i == iters) {
3846                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3847                 return -EBUSY;
3848         }
3849
3850         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3851         if (val & 0xff) {
3852                 netdev_warn(tp->dev,
3853                             "Other patches exist. Not downloading EEE patch\n");
3854                 return -EEXIST;
3855         }
3856
3857         return 0;
3858 }
3859
3860 /* tp->lock is held. */
3861 static void tg3_load_57766_firmware(struct tg3 *tp)
3862 {
3863         struct tg3_firmware_hdr *fw_hdr;
3864
3865         if (!tg3_flag(tp, NO_NVRAM))
3866                 return;
3867
3868         if (tg3_validate_rxcpu_state(tp))
3869                 return;
3870
3871         if (!tp->fw)
3872                 return;
3873
3874         /* This firmware blob has a different format than older firmware
3875          * releases, as described below. The main difference is that the
3876          * data is fragmented and written to non-contiguous locations.
3877          *
3878          * The blob begins with a firmware header identical to other
3879          * firmware, consisting of version, base addr and length. The
3880          * length here is unused and set to 0xffffffff.
3881          *
3882          * This is followed by a series of firmware fragments, each
3883          * individually identical to older firmware, i.e. a firmware
3884          * header followed by the data for that fragment. The version
3885          * field of each fragment header is unused.
3886          */
3887
3888         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3889         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3890                 return;
3891
3892         if (tg3_rxcpu_pause(tp))
3893                 return;
3894
3895         /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3896         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3897
3898         tg3_rxcpu_resume(tp);
3899 }
3900
3901 /* tp->lock is held. */
3902 static int tg3_load_tso_firmware(struct tg3 *tp)
3903 {
3904         const struct tg3_firmware_hdr *fw_hdr;
3905         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3906         int err;
3907
3908         if (!tg3_flag(tp, FW_TSO))
3909                 return 0;
3910
3911         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3912
3913         /* The firmware blob starts with version numbers, followed by a
3914          * start address and a length.  The length field holds the complete
3915          * image length (end_address_of_bss - start_address_of_text); the
3916          * remainder of the blob is loaded contiguously from the start
3917          * address. */
3918
3919         cpu_scratch_size = tp->fw_len;
3920
3921         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3922                 cpu_base = RX_CPU_BASE;
3923                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3924         } else {
3925                 cpu_base = TX_CPU_BASE;
3926                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3927                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3928         }
3929
3930         err = tg3_load_firmware_cpu(tp, cpu_base,
3931                                     cpu_scratch_base, cpu_scratch_size,
3932                                     fw_hdr);
3933         if (err)
3934                 return err;
3935
3936         /* Now start up the CPU. */
3937         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3938                                        be32_to_cpu(fw_hdr->base_addr));
3939         if (err) {
3940                 netdev_err(tp->dev,
3941                            "%s fails to set CPU PC, is %08x should be %08x\n",
3942                            __func__, tr32(cpu_base + CPU_PC),
3943                            be32_to_cpu(fw_hdr->base_addr));
3944                 return -ENODEV;
3945         }
3946
3947         tg3_resume_cpu(tp, cpu_base);
3948         return 0;
3949 }
3950
3951 /* tp->lock is held. */
3952 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3953 {
3954         u32 addr_high, addr_low;
3955
3956         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3957         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3958                     (mac_addr[4] <<  8) | mac_addr[5]);
3959
3960         if (index < 4) {
3961                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3962                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3963         } else {
3964                 index -= 4;
3965                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3966                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3967         }
3968 }
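
/* For example, MAC address 00:11:22:33:44:55 is written as
 * addr_high = 0x00000011 and addr_low = 0x22334455.
 */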
3969
3970 /* tp->lock is held. */
3971 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3972 {
3973         u32 addr_high;
3974         int i;
3975
3976         for (i = 0; i < 4; i++) {
3977                 if (i == 1 && skip_mac_1)
3978                         continue;
3979                 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3980         }
3981
3982         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3983             tg3_asic_rev(tp) == ASIC_REV_5704) {
3984                 for (i = 4; i < 16; i++)
3985                         __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3986         }
3987
3988         addr_high = (tp->dev->dev_addr[0] +
3989                      tp->dev->dev_addr[1] +
3990                      tp->dev->dev_addr[2] +
3991                      tp->dev->dev_addr[3] +
3992                      tp->dev->dev_addr[4] +
3993                      tp->dev->dev_addr[5]) &
3994                 TX_BACKOFF_SEED_MASK;
3995         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3996 }
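
/* The backoff seed is just the byte-sum of the station address masked to
 * the seed field's width; e.g. for 00:11:22:33:44:55 the sum is 0xff, so
 * (assuming TX_BACKOFF_SEED_MASK == 0x3ff) the value written is 0x0ff.
 */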
3997
3998 static void tg3_enable_register_access(struct tg3 *tp)
3999 {
4000         /*
4001          * Make sure register accesses (indirect or otherwise) will function
4002          * correctly.
4003          */
4004         pci_write_config_dword(tp->pdev,
4005                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4006 }
4007
4008 static int tg3_power_up(struct tg3 *tp)
4009 {
4010         int err;
4011
4012         tg3_enable_register_access(tp);
4013
4014         err = pci_set_power_state(tp->pdev, PCI_D0);
4015         if (!err) {
4016                 /* Switch out of Vaux if it is a NIC */
4017                 tg3_pwrsrc_switch_to_vmain(tp);
4018         } else {
4019                 netdev_err(tp->dev, "Transition to D0 failed\n");
4020         }
4021
4022         return err;
4023 }
4024
4025 static int tg3_setup_phy(struct tg3 *, bool);
4026
4027 static int tg3_power_down_prepare(struct tg3 *tp)
4028 {
4029         u32 misc_host_ctrl;
4030         bool device_should_wake, do_low_power;
4031
4032         tg3_enable_register_access(tp);
4033
4034         /* Restore the CLKREQ setting. */
4035         if (tg3_flag(tp, CLKREQ_BUG))
4036                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4037                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4038
4039         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4040         tw32(TG3PCI_MISC_HOST_CTRL,
4041              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4042
4043         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4044                              tg3_flag(tp, WOL_ENABLE);
4045
4046         if (tg3_flag(tp, USE_PHYLIB)) {
4047                 do_low_power = false;
4048                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4049                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4050                         struct phy_device *phydev;
4051                         u32 phyid, advertising;
4052
4053                         phydev = tp->mdio_bus->phy_map[tp->phy_addr];
4054
4055                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4056
4057                         tp->link_config.speed = phydev->speed;
4058                         tp->link_config.duplex = phydev->duplex;
4059                         tp->link_config.autoneg = phydev->autoneg;
4060                         tp->link_config.advertising = phydev->advertising;
4061
4062                         advertising = ADVERTISED_TP |
4063                                       ADVERTISED_Pause |
4064                                       ADVERTISED_Autoneg |
4065                                       ADVERTISED_10baseT_Half;
4066
4067                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4068                                 if (tg3_flag(tp, WOL_SPEED_100MB))
4069                                         advertising |=
4070                                                 ADVERTISED_100baseT_Half |
4071                                                 ADVERTISED_100baseT_Full |
4072                                                 ADVERTISED_10baseT_Full;
4073                                 else
4074                                         advertising |= ADVERTISED_10baseT_Full;
4075                         }
4076
4077                         phydev->advertising = advertising;
4078
4079                         phy_start_aneg(phydev);
4080
4081                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4082                         if (phyid != PHY_ID_BCMAC131) {
4083                                 phyid &= PHY_BCM_OUI_MASK;
4084                                 if (phyid == PHY_BCM_OUI_1 ||
4085                                     phyid == PHY_BCM_OUI_2 ||
4086                                     phyid == PHY_BCM_OUI_3)
4087                                         do_low_power = true;
4088                         }
4089                 }
4090         } else {
4091                 do_low_power = true;
4092
4093                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4094                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4095
4096                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4097                         tg3_setup_phy(tp, false);
4098         }
4099
4100         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4101                 u32 val;
4102
4103                 val = tr32(GRC_VCPU_EXT_CTRL);
4104                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4105         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4106                 int i;
4107                 u32 val;
4108
4109                 for (i = 0; i < 200; i++) {
4110                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4111                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4112                                 break;
4113                         msleep(1);
4114                 }
4115         }
4116         if (tg3_flag(tp, WOL_CAP))
4117                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4118                                                      WOL_DRV_STATE_SHUTDOWN |
4119                                                      WOL_DRV_WOL |
4120                                                      WOL_SET_MAGIC_PKT);
4121
4122         if (device_should_wake) {
4123                 u32 mac_mode;
4124
4125                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4126                         if (do_low_power &&
4127                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4128                                 tg3_phy_auxctl_write(tp,
4129                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4130                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4131                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4132                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4133                                 udelay(40);
4134                         }
4135
4136                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4137                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4138                         else if (tp->phy_flags &
4139                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4140                                 if (tp->link_config.active_speed == SPEED_1000)
4141                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4142                                 else
4143                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4144                         } else
4145                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4146
4147                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4148                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4149                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4150                                              SPEED_100 : SPEED_10;
4151                                 if (tg3_5700_link_polarity(tp, speed))
4152                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4153                                 else
4154                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4155                         }
4156                 } else {
4157                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4158                 }
4159
4160                 if (!tg3_flag(tp, 5750_PLUS))
4161                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4162
4163                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4164                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4165                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4166                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4167
4168                 if (tg3_flag(tp, ENABLE_APE))
4169                         mac_mode |= MAC_MODE_APE_TX_EN |
4170                                     MAC_MODE_APE_RX_EN |
4171                                     MAC_MODE_TDE_ENABLE;
4172
4173                 tw32_f(MAC_MODE, mac_mode);
4174                 udelay(100);
4175
4176                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4177                 udelay(10);
4178         }
4179
4180         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4181             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4182              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4183                 u32 base_val;
4184
4185                 base_val = tp->pci_clock_ctrl;
4186                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4187                              CLOCK_CTRL_TXCLK_DISABLE);
4188
4189                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4190                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4191         } else if (tg3_flag(tp, 5780_CLASS) ||
4192                    tg3_flag(tp, CPMU_PRESENT) ||
4193                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4194                 /* do nothing */
4195         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4196                 u32 newbits1, newbits2;
4197
4198                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4199                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4200                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4201                                     CLOCK_CTRL_TXCLK_DISABLE |
4202                                     CLOCK_CTRL_ALTCLK);
4203                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4204                 } else if (tg3_flag(tp, 5705_PLUS)) {
4205                         newbits1 = CLOCK_CTRL_625_CORE;
4206                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4207                 } else {
4208                         newbits1 = CLOCK_CTRL_ALTCLK;
4209                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4210                 }
4211
4212                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4213                             40);
4214
4215                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4216                             40);
4217
4218                 if (!tg3_flag(tp, 5705_PLUS)) {
4219                         u32 newbits3;
4220
4221                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4222                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4223                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4224                                             CLOCK_CTRL_TXCLK_DISABLE |
4225                                             CLOCK_CTRL_44MHZ_CORE);
4226                         } else {
4227                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4228                         }
4229
4230                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4231                                     tp->pci_clock_ctrl | newbits3, 40);
4232                 }
4233         }
4234
4235         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
4236                 tg3_power_down_phy(tp, do_low_power);
4237
4238         tg3_frob_aux_power(tp, true);
4239
4240         /* Workaround for unstable PLL clock */
4241         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4242             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4243              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4244                 u32 val = tr32(0x7d00);
4245
4246                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4247                 tw32(0x7d00, val);
4248                 if (!tg3_flag(tp, ENABLE_ASF)) {
4249                         int err;
4250
4251                         err = tg3_nvram_lock(tp);
4252                         tg3_halt_cpu(tp, RX_CPU_BASE);
4253                         if (!err)
4254                                 tg3_nvram_unlock(tp);
4255                 }
4256         }
4257
4258         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4259
4260         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4261
4262         return 0;
4263 }
4264
4265 static void tg3_power_down(struct tg3 *tp)
4266 {
4267         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4268         pci_set_power_state(tp->pdev, PCI_D3hot);
4269 }
4270
4271 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4272 {
4273         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4274         case MII_TG3_AUX_STAT_10HALF:
4275                 *speed = SPEED_10;
4276                 *duplex = DUPLEX_HALF;
4277                 break;
4278
4279         case MII_TG3_AUX_STAT_10FULL:
4280                 *speed = SPEED_10;
4281                 *duplex = DUPLEX_FULL;
4282                 break;
4283
4284         case MII_TG3_AUX_STAT_100HALF:
4285                 *speed = SPEED_100;
4286                 *duplex = DUPLEX_HALF;
4287                 break;
4288
4289         case MII_TG3_AUX_STAT_100FULL:
4290                 *speed = SPEED_100;
4291                 *duplex = DUPLEX_FULL;
4292                 break;
4293
4294         case MII_TG3_AUX_STAT_1000HALF:
4295                 *speed = SPEED_1000;
4296                 *duplex = DUPLEX_HALF;
4297                 break;
4298
4299         case MII_TG3_AUX_STAT_1000FULL:
4300                 *speed = SPEED_1000;
4301                 *duplex = DUPLEX_FULL;
4302                 break;
4303
4304         default:
4305                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4306                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4307                                  SPEED_10;
4308                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4309                                   DUPLEX_HALF;
4310                         break;
4311                 }
4312                 *speed = SPEED_UNKNOWN;
4313                 *duplex = DUPLEX_UNKNOWN;
4314                 break;
4315         }
4316 }
4317
4318 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4319 {
4320         int err = 0;
4321         u32 val, new_adv;
4322
4323         new_adv = ADVERTISE_CSMA;
4324         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4325         new_adv |= mii_advertise_flowctrl(flowctrl);
4326
4327         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4328         if (err)
4329                 goto done;
4330
4331         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4332                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4333
4334                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4335                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4336                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4337
4338                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4339                 if (err)
4340                         goto done;
4341         }
4342
4343         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4344                 goto done;
4345
4346         tw32(TG3_CPMU_EEE_MODE,
4347              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4348
4349         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4350         if (!err) {
4351                 u32 err2;
4352
4353                 val = 0;
4354                 /* Advertise 100-BaseTX EEE ability */
4355                 if (advertise & ADVERTISED_100baseT_Full)
4356                         val |= MDIO_AN_EEE_ADV_100TX;
4357                 /* Advertise 1000-BaseT EEE ability */
4358                 if (advertise & ADVERTISED_1000baseT_Full)
4359                         val |= MDIO_AN_EEE_ADV_1000T;
4360
4361                 if (!tp->eee.eee_enabled) {
4362                         val = 0;
4363                         tp->eee.advertised = 0;
4364                 } else {
4365                         tp->eee.advertised = advertise &
4366                                              (ADVERTISED_100baseT_Full |
4367                                               ADVERTISED_1000baseT_Full);
4368                 }
4369
4370                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4371                 if (err)
4372                         val = 0;
4373
4374                 switch (tg3_asic_rev(tp)) {
4375                 case ASIC_REV_5717:
4376                 case ASIC_REV_57765:
4377                 case ASIC_REV_57766:
4378                 case ASIC_REV_5719:
4379                         /* If any EEE modes were advertised above... */
4380                         if (val)
4381                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4382                                       MII_TG3_DSP_TAP26_RMRXSTO |
4383                                       MII_TG3_DSP_TAP26_OPCSINPT;
4384                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4385                         /* Fall through */
4386                 case ASIC_REV_5720:
4387                 case ASIC_REV_5762:
4388                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4389                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4390                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4391                 }
4392
4393                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4394                 if (!err)
4395                         err = err2;
4396         }
4397
4398 done:
4399         return err;
4400 }
4401
4402 static void tg3_phy_copper_begin(struct tg3 *tp)
4403 {
4404         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4405             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4406                 u32 adv, fc;
4407
4408                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4409                     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4410                         adv = ADVERTISED_10baseT_Half |
4411                               ADVERTISED_10baseT_Full;
4412                         if (tg3_flag(tp, WOL_SPEED_100MB))
4413                                 adv |= ADVERTISED_100baseT_Half |
4414                                        ADVERTISED_100baseT_Full;
4415                         if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4416                                 if (!(tp->phy_flags &
4417                                       TG3_PHYFLG_DISABLE_1G_HD_ADV))
4418                                         adv |= ADVERTISED_1000baseT_Half;
4419                                 adv |= ADVERTISED_1000baseT_Full;
4420                         }
4421
4422                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4423                 } else {
4424                         adv = tp->link_config.advertising;
4425                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4426                                 adv &= ~(ADVERTISED_1000baseT_Half |
4427                                          ADVERTISED_1000baseT_Full);
4428
4429                         fc = tp->link_config.flowctrl;
4430                 }
4431
4432                 tg3_phy_autoneg_cfg(tp, adv, fc);
4433
4434                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4435                     (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4436                         /* Normally during power down we want to autonegotiate
4437                          * the lowest possible speed for WOL. However, to avoid
4438                          * link flap, we leave it untouched.
4439                          */
4440                         return;
4441                 }
4442
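                     /* Start (or restart) autonegotiation with the
                      * advertisement programmed above.
                      */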
4443                 tg3_writephy(tp, MII_BMCR,
4444                              BMCR_ANENABLE | BMCR_ANRESTART);
4445         } else {
4446                 int i;
4447                 u32 bmcr, orig_bmcr;
4448
4449                 tp->link_config.active_speed = tp->link_config.speed;
4450                 tp->link_config.active_duplex = tp->link_config.duplex;
4451
4452                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4453                         /* With autoneg disabled, the 5714/5715 family only
4454                          * links up when the advertisement register has the
4455                          * configured speed enabled.
4456                          */
4457                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4458                 }
4459
4460                 bmcr = 0;
4461                 switch (tp->link_config.speed) {
4462                 default:
4463                 case SPEED_10:
4464                         break;
4465
4466                 case SPEED_100:
4467                         bmcr |= BMCR_SPEED100;
4468                         break;
4469
4470                 case SPEED_1000:
4471                         bmcr |= BMCR_SPEED1000;
4472                         break;
4473                 }
4474
4475                 if (tp->link_config.duplex == DUPLEX_FULL)
4476                         bmcr |= BMCR_FULLDPLX;
4477
4478                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4479                     (bmcr != orig_bmcr)) {
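                             /* Take the link down first (BMCR_LOOPBACK drops
                              * the link) and poll the latched BMSR until
                              * link-down is seen before writing the forced
                              * BMCR value.
                              */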
4480                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4481                         for (i = 0; i < 1500; i++) {
4482                                 u32 tmp;
4483
4484                                 udelay(10);
4485                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4486                                     tg3_readphy(tp, MII_BMSR, &tmp))
4487                                         continue;
4488                                 if (!(tmp & BMSR_LSTATUS)) {
4489                                         udelay(40);
4490                                         break;
4491                                 }
4492                         }
4493                         tg3_writephy(tp, MII_BMCR, bmcr);
4494                         udelay(40);
4495                 }
4496         }
4497 }
4498
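     /* Reconstruct tp->link_config from what is currently programmed in
      * the PHY, so an established link can be described without
      * disturbing it.
      */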
4499 static int tg3_phy_pull_config(struct tg3 *tp)
4500 {
4501         int err;
4502         u32 val;
4503
4504         err = tg3_readphy(tp, MII_BMCR, &val);
4505         if (err)
4506                 goto done;
4507
4508         if (!(val & BMCR_ANENABLE)) {
4509                 tp->link_config.autoneg = AUTONEG_DISABLE;
4510                 tp->link_config.advertising = 0;
4511                 tg3_flag_clear(tp, PAUSE_AUTONEG);
4512
4513                 err = -EIO;
4514
4515                 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4516                 case 0:
4517                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4518                                 goto done;
4519
4520                         tp->link_config.speed = SPEED_10;
4521                         break;
4522                 case BMCR_SPEED100:
4523                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4524                                 goto done;
4525
4526                         tp->link_config.speed = SPEED_100;
4527                         break;
4528                 case BMCR_SPEED1000:
4529                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4530                                 tp->link_config.speed = SPEED_1000;
4531                                 break;
4532                         }
4533                         /* Fall through */
4534                 default:
4535                         goto done;
4536                 }
4537
4538                 if (val & BMCR_FULLDPLX)
4539                         tp->link_config.duplex = DUPLEX_FULL;
4540                 else
4541                         tp->link_config.duplex = DUPLEX_HALF;
4542
4543                 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4544
4545                 err = 0;
4546                 goto done;
4547         }
4548
4549         tp->link_config.autoneg = AUTONEG_ENABLE;
4550         tp->link_config.advertising = ADVERTISED_Autoneg;
4551         tg3_flag_set(tp, PAUSE_AUTONEG);
4552
4553         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4554                 u32 adv;
4555
4556                 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4557                 if (err)
4558                         goto done;
4559
4560                 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4561                 tp->link_config.advertising |= adv | ADVERTISED_TP;
4562
4563                 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4564         } else {
4565                 tp->link_config.advertising |= ADVERTISED_FIBRE;
4566         }
4567
4568         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4569                 u32 adv;
4570
4571                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4572                         err = tg3_readphy(tp, MII_CTRL1000, &val);
4573                         if (err)
4574                                 goto done;
4575
4576                         adv = mii_ctrl1000_to_ethtool_adv_t(val);
4577                 } else {
4578                         err = tg3_readphy(tp, MII_ADVERTISE, &val);
4579                         if (err)
4580                                 goto done;
4581
4582                         adv = tg3_decode_flowctrl_1000X(val);
4583                         tp->link_config.flowctrl = adv;
4584
4585                         val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4586                         adv = mii_adv_to_ethtool_adv_x(val);
4587                 }
4588
4589                 tp->link_config.advertising |= adv;
4590         }
4591
4592 done:
4593         return err;
4594 }
4595
4596 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4597 {
4598         int err;
4599
4600         /* Turn off tap power management and set the
4601          * extended packet length bit. */
4602         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4603
4604         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4605         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4606         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4607         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4608         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4609
4610         udelay(40);
4611
4612         return err;
4613 }
4614
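     /* Compare the EEE configuration pulled from the PHY against the
      * requested settings; returns true when they match.
      */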
4615 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4616 {
4617         struct ethtool_eee eee;
4618
4619         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4620                 return true;
4621
4622         tg3_eee_pull_config(tp, &eee);
4623
4624         if (tp->eee.eee_enabled) {
4625                 if (tp->eee.advertised != eee.advertised ||
4626                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4627                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4628                         return false;
4629         } else {
4630                 /* EEE is disabled but we're advertising */
4631                 if (eee.advertised)
4632                         return false;
4633         }
4634
4635         return true;
4636 }
4637
4638 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4639 {
4640         u32 advmsk, tgtadv, advertising;
4641
4642         advertising = tp->link_config.advertising;
4643         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4644
4645         advmsk = ADVERTISE_ALL;
4646         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4647                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4648                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4649         }
4650
4651         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4652                 return false;
4653
4654         if ((*lcladv & advmsk) != tgtadv)
4655                 return false;
4656
4657         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4658                 u32 tg3_ctrl;
4659
4660                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4661
4662                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4663                         return false;
4664
4665                 if (tgtadv &&
4666                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4667                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4668                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4669                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4670                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4671                 } else {
4672                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4673                 }
4674
4675                 if (tg3_ctrl != tgtadv)
4676                         return false;
4677         }
4678
4679         return true;
4680 }
4681
4682 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4683 {
4684         u32 lpeth = 0;
4685
4686         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4687                 u32 val;
4688
4689                 if (tg3_readphy(tp, MII_STAT1000, &val))
4690                         return false;
4691
4692                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4693         }
4694
4695         if (tg3_readphy(tp, MII_LPA, rmtadv))
4696                 return false;
4697
4698         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4699         tp->link_config.rmt_adv = lpeth;
4700
4701         return true;
4702 }
4703
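     /* Update the net device carrier state when the link state changes
      * and log the change; returns true if a change was reported.
      */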
4704 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4705 {
4706         if (curr_link_up != tp->link_up) {
4707                 if (curr_link_up) {
4708                         netif_carrier_on(tp->dev);
4709                 } else {
4710                         netif_carrier_off(tp->dev);
4711                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4712                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4713                 }
4714
4715                 tg3_link_report(tp);
4716                 return true;
4717         }
4718
4719         return false;
4720 }
4721
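     /* Disable MAC event generation and ack any latched MAC status
      * change bits.
      */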
4722 static void tg3_clear_mac_status(struct tg3 *tp)
4723 {
4724         tw32(MAC_EVENT, 0);
4725
4726         tw32_f(MAC_STATUS,
4727                MAC_STATUS_SYNC_CHANGED |
4728                MAC_STATUS_CFG_CHANGED |
4729                MAC_STATUS_MI_COMPLETION |
4730                MAC_STATUS_LNKSTATE_CHANGED);
4731         udelay(40);
4732 }
4733
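     /* Program the CPMU EEE engine: link-idle detection sources, LPI
      * entry behavior and the exit/debounce timers.
      */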
4734 static void tg3_setup_eee(struct tg3 *tp)
4735 {
4736         u32 val;
4737
4738         val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4739               TG3_CPMU_EEE_LNKIDL_UART_IDL;
4740         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4741                 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4742
4743         tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4744
4745         tw32_f(TG3_CPMU_EEE_CTRL,
4746                TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4747
4748         val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4749               (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4750               TG3_CPMU_EEEMD_LPI_IN_RX |
4751               TG3_CPMU_EEEMD_EEE_ENABLE;
4752
4753         if (tg3_asic_rev(tp) != ASIC_REV_5717)
4754                 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4755
4756         if (tg3_flag(tp, ENABLE_APE))
4757                 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4758
4759         tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4760
4761         tw32_f(TG3_CPMU_EEE_DBTMR1,
4762                TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4763                (tp->eee.tx_lpi_timer & 0xffff));
4764
4765         tw32_f(TG3_CPMU_EEE_DBTMR2,
4766                TG3_CPMU_DBTMR2_APE_TX_2047US |
4767                TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4768 }
4769
4770 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4771 {
4772         bool current_link_up;
4773         u32 bmsr, val;
4774         u32 lcl_adv, rmt_adv;
4775         u16 current_speed;
4776         u8 current_duplex;
4777         int i, err;
4778
4779         tg3_clear_mac_status(tp);
4780
4781         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4782                 tw32_f(MAC_MI_MODE,
4783                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4784                 udelay(80);
4785         }
4786
4787         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4788
4789         /* Some third-party PHYs need to be reset on link going
4790          * down.
4791          */
4792         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4793              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4794              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4795             tp->link_up) {
4796                 tg3_readphy(tp, MII_BMSR, &bmsr);
4797                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4798                     !(bmsr & BMSR_LSTATUS))
4799                         force_reset = true;
4800         }
4801         if (force_reset)
4802                 tg3_phy_reset(tp);
4803
4804         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4805                 tg3_readphy(tp, MII_BMSR, &bmsr);
4806                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4807                     !tg3_flag(tp, INIT_COMPLETE))
4808                         bmsr = 0;
4809
4810                 if (!(bmsr & BMSR_LSTATUS)) {
4811                         err = tg3_init_5401phy_dsp(tp);
4812                         if (err)
4813                                 return err;
4814
4815                         tg3_readphy(tp, MII_BMSR, &bmsr);
4816                         for (i = 0; i < 1000; i++) {
4817                                 udelay(10);
4818                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4819                                     (bmsr & BMSR_LSTATUS)) {
4820                                         udelay(40);
4821                                         break;
4822                                 }
4823                         }
4824
4825                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4826                             TG3_PHY_REV_BCM5401_B0 &&
4827                             !(bmsr & BMSR_LSTATUS) &&
4828                             tp->link_config.active_speed == SPEED_1000) {
4829                                 err = tg3_phy_reset(tp);
4830                                 if (!err)
4831                                         err = tg3_init_5401phy_dsp(tp);
4832                                 if (err)
4833                                         return err;
4834                         }
4835                 }
4836         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4837                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4838                 /* 5701 {A0,B0} CRC bug workaround */
4839                 tg3_writephy(tp, 0x15, 0x0a75);
4840                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4841                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4842                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4843         }
4844
4845         /* Clear pending interrupts... */
4846         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4847         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4848
4849         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4850                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4851         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4852                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4853
4854         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4855             tg3_asic_rev(tp) == ASIC_REV_5701) {
4856                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4857                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4858                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4859                 else
4860                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4861         }
4862
4863         current_link_up = false;
4864         current_speed = SPEED_UNKNOWN;
4865         current_duplex = DUPLEX_UNKNOWN;
4866         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4867         tp->link_config.rmt_adv = 0;
4868
4869         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4870                 err = tg3_phy_auxctl_read(tp,
4871                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4872                                           &val);
4873                 if (!err && !(val & (1 << 10))) {
4874                         tg3_phy_auxctl_write(tp,
4875                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4876                                              val | (1 << 10));
4877                         goto relink;
4878                 }
4879         }
4880
4881         bmsr = 0;
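             /* BMSR latches link-down, so read it twice: the first read
              * clears the latch, the second reflects the current state.
              * Poll for link-up (up to ~4 ms).
              */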
4882         for (i = 0; i < 100; i++) {
4883                 tg3_readphy(tp, MII_BMSR, &bmsr);
4884                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4885                     (bmsr & BMSR_LSTATUS))
4886                         break;
4887                 udelay(40);
4888         }
4889
4890         if (bmsr & BMSR_LSTATUS) {
4891                 u32 aux_stat, bmcr;
4892
4893                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4894                 for (i = 0; i < 2000; i++) {
4895                         udelay(10);
4896                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4897                             aux_stat)
4898                                 break;
4899                 }
4900
4901                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4902                                              &current_speed,
4903                                              &current_duplex);
4904
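                     /* Poll until BMCR reads back a sane value; 0 and
                      * 0x7fff are what a failed or in-reset read
                      * typically returns.
                      */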
4905                 bmcr = 0;
4906                 for (i = 0; i < 200; i++) {
4907                         tg3_readphy(tp, MII_BMCR, &bmcr);
4908                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4909                                 continue;
4910                         if (bmcr && bmcr != 0x7fff)
4911                                 break;
4912                         udelay(10);
4913                 }
4914
4915                 lcl_adv = 0;
4916                 rmt_adv = 0;
4917
4918                 tp->link_config.active_speed = current_speed;
4919                 tp->link_config.active_duplex = current_duplex;
4920
4921                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4922                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4923
4924                         if ((bmcr & BMCR_ANENABLE) &&
4925                             eee_config_ok &&
4926                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4927                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4928                                 current_link_up = true;
4929
4930                         /* Changes to EEE settings take effect only after a
4931                          * PHY reset.  If a reset was skipped because Link
4932                          * Flap Avoidance is enabled, do it now.
4933                          */
4934                         if (!eee_config_ok &&
4935                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4936                             !force_reset) {
4937                                 tg3_setup_eee(tp);
4938                                 tg3_phy_reset(tp);
4939                         }
4940                 } else {
4941                         if (!(bmcr & BMCR_ANENABLE) &&
4942                             tp->link_config.speed == current_speed &&
4943                             tp->link_config.duplex == current_duplex) {
4944                                 current_link_up = true;
4945                         }
4946                 }
4947
4948                 if (current_link_up &&
4949                     tp->link_config.active_duplex == DUPLEX_FULL) {
4950                         u32 reg, bit;
4951
4952                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4953                                 reg = MII_TG3_FET_GEN_STAT;
4954                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4955                         } else {
4956                                 reg = MII_TG3_EXT_STAT;
4957                                 bit = MII_TG3_EXT_STAT_MDIX;
4958                         }
4959
4960                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4961                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4962
4963                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4964                 }
4965         }
4966
4967 relink:
4968         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4969                 tg3_phy_copper_begin(tp);
4970
4971                 if (tg3_flag(tp, ROBOSWITCH)) {
4972                         current_link_up = true;
4973                         /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4974                         current_speed = SPEED_1000;
4975                         current_duplex = DUPLEX_FULL;
4976                         tp->link_config.active_speed = current_speed;
4977                         tp->link_config.active_duplex = current_duplex;
4978                 }
4979
4980                 tg3_readphy(tp, MII_BMSR, &bmsr);
4981                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4982                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4983                         current_link_up = true;
4984         }
4985
4986         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4987         if (current_link_up) {
4988                 if (tp->link_config.active_speed == SPEED_100 ||
4989                     tp->link_config.active_speed == SPEED_10)
4990                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4991                 else
4992                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4993         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4994                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4995         else
4996                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4997
4998         /* In order for the 5750 core in BCM4785 chip to work properly
4999          * in RGMII mode, the Led Control Register must be set up.
5000          */
5001         if (tg3_flag(tp, RGMII_MODE)) {
5002                 u32 led_ctrl = tr32(MAC_LED_CTRL);
5003                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5004
5005                 if (tp->link_config.active_speed == SPEED_10)
5006                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5007                 else if (tp->link_config.active_speed == SPEED_100)
5008                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5009                                      LED_CTRL_100MBPS_ON);
5010                 else if (tp->link_config.active_speed == SPEED_1000)
5011                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5012                                      LED_CTRL_1000MBPS_ON);
5013
5014                 tw32(MAC_LED_CTRL, led_ctrl);
5015                 udelay(40);
5016         }
5017
5018         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5019         if (tp->link_config.active_duplex == DUPLEX_HALF)
5020                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5021
5022         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5023                 if (current_link_up &&
5024                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5025                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5026                 else
5027                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5028         }
5029
5030         /* ??? Without this setting Netgear GA302T PHY does not
5031          * ??? send/receive packets...
5032          */
5033         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5034             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5035                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5036                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5037                 udelay(80);
5038         }
5039
5040         tw32_f(MAC_MODE, tp->mac_mode);
5041         udelay(40);
5042
5043         tg3_phy_eee_adjust(tp, current_link_up);
5044
5045         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5046                 /* Polled via timer. */
5047                 tw32_f(MAC_EVENT, 0);
5048         } else {
5049                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5050         }
5051         udelay(40);
5052
5053         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5054             current_link_up &&
5055             tp->link_config.active_speed == SPEED_1000 &&
5056             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5057                 udelay(120);
5058                 tw32_f(MAC_STATUS,
5059                      (MAC_STATUS_SYNC_CHANGED |
5060                       MAC_STATUS_CFG_CHANGED));
5061                 udelay(40);
5062                 tg3_write_mem(tp,
5063                               NIC_SRAM_FIRMWARE_MBOX,
5064                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5065         }
5066
5067         /* Prevent send BD corruption: disable PCIe CLKREQ at 10/100. */
5068         if (tg3_flag(tp, CLKREQ_BUG)) {
5069                 if (tp->link_config.active_speed == SPEED_100 ||
5070                     tp->link_config.active_speed == SPEED_10)
5071                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5072                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5073                 else
5074                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5075                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5076         }
5077
5078         tg3_test_and_report_link_chg(tp, current_link_up);
5079
5080         return 0;
5081 }
5082
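     /* Software state for the 802.3 clause 37 (1000BASE-X)
      * autonegotiation state machine driven by tg3_fiber_aneg_smachine().
      */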
5083 struct tg3_fiber_aneginfo {
5084         int state;
5085 #define ANEG_STATE_UNKNOWN              0
5086 #define ANEG_STATE_AN_ENABLE            1
5087 #define ANEG_STATE_RESTART_INIT         2
5088 #define ANEG_STATE_RESTART              3
5089 #define ANEG_STATE_DISABLE_LINK_OK      4
5090 #define ANEG_STATE_ABILITY_DETECT_INIT  5
5091 #define ANEG_STATE_ABILITY_DETECT       6
5092 #define ANEG_STATE_ACK_DETECT_INIT      7
5093 #define ANEG_STATE_ACK_DETECT           8
5094 #define ANEG_STATE_COMPLETE_ACK_INIT    9
5095 #define ANEG_STATE_COMPLETE_ACK         10
5096 #define ANEG_STATE_IDLE_DETECT_INIT     11
5097 #define ANEG_STATE_IDLE_DETECT          12
5098 #define ANEG_STATE_LINK_OK              13
5099 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
5100 #define ANEG_STATE_NEXT_PAGE_WAIT       15
5101
5102         u32 flags;
5103 #define MR_AN_ENABLE            0x00000001
5104 #define MR_RESTART_AN           0x00000002
5105 #define MR_AN_COMPLETE          0x00000004
5106 #define MR_PAGE_RX              0x00000008
5107 #define MR_NP_LOADED            0x00000010
5108 #define MR_TOGGLE_TX            0x00000020
5109 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
5110 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
5111 #define MR_LP_ADV_SYM_PAUSE     0x00000100
5112 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
5113 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5114 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5115 #define MR_LP_ADV_NEXT_PAGE     0x00001000
5116 #define MR_TOGGLE_RX            0x00002000
5117 #define MR_NP_RX                0x00004000
5118
5119 #define MR_LINK_OK              0x80000000
5120
5121         unsigned long link_time, cur_time;
5122
5123         u32 ability_match_cfg;
5124         int ability_match_count;
5125
5126         char ability_match, idle_match, ack_match;
5127
5128         u32 txconfig, rxconfig;
5129 #define ANEG_CFG_NP             0x00000080
5130 #define ANEG_CFG_ACK            0x00000040
5131 #define ANEG_CFG_RF2            0x00000020
5132 #define ANEG_CFG_RF1            0x00000010
5133 #define ANEG_CFG_PS2            0x00000001
5134 #define ANEG_CFG_PS1            0x00008000
5135 #define ANEG_CFG_HD             0x00004000
5136 #define ANEG_CFG_FD             0x00002000
5137 #define ANEG_CFG_INVAL          0x00001f06
5138
5139 };
5140 #define ANEG_OK         0
5141 #define ANEG_DONE       1
5142 #define ANEG_TIMER_ENAB 2
5143 #define ANEG_FAILED     -1
5144
5145 #define ANEG_STATE_SETTLE_TIME  10000
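     /* ANEG_STATE_SETTLE_TIME is in state-machine ticks; fiber_autoneg()
      * advances one tick per microsecond, so this is roughly 10 ms.
      */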
5146
5147 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5148                                    struct tg3_fiber_aneginfo *ap)
5149 {
5150         u16 flowctrl;
5151         unsigned long delta;
5152         u32 rx_cfg_reg;
5153         int ret;
5154
5155         if (ap->state == ANEG_STATE_UNKNOWN) {
5156                 ap->rxconfig = 0;
5157                 ap->link_time = 0;
5158                 ap->cur_time = 0;
5159                 ap->ability_match_cfg = 0;
5160                 ap->ability_match_count = 0;
5161                 ap->ability_match = 0;
5162                 ap->idle_match = 0;
5163                 ap->ack_match = 0;
5164         }
5165         ap->cur_time++;
5166
5167         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5168                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5169
5170                 if (rx_cfg_reg != ap->ability_match_cfg) {
5171                         ap->ability_match_cfg = rx_cfg_reg;
5172                         ap->ability_match = 0;
5173                         ap->ability_match_count = 0;
5174                 } else {
5175                         if (++ap->ability_match_count > 1) {
5176                                 ap->ability_match = 1;
5177                                 ap->ability_match_cfg = rx_cfg_reg;
5178                         }
5179                 }
5180                 if (rx_cfg_reg & ANEG_CFG_ACK)
5181                         ap->ack_match = 1;
5182                 else
5183                         ap->ack_match = 0;
5184
5185                 ap->idle_match = 0;
5186         } else {
5187                 ap->idle_match = 1;
5188                 ap->ability_match_cfg = 0;
5189                 ap->ability_match_count = 0;
5190                 ap->ability_match = 0;
5191                 ap->ack_match = 0;
5192
5193                 rx_cfg_reg = 0;
5194         }
5195
5196         ap->rxconfig = rx_cfg_reg;
5197         ret = ANEG_OK;
5198
5199         switch (ap->state) {
5200         case ANEG_STATE_UNKNOWN:
5201                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5202                         ap->state = ANEG_STATE_AN_ENABLE;
5203
5204                 /* fallthru */
5205         case ANEG_STATE_AN_ENABLE:
5206                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5207                 if (ap->flags & MR_AN_ENABLE) {
5208                         ap->link_time = 0;
5209                         ap->cur_time = 0;
5210                         ap->ability_match_cfg = 0;
5211                         ap->ability_match_count = 0;
5212                         ap->ability_match = 0;
5213                         ap->idle_match = 0;
5214                         ap->ack_match = 0;
5215
5216                         ap->state = ANEG_STATE_RESTART_INIT;
5217                 } else {
5218                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
5219                 }
5220                 break;
5221
5222         case ANEG_STATE_RESTART_INIT:
5223                 ap->link_time = ap->cur_time;
5224                 ap->flags &= ~(MR_NP_LOADED);
5225                 ap->txconfig = 0;
5226                 tw32(MAC_TX_AUTO_NEG, 0);
5227                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5228                 tw32_f(MAC_MODE, tp->mac_mode);
5229                 udelay(40);
5230
5231                 ret = ANEG_TIMER_ENAB;
5232                 ap->state = ANEG_STATE_RESTART;
5233
5234                 /* fallthru */
5235         case ANEG_STATE_RESTART:
5236                 delta = ap->cur_time - ap->link_time;
5237                 if (delta > ANEG_STATE_SETTLE_TIME)
5238                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5239                 else
5240                         ret = ANEG_TIMER_ENAB;
5241                 break;
5242
5243         case ANEG_STATE_DISABLE_LINK_OK:
5244                 ret = ANEG_DONE;
5245                 break;
5246
5247         case ANEG_STATE_ABILITY_DETECT_INIT:
5248                 ap->flags &= ~(MR_TOGGLE_TX);
5249                 ap->txconfig = ANEG_CFG_FD;
5250                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5251                 if (flowctrl & ADVERTISE_1000XPAUSE)
5252                         ap->txconfig |= ANEG_CFG_PS1;
5253                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5254                         ap->txconfig |= ANEG_CFG_PS2;
5255                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5256                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5257                 tw32_f(MAC_MODE, tp->mac_mode);
5258                 udelay(40);
5259
5260                 ap->state = ANEG_STATE_ABILITY_DETECT;
5261                 break;
5262
5263         case ANEG_STATE_ABILITY_DETECT:
5264                 if (ap->ability_match != 0 && ap->rxconfig != 0)
5265                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
5266                 break;
5267
5268         case ANEG_STATE_ACK_DETECT_INIT:
5269                 ap->txconfig |= ANEG_CFG_ACK;
5270                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5271                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5272                 tw32_f(MAC_MODE, tp->mac_mode);
5273                 udelay(40);
5274
5275                 ap->state = ANEG_STATE_ACK_DETECT;
5276
5277                 /* fallthru */
5278         case ANEG_STATE_ACK_DETECT:
5279                 if (ap->ack_match != 0) {
5280                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5281                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5282                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5283                         } else {
5284                                 ap->state = ANEG_STATE_AN_ENABLE;
5285                         }
5286                 } else if (ap->ability_match != 0 &&
5287                            ap->rxconfig == 0) {
5288                         ap->state = ANEG_STATE_AN_ENABLE;
5289                 }
5290                 break;
5291
5292         case ANEG_STATE_COMPLETE_ACK_INIT:
5293                 if (ap->rxconfig & ANEG_CFG_INVAL) {
5294                         ret = ANEG_FAILED;
5295                         break;
5296                 }
5297                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5298                                MR_LP_ADV_HALF_DUPLEX |
5299                                MR_LP_ADV_SYM_PAUSE |
5300                                MR_LP_ADV_ASYM_PAUSE |
5301                                MR_LP_ADV_REMOTE_FAULT1 |
5302                                MR_LP_ADV_REMOTE_FAULT2 |
5303                                MR_LP_ADV_NEXT_PAGE |
5304                                MR_TOGGLE_RX |
5305                                MR_NP_RX);
5306                 if (ap->rxconfig & ANEG_CFG_FD)
5307                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5308                 if (ap->rxconfig & ANEG_CFG_HD)
5309                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5310                 if (ap->rxconfig & ANEG_CFG_PS1)
5311                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
5312                 if (ap->rxconfig & ANEG_CFG_PS2)
5313                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5314                 if (ap->rxconfig & ANEG_CFG_RF1)
5315                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5316                 if (ap->rxconfig & ANEG_CFG_RF2)
5317                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5318                 if (ap->rxconfig & ANEG_CFG_NP)
5319                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
5320
5321                 ap->link_time = ap->cur_time;
5322
5323                 ap->flags ^= (MR_TOGGLE_TX);
5324                 if (ap->rxconfig & 0x0008)
5325                         ap->flags |= MR_TOGGLE_RX;
5326                 if (ap->rxconfig & ANEG_CFG_NP)
5327                         ap->flags |= MR_NP_RX;
5328                 ap->flags |= MR_PAGE_RX;
5329
5330                 ap->state = ANEG_STATE_COMPLETE_ACK;
5331                 ret = ANEG_TIMER_ENAB;
5332                 break;
5333
5334         case ANEG_STATE_COMPLETE_ACK:
5335                 if (ap->ability_match != 0 &&
5336                     ap->rxconfig == 0) {
5337                         ap->state = ANEG_STATE_AN_ENABLE;
5338                         break;
5339                 }
5340                 delta = ap->cur_time - ap->link_time;
5341                 if (delta > ANEG_STATE_SETTLE_TIME) {
5342                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5343                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5344                         } else {
5345                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5346                                     !(ap->flags & MR_NP_RX)) {
5347                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5348                                 } else {
5349                                         ret = ANEG_FAILED;
5350                                 }
5351                         }
5352                 }
5353                 break;
5354
5355         case ANEG_STATE_IDLE_DETECT_INIT:
5356                 ap->link_time = ap->cur_time;
5357                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5358                 tw32_f(MAC_MODE, tp->mac_mode);
5359                 udelay(40);
5360
5361                 ap->state = ANEG_STATE_IDLE_DETECT;
5362                 ret = ANEG_TIMER_ENAB;
5363                 break;
5364
5365         case ANEG_STATE_IDLE_DETECT:
5366                 if (ap->ability_match != 0 &&
5367                     ap->rxconfig == 0) {
5368                         ap->state = ANEG_STATE_AN_ENABLE;
5369                         break;
5370                 }
5371                 delta = ap->cur_time - ap->link_time;
5372                 if (delta > ANEG_STATE_SETTLE_TIME) {
5373                         /* XXX another gem from the Broadcom driver :( */
5374                         ap->state = ANEG_STATE_LINK_OK;
5375                 }
5376                 break;
5377
5378         case ANEG_STATE_LINK_OK:
5379                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5380                 ret = ANEG_DONE;
5381                 break;
5382
5383         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5384                 /* ??? unimplemented */
5385                 break;
5386
5387         case ANEG_STATE_NEXT_PAGE_WAIT:
5388                 /* ??? unimplemented */
5389                 break;
5390
5391         default:
5392                 ret = ANEG_FAILED;
5393                 break;
5394         }
5395
5396         return ret;
5397 }
5398
5399 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5400 {
5401         int res = 0;
5402         struct tg3_fiber_aneginfo aninfo;
5403         int status = ANEG_FAILED;
5404         unsigned int tick;
5405         u32 tmp;
5406
5407         tw32_f(MAC_TX_AUTO_NEG, 0);
5408
5409         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5410         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5411         udelay(40);
5412
5413         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5414         udelay(40);
5415
5416         memset(&aninfo, 0, sizeof(aninfo));
5417         aninfo.flags |= MR_AN_ENABLE;
5418         aninfo.state = ANEG_STATE_UNKNOWN;
5419         aninfo.cur_time = 0;
5420         tick = 0;
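             /* Crank the state machine for at most ~195 ms (one tick per
              * microsecond) or until it completes or fails.
              */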
5421         while (++tick < 195000) {
5422                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5423                 if (status == ANEG_DONE || status == ANEG_FAILED)
5424                         break;
5425
5426                 udelay(1);
5427         }
5428
5429         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5430         tw32_f(MAC_MODE, tp->mac_mode);
5431         udelay(40);
5432
5433         *txflags = aninfo.txconfig;
5434         *rxflags = aninfo.flags;
5435
5436         if (status == ANEG_DONE &&
5437             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5438                              MR_LP_ADV_FULL_DUPLEX)))
5439                 res = 1;
5440
5441         return res;
5442 }
5443
5444 static void tg3_init_bcm8002(struct tg3 *tp)
5445 {
5446         u32 mac_status = tr32(MAC_STATUS);
5447         int i;
5448
5449         /* Reset when initializing for the first time or when we have a link. */
5450         if (tg3_flag(tp, INIT_COMPLETE) &&
5451             !(mac_status & MAC_STATUS_PCS_SYNCED))
5452                 return;
5453
5454         /* Set PLL lock range. */
5455         tg3_writephy(tp, 0x16, 0x8007);
5456
5457         /* SW reset */
5458         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5459
5460         /* Wait for reset to complete. */
5461         /* XXX schedule_timeout() ... */
5462         for (i = 0; i < 500; i++)
5463                 udelay(10);
5464
5465         /* Config mode; select PMA/Ch 1 regs. */
5466         tg3_writephy(tp, 0x10, 0x8411);
5467
5468         /* Enable auto-lock and comdet, select txclk for tx. */
5469         tg3_writephy(tp, 0x11, 0x0a10);
5470
5471         tg3_writephy(tp, 0x18, 0x00a0);
5472         tg3_writephy(tp, 0x16, 0x41ff);
5473
5474         /* Assert and deassert POR. */
5475         tg3_writephy(tp, 0x13, 0x0400);
5476         udelay(40);
5477         tg3_writephy(tp, 0x13, 0x0000);
5478
5479         tg3_writephy(tp, 0x11, 0x0a50);
5480         udelay(40);
5481         tg3_writephy(tp, 0x11, 0x0a10);
5482
5483         /* Wait for signal to stabilize */
5484         /* XXX schedule_timeout() ... */
5485         for (i = 0; i < 15000; i++)
5486                 udelay(10);
5487
5488         /* Deselect the channel register so we can read the PHYID
5489          * later.
5490          */
5491         tg3_writephy(tp, 0x10, 0x8011);
5492 }
5493
5494 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5495 {
5496         u16 flowctrl;
5497         bool current_link_up;
5498         u32 sg_dig_ctrl, sg_dig_status;
5499         u32 serdes_cfg, expected_sg_dig_ctrl;
5500         int workaround, port_a;
5501
5502         serdes_cfg = 0;
5503         expected_sg_dig_ctrl = 0;
5504         workaround = 0;
5505         port_a = 1;
5506         current_link_up = false;
5507
5508         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5509             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5510                 workaround = 1;
5511                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5512                         port_a = 0;
5513
5514                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5515                 /* preserve bits 20-23 for voltage regulator */
5516                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5517         }
5518
5519         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5520
5521         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5522                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5523                         if (workaround) {
5524                                 u32 val = serdes_cfg;
5525
5526                                 if (port_a)
5527                                         val |= 0xc010000;
5528                                 else
5529                                         val |= 0x4010000;
5530                                 tw32_f(MAC_SERDES_CFG, val);
5531                         }
5532
5533                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5534                 }
5535                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5536                         tg3_setup_flow_control(tp, 0, 0);
5537                         current_link_up = true;
5538                 }
5539                 goto out;
5540         }
5541
5542         /* Want auto-negotiation.  */
5543         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5544
5545         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5546         if (flowctrl & ADVERTISE_1000XPAUSE)
5547                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5548         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5549                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5550
5551         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5552                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5553                     tp->serdes_counter &&
5554                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5555                                     MAC_STATUS_RCVD_CFG)) ==
5556                      MAC_STATUS_PCS_SYNCED)) {
5557                         tp->serdes_counter--;
5558                         current_link_up = true;
5559                         goto out;
5560                 }
5561 restart_autoneg:
5562                 if (workaround)
5563                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5564                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5565                 udelay(5);
5566                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5567
5568                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5569                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5570         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5571                                  MAC_STATUS_SIGNAL_DET)) {
5572                 sg_dig_status = tr32(SG_DIG_STATUS);
5573                 mac_status = tr32(MAC_STATUS);
5574
5575                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5576                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5577                         u32 local_adv = 0, remote_adv = 0;
5578
5579                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5580                                 local_adv |= ADVERTISE_1000XPAUSE;
5581                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5582                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5583
5584                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5585                                 remote_adv |= LPA_1000XPAUSE;
5586                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5587                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5588
5589                         tp->link_config.rmt_adv =
5590                                            mii_adv_to_ethtool_adv_x(remote_adv);
5591
5592                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5593                         current_link_up = true;
5594                         tp->serdes_counter = 0;
5595                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5596                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5597                         if (tp->serdes_counter)
5598                                 tp->serdes_counter--;
5599                         else {
5600                                 if (workaround) {
5601                                         u32 val = serdes_cfg;
5602
5603                                         if (port_a)
5604                                                 val |= 0xc010000;
5605                                         else
5606                                                 val |= 0x4010000;
5607
5608                                         tw32_f(MAC_SERDES_CFG, val);
5609                                 }
5610
5611                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5612                                 udelay(40);
5613
5614                                 /* Link parallel detection - link is up only
5615                                  * if we have PCS_SYNC and are not receiving
5616                                  * config code words. */
5617                                 mac_status = tr32(MAC_STATUS);
5618                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5619                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5620                                         tg3_setup_flow_control(tp, 0, 0);
5621                                         current_link_up = true;
5622                                         tp->phy_flags |=
5623                                                 TG3_PHYFLG_PARALLEL_DETECT;
5624                                         tp->serdes_counter =
5625                                                 SERDES_PARALLEL_DET_TIMEOUT;
5626                                 } else
5627                                         goto restart_autoneg;
5628                         }
5629                 }
5630         } else {
5631                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5632                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5633         }
5634
5635 out:
5636         return current_link_up;
5637 }
5638
5639 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5640 {
5641         bool current_link_up = false;
5642
5643         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5644                 goto out;
5645
5646         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5647                 u32 txflags, rxflags;
5648                 int i;
5649
5650                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5651                         u32 local_adv = 0, remote_adv = 0;
5652
5653                         if (txflags & ANEG_CFG_PS1)
5654                                 local_adv |= ADVERTISE_1000XPAUSE;
5655                         if (txflags & ANEG_CFG_PS2)
5656                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5657
5658                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5659                                 remote_adv |= LPA_1000XPAUSE;
5660                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5661                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5662
5663                         tp->link_config.rmt_adv =
5664                                            mii_adv_to_ethtool_adv_x(remote_adv);
5665
5666                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5667
5668                         current_link_up = true;
5669                 }
5670                 for (i = 0; i < 30; i++) {
5671                         udelay(20);
5672                         tw32_f(MAC_STATUS,
5673                                (MAC_STATUS_SYNC_CHANGED |
5674                                 MAC_STATUS_CFG_CHANGED));
5675                         udelay(40);
5676                         if ((tr32(MAC_STATUS) &
5677                              (MAC_STATUS_SYNC_CHANGED |
5678                               MAC_STATUS_CFG_CHANGED)) == 0)
5679                                 break;
5680                 }
5681
5682                 mac_status = tr32(MAC_STATUS);
5683                 if (!current_link_up &&
5684                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5685                     !(mac_status & MAC_STATUS_RCVD_CFG))
5686                         current_link_up = true;
5687         } else {
5688                 tg3_setup_flow_control(tp, 0, 0);
5689
5690                 /* Forcing 1000FD link up. */
5691                 current_link_up = true;
5692
5693                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5694                 udelay(40);
5695
5696                 tw32_f(MAC_MODE, tp->mac_mode);
5697                 udelay(40);
5698         }
5699
5700 out:
5701         return current_link_up;
5702 }
5703
5704 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5705 {
5706         u32 orig_pause_cfg;
5707         u16 orig_active_speed;
5708         u8 orig_active_duplex;
5709         u32 mac_status;
5710         bool current_link_up;
5711         int i;
5712
5713         orig_pause_cfg = tp->link_config.active_flowctrl;
5714         orig_active_speed = tp->link_config.active_speed;
5715         orig_active_duplex = tp->link_config.active_duplex;
5716
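             /* Fast path: if the link is already up, the PCS is still
              * synced and no new config words have been received, just ack
              * the status-change bits and keep the current configuration.
              */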
5717         if (!tg3_flag(tp, HW_AUTONEG) &&
5718             tp->link_up &&
5719             tg3_flag(tp, INIT_COMPLETE)) {
5720                 mac_status = tr32(MAC_STATUS);
5721                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5722                                MAC_STATUS_SIGNAL_DET |
5723                                MAC_STATUS_CFG_CHANGED |
5724                                MAC_STATUS_RCVD_CFG);
5725                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5726                                    MAC_STATUS_SIGNAL_DET)) {
5727                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5728                                             MAC_STATUS_CFG_CHANGED));
5729                         return 0;
5730                 }
5731         }
5732
5733         tw32_f(MAC_TX_AUTO_NEG, 0);
5734
5735         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5736         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5737         tw32_f(MAC_MODE, tp->mac_mode);
5738         udelay(40);
5739
5740         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5741                 tg3_init_bcm8002(tp);
5742
5743         /* Enable link change events even when polling the serdes. */
5744         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5745         udelay(40);
5746
5747         current_link_up = false;
5748         tp->link_config.rmt_adv = 0;
5749         mac_status = tr32(MAC_STATUS);
5750
5751         if (tg3_flag(tp, HW_AUTONEG))
5752                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5753         else
5754                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5755
5756         tp->napi[0].hw_status->status =
5757                 (SD_STATUS_UPDATED |
5758                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5759
5760         for (i = 0; i < 100; i++) {
5761                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5762                                     MAC_STATUS_CFG_CHANGED));
5763                 udelay(5);
5764                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5765                                          MAC_STATUS_CFG_CHANGED |
5766                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5767                         break;
5768         }
5769
5770         mac_status = tr32(MAC_STATUS);
5771         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5772                 current_link_up = false;
5773                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5774                     tp->serdes_counter == 0) {
5775                         tw32_f(MAC_MODE, (tp->mac_mode |
5776                                           MAC_MODE_SEND_CONFIGS));
5777                         udelay(1);
5778                         tw32_f(MAC_MODE, tp->mac_mode);
5779                 }
5780         }
5781
5782         if (current_link_up) {
5783                 tp->link_config.active_speed = SPEED_1000;
5784                 tp->link_config.active_duplex = DUPLEX_FULL;
5785                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5786                                     LED_CTRL_LNKLED_OVERRIDE |
5787                                     LED_CTRL_1000MBPS_ON));
5788         } else {
5789                 tp->link_config.active_speed = SPEED_UNKNOWN;
5790                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5791                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5792                                     LED_CTRL_LNKLED_OVERRIDE |
5793                                     LED_CTRL_TRAFFIC_OVERRIDE));
5794         }
5795
5796         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5797                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5798                 if (orig_pause_cfg != now_pause_cfg ||
5799                     orig_active_speed != tp->link_config.active_speed ||
5800                     orig_active_duplex != tp->link_config.active_duplex)
5801                         tg3_link_report(tp);
5802         }
5803
5804         return 0;
5805 }
5806
5807 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5808 {
5809         int err = 0;
5810         u32 bmsr, bmcr;
5811         u16 current_speed = SPEED_UNKNOWN;
5812         u8 current_duplex = DUPLEX_UNKNOWN;
5813         bool current_link_up = false;
5814         u32 local_adv, remote_adv, sgsr;
5815
5816         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5817              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5818              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5819              (sgsr & SERDES_TG3_SGMII_MODE)) {
5820
5821                 if (force_reset)
5822                         tg3_phy_reset(tp);
5823
5824                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5825
5826                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5827                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5828                 } else {
5829                         current_link_up = true;
5830                         if (sgsr & SERDES_TG3_SPEED_1000) {
5831                                 current_speed = SPEED_1000;
5832                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5833                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5834                                 current_speed = SPEED_100;
5835                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5836                         } else {
5837                                 current_speed = SPEED_10;
5838                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5839                         }
5840
5841                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5842                                 current_duplex = DUPLEX_FULL;
5843                         else
5844                                 current_duplex = DUPLEX_HALF;
5845                 }
5846
5847                 tw32_f(MAC_MODE, tp->mac_mode);
5848                 udelay(40);
5849
5850                 tg3_clear_mac_status(tp);
5851
5852                 goto fiber_setup_done;
5853         }
5854
5855         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5856         tw32_f(MAC_MODE, tp->mac_mode);
5857         udelay(40);
5858
5859         tg3_clear_mac_status(tp);
5860
5861         if (force_reset)
5862                 tg3_phy_reset(tp);
5863
5864         tp->link_config.rmt_adv = 0;
5865
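        /* The BMSR link status bit latches link-down events (IEEE 802.3
         * latched-low behavior), so read the register twice: the second
         * read reflects the current link state.
         */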
5866         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5867         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5868         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5869                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5870                         bmsr |= BMSR_LSTATUS;
5871                 else
5872                         bmsr &= ~BMSR_LSTATUS;
5873         }
5874
5875         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5876
5877         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5878             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5879                 /* do nothing, just check for link up at the end */
5880         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5881                 u32 adv, newadv;
5882
5883                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5884                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5885                                  ADVERTISE_1000XPAUSE |
5886                                  ADVERTISE_1000XPSE_ASYM |
5887                                  ADVERTISE_SLCT);
5888
5889                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5890                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5891
5892                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5893                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5894                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5895                         tg3_writephy(tp, MII_BMCR, bmcr);
5896
5897                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5898                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5899                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5900
5901                         return err;
5902                 }
5903         } else {
5904                 u32 new_bmcr;
5905
5906                 bmcr &= ~BMCR_SPEED1000;
5907                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5908
5909                 if (tp->link_config.duplex == DUPLEX_FULL)
5910                         new_bmcr |= BMCR_FULLDPLX;
5911
5912                 if (new_bmcr != bmcr) {
5913                         /* BMCR_SPEED1000 is a reserved bit that needs
5914                          * to be set on write.
5915                          */
5916                         new_bmcr |= BMCR_SPEED1000;
5917
5918                         /* Force a linkdown */
5919                         if (tp->link_up) {
5920                                 u32 adv;
5921
5922                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5923                                 adv &= ~(ADVERTISE_1000XFULL |
5924                                          ADVERTISE_1000XHALF |
5925                                          ADVERTISE_SLCT);
5926                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5927                                 tg3_writephy(tp, MII_BMCR, bmcr |
5928                                                            BMCR_ANRESTART |
5929                                                            BMCR_ANENABLE);
5930                                 udelay(10);
5931                                 tg3_carrier_off(tp);
5932                         }
5933                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5934                         bmcr = new_bmcr;
5935                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5936                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5937                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5938                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5939                                         bmsr |= BMSR_LSTATUS;
5940                                 else
5941                                         bmsr &= ~BMSR_LSTATUS;
5942                         }
5943                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5944                 }
5945         }
5946
5947         if (bmsr & BMSR_LSTATUS) {
5948                 current_speed = SPEED_1000;
5949                 current_link_up = true;
5950                 if (bmcr & BMCR_FULLDPLX)
5951                         current_duplex = DUPLEX_FULL;
5952                 else
5953                         current_duplex = DUPLEX_HALF;
5954
5955                 local_adv = 0;
5956                 remote_adv = 0;
5957
5958                 if (bmcr & BMCR_ANENABLE) {
5959                         u32 common;
5960
5961                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5962                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5963                         common = local_adv & remote_adv;
5964                         if (common & (ADVERTISE_1000XHALF |
5965                                       ADVERTISE_1000XFULL)) {
5966                                 if (common & ADVERTISE_1000XFULL)
5967                                         current_duplex = DUPLEX_FULL;
5968                                 else
5969                                         current_duplex = DUPLEX_HALF;
5970
5971                                 tp->link_config.rmt_adv =
5972                                            mii_adv_to_ethtool_adv_x(remote_adv);
5973                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5974                                 /* Link is up via parallel detect */
5975                         } else {
5976                                 current_link_up = false;
5977                         }
5978                 }
5979         }
5980
5981 fiber_setup_done:
5982         if (current_link_up && current_duplex == DUPLEX_FULL)
5983                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5984
5985         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5986         if (tp->link_config.active_duplex == DUPLEX_HALF)
5987                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5988
5989         tw32_f(MAC_MODE, tp->mac_mode);
5990         udelay(40);
5991
5992         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5993
5994         tp->link_config.active_speed = current_speed;
5995         tp->link_config.active_duplex = current_duplex;
5996
5997         tg3_test_and_report_link_chg(tp, current_link_up);
5998         return err;
5999 }
6000
6001 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6002 {
6003         if (tp->serdes_counter) {
6004                 /* Give autoneg time to complete. */
6005                 tp->serdes_counter--;
6006                 return;
6007         }
6008
6009         if (!tp->link_up &&
6010             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6011                 u32 bmcr;
6012
6013                 tg3_readphy(tp, MII_BMCR, &bmcr);
6014                 if (bmcr & BMCR_ANENABLE) {
6015                         u32 phy1, phy2;
6016
6017                         /* Select shadow register 0x1f */
6018                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6019                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6020
6021                         /* Select expansion interrupt status register */
6022                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6023                                          MII_TG3_DSP_EXP1_INT_STAT);
6024                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6025                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6026
6027                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6028                                 /* We have signal detect and are not
6029                                  * receiving config code words, so the link
6030                                  * is up by parallel detection.
6031                                  */
6032
6033                                 bmcr &= ~BMCR_ANENABLE;
6034                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6035                                 tg3_writephy(tp, MII_BMCR, bmcr);
6036                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6037                         }
6038                 }
6039         } else if (tp->link_up &&
6040                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6041                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6042                 u32 phy2;
6043
6044                 /* Select expansion interrupt status register */
6045                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6046                                  MII_TG3_DSP_EXP1_INT_STAT);
6047                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6048                 if (phy2 & 0x20) {
6049                         u32 bmcr;
6050
6051                         /* Config code words received, turn on autoneg. */
6052                         tg3_readphy(tp, MII_BMCR, &bmcr);
6053                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6054
6055                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6056
6057                 }
6058         }
6059 }
6060
6061 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6062 {
6063         u32 val;
6064         int err;
6065
6066         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6067                 err = tg3_setup_fiber_phy(tp, force_reset);
6068         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6069                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6070         else
6071                 err = tg3_setup_copper_phy(tp, force_reset);
6072
6073         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6074                 u32 scale;
6075
6076                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6077                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6078                         scale = 65;
6079                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6080                         scale = 6;
6081                 else
6082                         scale = 12;
6083
6084                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6085                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6086                 tw32(GRC_MISC_CFG, val);
6087         }
6088
6089         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6090               (6 << TX_LENGTHS_IPG_SHIFT);
6091         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6092             tg3_asic_rev(tp) == ASIC_REV_5762)
6093                 val |= tr32(MAC_TX_LENGTHS) &
6094                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
6095                         TX_LENGTHS_CNT_DWN_VAL_MSK);
6096
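        /* Gigabit half duplex uses an extended slot time (carrier
         * extension); full duplex and lower speeds keep the standard
         * slot time.
         */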
6097         if (tp->link_config.active_speed == SPEED_1000 &&
6098             tp->link_config.active_duplex == DUPLEX_HALF)
6099                 tw32(MAC_TX_LENGTHS, val |
6100                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6101         else
6102                 tw32(MAC_TX_LENGTHS, val |
6103                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6104
6105         if (!tg3_flag(tp, 5705_PLUS)) {
6106                 if (tp->link_up) {
6107                         tw32(HOSTCC_STAT_COAL_TICKS,
6108                              tp->coal.stats_block_coalesce_usecs);
6109                 } else {
6110                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
6111                 }
6112         }
6113
6114         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6115                 val = tr32(PCIE_PWR_MGMT_THRESH);
6116                 if (!tp->link_up)
6117                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6118                               tp->pwrmgmt_thresh;
6119                 else
6120                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6121                 tw32(PCIE_PWR_MGMT_THRESH, val);
6122         }
6123
6124         return err;
6125 }
6126
6127 /* tp->lock must be held */
6128 static u64 tg3_refclk_read(struct tg3 *tp)
6129 {
6130         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6131         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6132 }
6133
6134 /* tp->lock must be held */
6135 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6136 {
6137         u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6138
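        /* Stop the clock while the 64-bit value is loaded as two 32-bit
         * halves, then resume so the new value takes effect atomically.
         */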
6139         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6140         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6141         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6142         tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6143 }
6144
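/* Forward declarations: the PTP callbacks below take the full lock, whose
 * helpers are defined later in this file.
 */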
6145 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6146 static inline void tg3_full_unlock(struct tg3 *tp);
6147 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6148 {
6149         struct tg3 *tp = netdev_priv(dev);
6150
6151         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6152                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6153                                 SOF_TIMESTAMPING_SOFTWARE;
6154
6155         if (tg3_flag(tp, PTP_CAPABLE)) {
6156                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6157                                         SOF_TIMESTAMPING_RX_HARDWARE |
6158                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6159         }
6160
6161         if (tp->ptp_clock)
6162                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6163         else
6164                 info->phc_index = -1;
6165
6166         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6167
6168         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6169                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6170                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6171                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6172         return 0;
6173 }
6174
6175 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6176 {
6177         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6178         bool neg_adj = false;
6179         u32 correction = 0;
6180
6181         if (ppb < 0) {
6182                 neg_adj = true;
6183                 ppb = -ppb;
6184         }
6185
6186         /* Frequency adjustment is performed in hardware with a 24-bit
6187          * accumulator and a programmable correction value. On each clock
6188          * cycle, the correction value is added to the accumulator and,
6189          * when it overflows, the time counter is incremented/decremented.
6190          *
6191          * So conversion from ppb to correction value is
6192          *              ppb * (1 << 24) / 1000000000
6193          */
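        /* Illustrative example: ppb = 1000000 (i.e. 1000 ppm) yields
         * correction = 1000000 * (1 << 24) / 1000000000 = 16777.
         */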
6194         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6195                      TG3_EAV_REF_CLK_CORRECT_MASK;
6196
6197         tg3_full_lock(tp, 0);
6198
6199         if (correction)
6200                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6201                      TG3_EAV_REF_CLK_CORRECT_EN |
6202                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6203         else
6204                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6205
6206         tg3_full_unlock(tp);
6207
6208         return 0;
6209 }
6210
6211 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6212 {
6213         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6214
6215         tg3_full_lock(tp, 0);
6216         tp->ptp_adjust += delta;
6217         tg3_full_unlock(tp);
6218
6219         return 0;
6220 }
6221
6222 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6223 {
6224         u64 ns;
6225         u32 remainder;
6226         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6227
6228         tg3_full_lock(tp, 0);
6229         ns = tg3_refclk_read(tp);
6230         ns += tp->ptp_adjust;
6231         tg3_full_unlock(tp);
6232
6233         ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6234         ts->tv_nsec = remainder;
6235
6236         return 0;
6237 }
6238
6239 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6240                            const struct timespec *ts)
6241 {
6242         u64 ns;
6243         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6244
6245         ns = timespec_to_ns(ts);
6246
6247         tg3_full_lock(tp, 0);
6248         tg3_refclk_write(tp, ns);
6249         tp->ptp_adjust = 0;
6250         tg3_full_unlock(tp);
6251
6252         return 0;
6253 }
6254
6255 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6256                           struct ptp_clock_request *rq, int on)
6257 {
6258         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6259         u32 clock_ctl;
6260         int rval = 0;
6261
6262         switch (rq->type) {
6263         case PTP_CLK_REQ_PEROUT:
6264                 if (rq->perout.index != 0)
6265                         return -EINVAL;
6266
6267                 tg3_full_lock(tp, 0);
6268                 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6269                 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6270
6271                 if (on) {
6272                         u64 nsec;
6273
6274                         nsec = rq->perout.start.sec * 1000000000ULL +
6275                                rq->perout.start.nsec;
6276
6277                         if (rq->perout.period.sec || rq->perout.period.nsec) {
6278                                 netdev_warn(tp->dev,
6279                                             "Device supports only a one-shot timesync output, period must be 0\n");
6280                                 rval = -EINVAL;
6281                                 goto err_out;
6282                         }
6283
6284                         if (nsec & (1ULL << 63)) {
6285                                 netdev_warn(tp->dev,
6286                                             "Start value (nsec) exceeds the limit; start must fit in 63 bits\n");
6287                                 rval = -EINVAL;
6288                                 goto err_out;
6289                         }
6290
6291                         tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6292                         tw32(TG3_EAV_WATCHDOG0_MSB,
6293                              TG3_EAV_WATCHDOG0_EN |
6294                              ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6295
6296                         tw32(TG3_EAV_REF_CLCK_CTL,
6297                              clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6298                 } else {
6299                         tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6300                         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6301                 }
6302
6303 err_out:
6304                 tg3_full_unlock(tp);
6305                 return rval;
6306
6307         default:
6308                 break;
6309         }
6310
6311         return -EOPNOTSUPP;
6312 }
6313
6314 static const struct ptp_clock_info tg3_ptp_caps = {
6315         .owner          = THIS_MODULE,
6316         .name           = "tg3 clock",
6317         .max_adj        = 250000000,
6318         .n_alarm        = 0,
6319         .n_ext_ts       = 0,
6320         .n_per_out      = 1,
6321         .pps            = 0,
6322         .adjfreq        = tg3_ptp_adjfreq,
6323         .adjtime        = tg3_ptp_adjtime,
6324         .gettime        = tg3_ptp_gettime,
6325         .settime        = tg3_ptp_settime,
6326         .enable         = tg3_ptp_enable,
6327 };
6328
6329 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6330                                      struct skb_shared_hwtstamps *timestamp)
6331 {
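        /* Mask the raw counter to its valid width, then fold in the
         * software offset accumulated by tg3_ptp_adjtime().
         */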
6332         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6333         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6334                                            tp->ptp_adjust);
6335 }
6336
6337 /* tp->lock must be held */
6338 static void tg3_ptp_init(struct tg3 *tp)
6339 {
6340         if (!tg3_flag(tp, PTP_CAPABLE))
6341                 return;
6342
6343         /* Initialize the hardware clock to the system time. */
6344         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6345         tp->ptp_adjust = 0;
6346         tp->ptp_info = tg3_ptp_caps;
6347 }
6348
6349 /* tp->lock must be held */
6350 static void tg3_ptp_resume(struct tg3 *tp)
6351 {
6352         if (!tg3_flag(tp, PTP_CAPABLE))
6353                 return;
6354
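        /* Reload the hardware clock from system time, folding in any
         * offset that tg3_ptp_adjtime() accumulated before the suspend.
         */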
6355         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6356         tp->ptp_adjust = 0;
6357 }
6358
6359 static void tg3_ptp_fini(struct tg3 *tp)
6360 {
6361         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6362                 return;
6363
6364         ptp_clock_unregister(tp->ptp_clock);
6365         tp->ptp_clock = NULL;
6366         tp->ptp_adjust = 0;
6367 }
6368
6369 static inline int tg3_irq_sync(struct tg3 *tp)
6370 {
6371         return tp->irq_sync;
6372 }
6373
6374 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6375 {
6376         int i;
6377
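        /* The destination buffer mirrors the register address space, so
         * advance dst by the register offset before copying len bytes.
         */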
6378         dst = (u32 *)((u8 *)dst + off);
6379         for (i = 0; i < len; i += sizeof(u32))
6380                 *dst++ = tr32(off + i);
6381 }
6382
6383 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6384 {
6385         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6386         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6387         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6388         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6389         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6390         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6391         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6392         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6393         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6394         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6395         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6396         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6397         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6398         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6399         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6400         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6401         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6402         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6403         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6404
6405         if (tg3_flag(tp, SUPPORT_MSIX))
6406                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6407
6408         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6409         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6410         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6411         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6412         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6413         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6414         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6415         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6416
6417         if (!tg3_flag(tp, 5705_PLUS)) {
6418                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6419                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6420                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6421         }
6422
6423         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6424         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6425         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6426         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6427         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6428
6429         if (tg3_flag(tp, NVRAM))
6430                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6431 }
6432
6433 static void tg3_dump_state(struct tg3 *tp)
6434 {
6435         int i;
6436         u32 *regs;
6437
6438         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6439         if (!regs)
6440                 return;
6441
6442         if (tg3_flag(tp, PCI_EXPRESS)) {
6443                 /* Read up to but not including private PCI registers */
6444                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6445                         regs[i / sizeof(u32)] = tr32(i);
6446         } else
6447                 tg3_dump_legacy_regs(tp, regs);
6448
6449         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6450                 if (!regs[i + 0] && !regs[i + 1] &&
6451                     !regs[i + 2] && !regs[i + 3])
6452                         continue;
6453
6454                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6455                            i * 4,
6456                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6457         }
6458
6459         kfree(regs);
6460
6461         for (i = 0; i < tp->irq_cnt; i++) {
6462                 struct tg3_napi *tnapi = &tp->napi[i];
6463
6464                 /* SW status block */
6465                 netdev_err(tp->dev,
6466                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6467                            i,
6468                            tnapi->hw_status->status,
6469                            tnapi->hw_status->status_tag,
6470                            tnapi->hw_status->rx_jumbo_consumer,
6471                            tnapi->hw_status->rx_consumer,
6472                            tnapi->hw_status->rx_mini_consumer,
6473                            tnapi->hw_status->idx[0].rx_producer,
6474                            tnapi->hw_status->idx[0].tx_consumer);
6475
6476                 netdev_err(tp->dev,
6477                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6478                            i,
6479                            tnapi->last_tag, tnapi->last_irq_tag,
6480                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6481                            tnapi->rx_rcb_ptr,
6482                            tnapi->prodring.rx_std_prod_idx,
6483                            tnapi->prodring.rx_std_cons_idx,
6484                            tnapi->prodring.rx_jmb_prod_idx,
6485                            tnapi->prodring.rx_jmb_cons_idx);
6486         }
6487 }
6488
6489 /* This is called whenever we suspect that the system chipset is re-
6490  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6491  * is bogus tx completions. We try to recover by setting the
6492  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6493  * in the workqueue.
6494  */
6495 static void tg3_tx_recover(struct tg3 *tp)
6496 {
6497         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6498                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6499
6500         netdev_warn(tp->dev,
6501                     "The system may be re-ordering memory-mapped I/O "
6502                     "cycles to the network device, attempting to recover. "
6503                     "Please report the problem to the driver maintainer "
6504                     "and include system chipset information.\n");
6505
6506         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6507 }
6508
6509 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6510 {
6511         /* Tell compiler to fetch tx indices from memory. */
6512         barrier();
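        /* The free-running indices wrap modulo the ring size: e.g. with
         * a 512-entry ring, tx_prod == 3 and tx_cons == 510 leaves
         * (3 - 510) & 511 == 5 descriptors still in flight.
         */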
6513         return tnapi->tx_pending -
6514                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6515 }
6516
6517 /* Tigon3 never reports partial packet sends.  So we do not
6518  * need special logic to handle SKBs that have not had all
6519  * of their frags sent yet, like SunGEM does.
6520  */
6521 static void tg3_tx(struct tg3_napi *tnapi)
6522 {
6523         struct tg3 *tp = tnapi->tp;
6524         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6525         u32 sw_idx = tnapi->tx_cons;
6526         struct netdev_queue *txq;
6527         int index = tnapi - tp->napi;
6528         unsigned int pkts_compl = 0, bytes_compl = 0;
6529
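        /* With TSS, napi[0] carries no tx queue, so tx queue numbering
         * starts at napi[1] and the queue index is one less than the
         * napi index.
         */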
6530         if (tg3_flag(tp, ENABLE_TSS))
6531                 index--;
6532
6533         txq = netdev_get_tx_queue(tp->dev, index);
6534
6535         while (sw_idx != hw_idx) {
6536                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6537                 struct sk_buff *skb = ri->skb;
6538                 int i, tx_bug = 0;
6539
6540                 if (unlikely(skb == NULL)) {
6541                         tg3_tx_recover(tp);
6542                         return;
6543                 }
6544
6545                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6546                         struct skb_shared_hwtstamps timestamp;
6547                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6548                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6549
6550                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6551
6552                         skb_tstamp_tx(skb, &timestamp);
6553                 }
6554
6555                 pci_unmap_single(tp->pdev,
6556                                  dma_unmap_addr(ri, mapping),
6557                                  skb_headlen(skb),
6558                                  PCI_DMA_TODEVICE);
6559
6560                 ri->skb = NULL;
6561
6562                 while (ri->fragmented) {
6563                         ri->fragmented = false;
6564                         sw_idx = NEXT_TX(sw_idx);
6565                         ri = &tnapi->tx_buffers[sw_idx];
6566                 }
6567
6568                 sw_idx = NEXT_TX(sw_idx);
6569
6570                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6571                         ri = &tnapi->tx_buffers[sw_idx];
6572                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6573                                 tx_bug = 1;
6574
6575                         pci_unmap_page(tp->pdev,
6576                                        dma_unmap_addr(ri, mapping),
6577                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6578                                        PCI_DMA_TODEVICE);
6579
6580                         while (ri->fragmented) {
6581                                 ri->fragmented = false;
6582                                 sw_idx = NEXT_TX(sw_idx);
6583                                 ri = &tnapi->tx_buffers[sw_idx];
6584                         }
6585
6586                         sw_idx = NEXT_TX(sw_idx);
6587                 }
6588
6589                 pkts_compl++;
6590                 bytes_compl += skb->len;
6591
6592                 dev_kfree_skb(skb);
6593
6594                 if (unlikely(tx_bug)) {
6595                         tg3_tx_recover(tp);
6596                         return;
6597                 }
6598         }
6599
6600         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6601
6602         tnapi->tx_cons = sw_idx;
6603
6604         /* Need to make the tx_cons update visible to tg3_start_xmit()
6605          * before checking for netif_queue_stopped().  Without the
6606          * memory barrier, there is a small possibility that tg3_start_xmit()
6607          * will miss it and cause the queue to be stopped forever.
6608          */
6609         smp_mb();
6610
6611         if (unlikely(netif_tx_queue_stopped(txq) &&
6612                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6613                 __netif_tx_lock(txq, smp_processor_id());
6614                 if (netif_tx_queue_stopped(txq) &&
6615                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6616                         netif_tx_wake_queue(txq);
6617                 __netif_tx_unlock(txq);
6618         }
6619 }
6620
6621 static void tg3_frag_free(bool is_frag, void *data)
6622 {
6623         if (is_frag)
6624                 put_page(virt_to_head_page(data));
6625         else
6626                 kfree(data);
6627 }
6628
6629 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6630 {
6631         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6632                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6633
6634         if (!ri->data)
6635                 return;
6636
6637         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6638                          map_sz, PCI_DMA_FROMDEVICE);
6639         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6640         ri->data = NULL;
6641 }
6642
6643
6644 /* Returns size of the data buffer allocated, or < 0 on error.
6645  *
6646  * We only need to fill in the address because the other members
6647  * of the RX descriptor are invariant, see tg3_init_rings.
6648  *
6649  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6650  * posting buffers we only dirty the first cache line of the RX
6651  * descriptor (containing the address), whereas for the RX status
6652  * buffers the cpu only reads the last cache line of the RX descriptor
6653  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6654  */
6655 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6656                              u32 opaque_key, u32 dest_idx_unmasked,
6657                              unsigned int *frag_size)
6658 {
6659         struct tg3_rx_buffer_desc *desc;
6660         struct ring_info *map;
6661         u8 *data;
6662         dma_addr_t mapping;
6663         int skb_size, data_size, dest_idx;
6664
6665         switch (opaque_key) {
6666         case RXD_OPAQUE_RING_STD:
6667                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6668                 desc = &tpr->rx_std[dest_idx];
6669                 map = &tpr->rx_std_buffers[dest_idx];
6670                 data_size = tp->rx_pkt_map_sz;
6671                 break;
6672
6673         case RXD_OPAQUE_RING_JUMBO:
6674                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6675                 desc = &tpr->rx_jmb[dest_idx].std;
6676                 map = &tpr->rx_jmb_buffers[dest_idx];
6677                 data_size = TG3_RX_JMB_MAP_SZ;
6678                 break;
6679
6680         default:
6681                 return -EINVAL;
6682         }
6683
6684         /* Do not overwrite any of the map or rp information
6685          * until we are sure we can commit to a new buffer.
6686          *
6687          * Callers depend upon this behavior and assume that
6688          * we leave everything unchanged if we fail.
6689          */
6690         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6691                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
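        /* Buffers that fit in a page come from the page-frag allocator;
         * larger ones fall back to kmalloc().  *frag_size records which
         * allocator was used so the matching free is chosen later.
         */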
6692         if (skb_size <= PAGE_SIZE) {
6693                 data = netdev_alloc_frag(skb_size);
6694                 *frag_size = skb_size;
6695         } else {
6696                 data = kmalloc(skb_size, GFP_ATOMIC);
6697                 *frag_size = 0;
6698         }
6699         if (!data)
6700                 return -ENOMEM;
6701
6702         mapping = pci_map_single(tp->pdev,
6703                                  data + TG3_RX_OFFSET(tp),
6704                                  data_size,
6705                                  PCI_DMA_FROMDEVICE);
6706         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6707                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6708                 return -EIO;
6709         }
6710
6711         map->data = data;
6712         dma_unmap_addr_set(map, mapping, mapping);
6713
6714         desc->addr_hi = ((u64)mapping >> 32);
6715         desc->addr_lo = ((u64)mapping & 0xffffffff);
6716
6717         return data_size;
6718 }
6719
6720 /* We only need to copy the address over because the other
6721  * members of the RX descriptor are invariant.  See notes above
6722  * tg3_alloc_rx_data for full details.
6723  */
6724 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6725                            struct tg3_rx_prodring_set *dpr,
6726                            u32 opaque_key, int src_idx,
6727                            u32 dest_idx_unmasked)
6728 {
6729         struct tg3 *tp = tnapi->tp;
6730         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6731         struct ring_info *src_map, *dest_map;
6732         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6733         int dest_idx;
6734
6735         switch (opaque_key) {
6736         case RXD_OPAQUE_RING_STD:
6737                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6738                 dest_desc = &dpr->rx_std[dest_idx];
6739                 dest_map = &dpr->rx_std_buffers[dest_idx];
6740                 src_desc = &spr->rx_std[src_idx];
6741                 src_map = &spr->rx_std_buffers[src_idx];
6742                 break;
6743
6744         case RXD_OPAQUE_RING_JUMBO:
6745                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6746                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6747                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6748                 src_desc = &spr->rx_jmb[src_idx].std;
6749                 src_map = &spr->rx_jmb_buffers[src_idx];
6750                 break;
6751
6752         default:
6753                 return;
6754         }
6755
6756         dest_map->data = src_map->data;
6757         dma_unmap_addr_set(dest_map, mapping,
6758                            dma_unmap_addr(src_map, mapping));
6759         dest_desc->addr_hi = src_desc->addr_hi;
6760         dest_desc->addr_lo = src_desc->addr_lo;
6761
6762         /* Ensure that the update to the data pointer happens after the
6763          * physical addresses have been transferred to the new BD location.
6764          */
6765         smp_wmb();
6766
6767         src_map->data = NULL;
6768 }
6769
6770 /* The RX ring scheme is composed of multiple rings which post fresh
6771  * buffers to the chip, and one special ring the chip uses to report
6772  * status back to the host.
6773  *
6774  * The special ring reports the status of received packets to the
6775  * host.  The chip does not write into the original descriptor the
6776  * RX buffer was obtained from.  The chip simply takes the original
6777  * descriptor as provided by the host, updates the status and length
6778  * field, then writes this into the next status ring entry.
6779  *
6780  * Each ring the host uses to post buffers to the chip is described
6781  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet
6782  * arrives, it is first placed into the on-chip RAM.  When the packet's
6783  * length is known, the chip walks down the TG3_BDINFO entries to
6784  * select the ring.  Each TG3_BDINFO specifies a MAXLEN field, and the
6785  * first TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
6786  *
6787  * The "separate ring for rx status" scheme may sound queer, but it makes
6788  * sense from a cache coherency perspective.  If only the host writes
6789  * to the buffer post rings, and only the chip writes to the rx status
6790  * rings, then cache lines never move beyond shared-modified state.
6791  * If both the host and chip were to write into the same ring, cache line
6792  * eviction could occur since both entities want it in an exclusive state.
6793  */
6794 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6795 {
6796         struct tg3 *tp = tnapi->tp;
6797         u32 work_mask, rx_std_posted = 0;
6798         u32 std_prod_idx, jmb_prod_idx;
6799         u32 sw_idx = tnapi->rx_rcb_ptr;
6800         u16 hw_idx;
6801         int received;
6802         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6803
6804         hw_idx = *(tnapi->rx_rcb_prod_idx);
6805         /*
6806          * We need to order the read of hw_idx and the read of
6807          * the opaque cookie.
6808          */
6809         rmb();
6810         work_mask = 0;
6811         received = 0;
6812         std_prod_idx = tpr->rx_std_prod_idx;
6813         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6814         while (sw_idx != hw_idx && budget > 0) {
6815                 struct ring_info *ri;
6816                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6817                 unsigned int len;
6818                 struct sk_buff *skb;
6819                 dma_addr_t dma_addr;
6820                 u32 opaque_key, desc_idx, *post_ptr;
6821                 u8 *data;
6822                 u64 tstamp = 0;
6823
6824                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6825                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6826                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6827                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6828                         dma_addr = dma_unmap_addr(ri, mapping);
6829                         data = ri->data;
6830                         post_ptr = &std_prod_idx;
6831                         rx_std_posted++;
6832                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6833                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6834                         dma_addr = dma_unmap_addr(ri, mapping);
6835                         data = ri->data;
6836                         post_ptr = &jmb_prod_idx;
6837                 } else
6838                         goto next_pkt_nopost;
6839
6840                 work_mask |= opaque_key;
6841
6842                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6843                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6844                 drop_it:
6845                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6846                                        desc_idx, *post_ptr);
6847                 drop_it_no_recycle:
6848                         /* Other statistics are tracked by the card. */
6849                         tp->rx_dropped++;
6850                         goto next_pkt;
6851                 }
6852
6853                 prefetch(data + TG3_RX_OFFSET(tp));
6854                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6855                       ETH_FCS_LEN;
6856
6857                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6858                      RXD_FLAG_PTPSTAT_PTPV1 ||
6859                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6860                      RXD_FLAG_PTPSTAT_PTPV2) {
6861                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6862                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6863                 }
6864
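                /* Large packets: hand the existing buffer to the stack
                 * and post a freshly allocated replacement.  Small
                 * packets: copy into a new skb and recycle the original
                 * buffer, which is cheaper than replacing it.
                 */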
6865                 if (len > TG3_RX_COPY_THRESH(tp)) {
6866                         int skb_size;
6867                         unsigned int frag_size;
6868
6869                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6870                                                     *post_ptr, &frag_size);
6871                         if (skb_size < 0)
6872                                 goto drop_it;
6873
6874                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6875                                          PCI_DMA_FROMDEVICE);
6876
6877                         /* Ensure that the update to the data happens
6878                          * after the usage of the old DMA mapping.
6879                          */
6880                         smp_wmb();
6881
6882                         ri->data = NULL;
6883
6884                         skb = build_skb(data, frag_size);
6885                         if (!skb) {
6886                                 tg3_frag_free(frag_size != 0, data);
6887                                 goto drop_it_no_recycle;
6888                         }
6889                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6890                 } else {
6891                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6892                                        desc_idx, *post_ptr);
6893
6894                         skb = netdev_alloc_skb(tp->dev,
6895                                                len + TG3_RAW_IP_ALIGN);
6896                         if (skb == NULL)
6897                                 goto drop_it_no_recycle;
6898
6899                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6900                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6901                         memcpy(skb->data,
6902                                data + TG3_RX_OFFSET(tp),
6903                                len);
6904                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6905                 }
6906
6907                 skb_put(skb, len);
6908                 if (tstamp)
6909                         tg3_hwclock_to_timestamp(tp, tstamp,
6910                                                  skb_hwtstamps(skb));
6911
6912                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6913                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6914                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6915                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6916                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6917                 else
6918                         skb_checksum_none_assert(skb);
6919
6920                 skb->protocol = eth_type_trans(skb, tp->dev);
6921
6922                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6923                     skb->protocol != htons(ETH_P_8021Q)) {
6924                         dev_kfree_skb(skb);
6925                         goto drop_it_no_recycle;
6926                 }
6927
6928                 if (desc->type_flags & RXD_FLAG_VLAN &&
6929                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6930                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6931                                                desc->err_vlan & RXD_VLAN_MASK);
6932
6933                 napi_gro_receive(&tnapi->napi, skb);
6934
6935                 received++;
6936                 budget--;
6937
6938 next_pkt:
6939                 (*post_ptr)++;
6940
6941                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6942                         tpr->rx_std_prod_idx = std_prod_idx &
6943                                                tp->rx_std_ring_mask;
6944                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6945                                      tpr->rx_std_prod_idx);
6946                         work_mask &= ~RXD_OPAQUE_RING_STD;
6947                         rx_std_posted = 0;
6948                 }
6949 next_pkt_nopost:
6950                 sw_idx++;
6951                 sw_idx &= tp->rx_ret_ring_mask;
6952
6953                 /* Refresh hw_idx to see if there is new work */
6954                 if (sw_idx == hw_idx) {
6955                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6956                         rmb();
6957                 }
6958         }
6959
6960         /* ACK the status ring. */
6961         tnapi->rx_rcb_ptr = sw_idx;
6962         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6963
6964         /* Refill RX ring(s). */
6965         if (!tg3_flag(tp, ENABLE_RSS)) {
6966                 /* Sync BD data before updating mailbox */
6967                 wmb();
6968
6969                 if (work_mask & RXD_OPAQUE_RING_STD) {
6970                         tpr->rx_std_prod_idx = std_prod_idx &
6971                                                tp->rx_std_ring_mask;
6972                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6973                                      tpr->rx_std_prod_idx);
6974                 }
6975                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6976                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6977                                                tp->rx_jmb_ring_mask;
6978                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6979                                      tpr->rx_jmb_prod_idx);
6980                 }
6981                 mmiowb();
6982         } else if (work_mask) {
6983                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6984                  * updated before the producer indices can be updated.
6985                  */
6986                 smp_wmb();
6987
6988                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6989                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6990
6991                 if (tnapi != &tp->napi[1]) {
6992                         tp->rx_refill = true;
6993                         napi_schedule(&tp->napi[1].napi);
6994                 }
6995         }
6996
6997         return received;
6998 }
6999
7000 static void tg3_poll_link(struct tg3 *tp)
7001 {
7002         /* handle link change and other phy events */
7003         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7004                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7005
7006                 if (sblk->status & SD_STATUS_LINK_CHG) {
7007                         sblk->status = SD_STATUS_UPDATED |
7008                                        (sblk->status & ~SD_STATUS_LINK_CHG);
7009                         spin_lock(&tp->lock);
7010                         if (tg3_flag(tp, USE_PHYLIB)) {
7011                                 tw32_f(MAC_STATUS,
7012                                      (MAC_STATUS_SYNC_CHANGED |
7013                                       MAC_STATUS_CFG_CHANGED |
7014                                       MAC_STATUS_MI_COMPLETION |
7015                                       MAC_STATUS_LNKSTATE_CHANGED));
7016                                 udelay(40);
7017                         } else
7018                                 tg3_setup_phy(tp, false);
7019                         spin_unlock(&tp->lock);
7020                 }
7021         }
7022 }
7023
7024 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7025                                 struct tg3_rx_prodring_set *dpr,
7026                                 struct tg3_rx_prodring_set *spr)
7027 {
7028         u32 si, di, cpycnt, src_prod_idx;
7029         int i, err = 0;
7030
7031         while (1) {
7032                 src_prod_idx = spr->rx_std_prod_idx;
7033
7034                 /* Make sure updates to the rx_std_buffers[] entries and the
7035                  * standard producer index are seen in the correct order.
7036                  */
7037                 smp_rmb();
7038
7039                 if (spr->rx_std_cons_idx == src_prod_idx)
7040                         break;
7041
7042                 if (spr->rx_std_cons_idx < src_prod_idx)
7043                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7044                 else
7045                         cpycnt = tp->rx_std_ring_mask + 1 -
7046                                  spr->rx_std_cons_idx;
7047
7048                 cpycnt = min(cpycnt,
7049                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7050
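                /* cpycnt is now bounded by the number of buffers the
                 * source has produced and by the space left before either
                 * ring index wraps; the loop below shrinks it further if
                 * a destination slot is still occupied.
                 */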
7051                 si = spr->rx_std_cons_idx;
7052                 di = dpr->rx_std_prod_idx;
7053
7054                 for (i = di; i < di + cpycnt; i++) {
7055                         if (dpr->rx_std_buffers[i].data) {
7056                                 cpycnt = i - di;
7057                                 err = -ENOSPC;
7058                                 break;
7059                         }
7060                 }
7061
7062                 if (!cpycnt)
7063                         break;
7064
7065                 /* Ensure that updates to the rx_std_buffers ring and the
7066                  * shadowed hardware producer ring from tg3_recycle_rx() are
7067                  * ordered correctly WRT the data check above.
7068                  */
7069                 smp_rmb();
7070
7071                 memcpy(&dpr->rx_std_buffers[di],
7072                        &spr->rx_std_buffers[si],
7073                        cpycnt * sizeof(struct ring_info));
7074
7075                 for (i = 0; i < cpycnt; i++, di++, si++) {
7076                         struct tg3_rx_buffer_desc *sbd, *dbd;
7077                         sbd = &spr->rx_std[si];
7078                         dbd = &dpr->rx_std[di];
7079                         dbd->addr_hi = sbd->addr_hi;
7080                         dbd->addr_lo = sbd->addr_lo;
7081                 }
7082
7083                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7084                                        tp->rx_std_ring_mask;
7085                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7086                                        tp->rx_std_ring_mask;
7087         }
7088
7089         while (1) {
7090                 src_prod_idx = spr->rx_jmb_prod_idx;
7091
7092                 /* Make sure updates to the rx_jmb_buffers[] entries and
7093                  * the jumbo producer index are seen in the correct order.
7094                  */
7095                 smp_rmb();
7096
7097                 if (spr->rx_jmb_cons_idx == src_prod_idx)
7098                         break;
7099
7100                 if (spr->rx_jmb_cons_idx < src_prod_idx)
7101                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7102                 else
7103                         cpycnt = tp->rx_jmb_ring_mask + 1 -
7104                                  spr->rx_jmb_cons_idx;
7105
7106                 cpycnt = min(cpycnt,
7107                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7108
7109                 si = spr->rx_jmb_cons_idx;
7110                 di = dpr->rx_jmb_prod_idx;
7111
7112                 for (i = di; i < di + cpycnt; i++) {
7113                         if (dpr->rx_jmb_buffers[i].data) {
7114                                 cpycnt = i - di;
7115                                 err = -ENOSPC;
7116                                 break;
7117                         }
7118                 }
7119
7120                 if (!cpycnt)
7121                         break;
7122
7123                 /* Ensure that updates to the rx_jmb_buffers ring and the
7124                  * shadowed hardware producer ring from tg3_recycle_skb() are
7125                  * ordered correctly WRT the skb check above.
7126                  */
7127                 smp_rmb();
7128
7129                 memcpy(&dpr->rx_jmb_buffers[di],
7130                        &spr->rx_jmb_buffers[si],
7131                        cpycnt * sizeof(struct ring_info));
7132
7133                 for (i = 0; i < cpycnt; i++, di++, si++) {
7134                         struct tg3_rx_buffer_desc *sbd, *dbd;
7135                         sbd = &spr->rx_jmb[si].std;
7136                         dbd = &dpr->rx_jmb[di].std;
7137                         dbd->addr_hi = sbd->addr_hi;
7138                         dbd->addr_lo = sbd->addr_lo;
7139                 }
7140
7141                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7142                                        tp->rx_jmb_ring_mask;
7143                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7144                                        tp->rx_jmb_ring_mask;
7145         }
7146
7147         return err;
7148 }
7149
7150 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7151 {
7152         struct tg3 *tp = tnapi->tp;
7153
7154         /* run TX completion thread */
7155         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7156                 tg3_tx(tnapi);
7157                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7158                         return work_done;
7159         }
7160
7161         if (!tnapi->rx_rcb_prod_idx)
7162                 return work_done;
7163
7164         /* Run the RX thread within the bounds set by NAPI.
7165          * All RX "locking" is done by ensuring that outside
7166          * code synchronizes with tg3->napi.poll()
7167          */
7168         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7169                 work_done += tg3_rx(tnapi, budget - work_done);
7170
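        /* With RSS enabled, only vector 1 performs the refill below: it
         * gathers buffers recycled by every rx vector into vector 0's
         * producer ring (the one the hardware uses) and then publishes
         * the new producer indexes through the rx mailboxes.
         */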
7171         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7172                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7173                 int i, err = 0;
7174                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7175                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7176
7177                 tp->rx_refill = false;
7178                 for (i = 1; i <= tp->rxq_cnt; i++)
7179                         err |= tg3_rx_prodring_xfer(tp, dpr,
7180                                                     &tp->napi[i].prodring);
7181
7182                 wmb();
7183
7184                 if (std_prod_idx != dpr->rx_std_prod_idx)
7185                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7186                                      dpr->rx_std_prod_idx);
7187
7188                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7189                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7190                                      dpr->rx_jmb_prod_idx);
7191
7192                 mmiowb();
7193
7194                 if (err)
7195                         tw32_f(HOSTCC_MODE, tp->coal_now);
7196         }
7197
7198         return work_done;
7199 }
7200
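/* Schedule the reset task at most once: RESET_TASK_PENDING stays set
 * until the task completes or is cancelled, so repeat calls are no-ops.
 */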
7201 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7202 {
7203         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7204                 schedule_work(&tp->reset_task);
7205 }
7206
7207 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7208 {
7209         cancel_work_sync(&tp->reset_task);
7210         tg3_flag_clear(tp, RESET_TASK_PENDING);
7211         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7212 }
7213
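/* NAPI poll handler for the extra MSI-X vectors.  Unlike tg3_poll()
 * below, it does not need to check link events or the status word error
 * bit; those are reported through vector 0.
 */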
7214 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7215 {
7216         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7217         struct tg3 *tp = tnapi->tp;
7218         int work_done = 0;
7219         struct tg3_hw_status *sblk = tnapi->hw_status;
7220
7221         while (1) {
7222                 work_done = tg3_poll_work(tnapi, work_done, budget);
7223
7224                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7225                         goto tx_recovery;
7226
7227                 if (unlikely(work_done >= budget))
7228                         break;
7229
7230                 /* tp->last_tag is used in tg3_int_reenable() below
7231                  * to tell the hw how much work has been processed,
7232                  * so we must read it before checking for more work.
7233                  */
7234                 tnapi->last_tag = sblk->status_tag;
7235                 tnapi->last_irq_tag = tnapi->last_tag;
7236                 rmb();
7237
7238                 /* check for RX/TX work to do */
7239                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7240                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7241
7242                         /* This test is not race-free, but looping
7243                          * again reduces the number of interrupts.
7244                          */
7245                         if (tnapi == &tp->napi[1] && tp->rx_refill)
7246                                 continue;
7247
7248                         napi_complete(napi);
7249                         /* Reenable interrupts. */
7250                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7251
7252                         /* This test is synchronized by napi_schedule()
7253                          * and napi_complete() to close the race condition.
7254                          */
7255                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7256                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
7257                                                   HOSTCC_MODE_ENABLE |
7258                                                   tnapi->coal_now);
7259                         }
7260                         mmiowb();
7261                         break;
7262                 }
7263         }
7264
7265         return work_done;
7266
7267 tx_recovery:
7268         /* work_done is guaranteed to be less than budget. */
7269         napi_complete(napi);
7270         tg3_reset_task_schedule(tp);
7271         return work_done;
7272 }
7273
7274 static void tg3_process_error(struct tg3 *tp)
7275 {
7276         u32 val;
7277         bool real_error = false;
7278
7279         if (tg3_flag(tp, ERROR_PROCESSED))
7280                 return;
7281
7282         /* Check Flow Attention register */
7283         val = tr32(HOSTCC_FLOW_ATTN);
7284         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7285                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7286                 real_error = true;
7287         }
7288
7289         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7290                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7291                 real_error = true;
7292         }
7293
7294         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7295                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7296                 real_error = true;
7297         }
7298
7299         if (!real_error)
7300                 return;
7301
7302         tg3_dump_state(tp);
7303
7304         tg3_flag_set(tp, ERROR_PROCESSED);
7305         tg3_reset_task_schedule(tp);
7306 }
7307
7308 static int tg3_poll(struct napi_struct *napi, int budget)
7309 {
7310         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7311         struct tg3 *tp = tnapi->tp;
7312         int work_done = 0;
7313         struct tg3_hw_status *sblk = tnapi->hw_status;
7314
7315         while (1) {
7316                 if (sblk->status & SD_STATUS_ERROR)
7317                         tg3_process_error(tp);
7318
7319                 tg3_poll_link(tp);
7320
7321                 work_done = tg3_poll_work(tnapi, work_done, budget);
7322
7323                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7324                         goto tx_recovery;
7325
7326                 if (unlikely(work_done >= budget))
7327                         break;
7328
7329                 if (tg3_flag(tp, TAGGED_STATUS)) {
7330                         /* tp->last_tag is used in tg3_int_reenable() below
7331                          * to tell the hw how much work has been processed,
7332                          * so we must read it before checking for more work.
7333                          */
7334                         tnapi->last_tag = sblk->status_tag;
7335                         tnapi->last_irq_tag = tnapi->last_tag;
7336                         rmb();
7337                 } else
7338                         sblk->status &= ~SD_STATUS_UPDATED;
7339
7340                 if (likely(!tg3_has_work(tnapi))) {
7341                         napi_complete(napi);
7342                         tg3_int_reenable(tnapi);
7343                         break;
7344                 }
7345         }
7346
7347         return work_done;
7348
7349 tx_recovery:
7350         /* work_done is guaranteed to be less than budget. */
7351         napi_complete(napi);
7352         tg3_reset_task_schedule(tp);
7353         return work_done;
7354 }
7355
7356 static void tg3_napi_disable(struct tg3 *tp)
7357 {
7358         int i;
7359
7360         for (i = tp->irq_cnt - 1; i >= 0; i--)
7361                 napi_disable(&tp->napi[i].napi);
7362 }
7363
7364 static void tg3_napi_enable(struct tg3 *tp)
7365 {
7366         int i;
7367
7368         for (i = 0; i < tp->irq_cnt; i++)
7369                 napi_enable(&tp->napi[i].napi);
7370 }
7371
7372 static void tg3_napi_init(struct tg3 *tp)
7373 {
7374         int i;
7375
7376         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7377         for (i = 1; i < tp->irq_cnt; i++)
7378                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7379 }
7380
7381 static void tg3_napi_fini(struct tg3 *tp)
7382 {
7383         int i;
7384
7385         for (i = 0; i < tp->irq_cnt; i++)
7386                 netif_napi_del(&tp->napi[i].napi);
7387 }
7388
7389 static inline void tg3_netif_stop(struct tg3 *tp)
7390 {
7391         tp->dev->trans_start = jiffies; /* prevent tx timeout */
7392         tg3_napi_disable(tp);
7393         netif_carrier_off(tp->dev);
7394         netif_tx_disable(tp->dev);
7395 }
7396
7397 /* tp->lock must be held */
7398 static inline void tg3_netif_start(struct tg3 *tp)
7399 {
7400         tg3_ptp_resume(tp);
7401
7402         /* NOTE: unconditional netif_tx_wake_all_queues is only
7403          * appropriate so long as all callers are assured to
7404          * have free tx slots (such as after tg3_init_hw)
7405          */
7406         netif_tx_wake_all_queues(tp->dev);
7407
7408         if (tp->link_up)
7409                 netif_carrier_on(tp->dev);
7410
7411         tg3_napi_enable(tp);
7412         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7413         tg3_enable_ints(tp);
7414 }
7415
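/* Mark the driver as quiesced and wait for any interrupt handler still
 * running on another CPU to finish.  Callers must not nest this; hence
 * the BUG_ON(tp->irq_sync).
 */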
7416 static void tg3_irq_quiesce(struct tg3 *tp)
7417 {
7418         int i;
7419
7420         BUG_ON(tp->irq_sync);
7421
7422         tp->irq_sync = 1;
7423         smp_mb();
7424
7425         for (i = 0; i < tp->irq_cnt; i++)
7426                 synchronize_irq(tp->napi[i].irq_vec);
7427 }
7428
7429 /* Fully shut down all tg3 driver activity elsewhere in the system.
7430  * If irq_sync is non-zero, the IRQ handlers must be synchronized with
7431  * as well.  This is usually only necessary when shutting down the
7432  * device.
7433  */
7434 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7435 {
7436         spin_lock_bh(&tp->lock);
7437         if (irq_sync)
7438                 tg3_irq_quiesce(tp);
7439 }
7440
7441 static inline void tg3_full_unlock(struct tg3 *tp)
7442 {
7443         spin_unlock_bh(&tp->lock);
7444 }
7445
7446 /* One-shot MSI handler - Chip automatically disables interrupt
7447  * after sending MSI so driver doesn't have to do it.
7448  */
7449 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7450 {
7451         struct tg3_napi *tnapi = dev_id;
7452         struct tg3 *tp = tnapi->tp;
7453
7454         prefetch(tnapi->hw_status);
7455         if (tnapi->rx_rcb)
7456                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7457
7458         if (likely(!tg3_irq_sync(tp)))
7459                 napi_schedule(&tnapi->napi);
7460
7461         return IRQ_HANDLED;
7462 }
7463
7464 /* MSI ISR - No need to check for interrupt sharing and no need to
7465  * flush status block and interrupt mailbox. PCI ordering rules
7466  * guarantee that MSI will arrive after the status block.
7467  */
7468 static irqreturn_t tg3_msi(int irq, void *dev_id)
7469 {
7470         struct tg3_napi *tnapi = dev_id;
7471         struct tg3 *tp = tnapi->tp;
7472
7473         prefetch(tnapi->hw_status);
7474         if (tnapi->rx_rcb)
7475                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7476         /*
7477          * Writing any value to intr-mbox-0 clears PCI INTA# and
7478          * chip-internal interrupt pending events.
7479          * Writing non-zero to intr-mbox-0 additionally tells the
7480          * NIC to stop sending us irqs, engaging "in-intr-handler"
7481          * event coalescing.
7482          */
7483         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7484         if (likely(!tg3_irq_sync(tp)))
7485                 napi_schedule(&tnapi->napi);
7486
7487         return IRQ_RETVAL(1);
7488 }
7489
7490 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7491 {
7492         struct tg3_napi *tnapi = dev_id;
7493         struct tg3 *tp = tnapi->tp;
7494         struct tg3_hw_status *sblk = tnapi->hw_status;
7495         unsigned int handled = 1;
7496
7497         /* In INTx mode, it is possible for the interrupt to arrive at
7498          * the CPU before the status block that was posted prior to the interrupt.
7499          * Reading the PCI State register will confirm whether the
7500          * interrupt is ours and will flush the status block.
7501          */
7502         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7503                 if (tg3_flag(tp, CHIP_RESETTING) ||
7504                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7505                         handled = 0;
7506                         goto out;
7507                 }
7508         }
7509
7510         /*
7511          * Writing any value to intr-mbox-0 clears PCI INTA# and
7512          * chip-internal interrupt pending events.
7513          * Writing non-zero to intr-mbox-0 additionally tells the
7514          * NIC to stop sending us irqs, engaging "in-intr-handler"
7515          * event coalescing.
7516          *
7517          * Flush the mailbox to de-assert the IRQ immediately to prevent
7518          * spurious interrupts.  The flush impacts performance but
7519          * excessive spurious interrupts can be worse in some cases.
7520          */
7521         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7522         if (tg3_irq_sync(tp))
7523                 goto out;
7524         sblk->status &= ~SD_STATUS_UPDATED;
7525         if (likely(tg3_has_work(tnapi))) {
7526                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7527                 napi_schedule(&tnapi->napi);
7528         } else {
7529                 /* No work, shared interrupt perhaps?  Re-enable
7530                  * interrupts, and flush that PCI write.
7531                  */
7532                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7533                                0x00000000);
7534         }
7535 out:
7536         return IRQ_RETVAL(handled);
7537 }
7538
7539 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7540 {
7541         struct tg3_napi *tnapi = dev_id;
7542         struct tg3 *tp = tnapi->tp;
7543         struct tg3_hw_status *sblk = tnapi->hw_status;
7544         unsigned int handled = 1;
7545
7546         /* In INTx mode, it is possible for the interrupt to arrive at
7547          * the CPU before the status block that was posted prior to the interrupt.
7548          * Reading the PCI State register will confirm whether the
7549          * interrupt is ours and will flush the status block.
7550          */
7551         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7552                 if (tg3_flag(tp, CHIP_RESETTING) ||
7553                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7554                         handled = 0;
7555                         goto out;
7556                 }
7557         }
7558
7559         /*
7560          * Writing any value to intr-mbox-0 clears PCI INTA# and
7561          * chip-internal interrupt pending events.
7562          * Writing non-zero to intr-mbox-0 additionally tells the
7563          * NIC to stop sending us irqs, engaging "in-intr-handler"
7564          * event coalescing.
7565          *
7566          * Flush the mailbox to de-assert the IRQ immediately to prevent
7567          * spurious interrupts.  The flush impacts performance but
7568          * excessive spurious interrupts can be worse in some cases.
7569          */
7570         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7571
7572         /*
7573          * In a shared interrupt configuration, sometimes other devices'
7574          * interrupts will scream.  We record the current status tag here
7575          * so that the above check can report that the screaming interrupts
7576          * are unhandled.  Eventually they will be silenced.
7577          */
7578         tnapi->last_irq_tag = sblk->status_tag;
7579
7580         if (tg3_irq_sync(tp))
7581                 goto out;
7582
7583         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7584
7585         napi_schedule(&tnapi->napi);
7586
7587 out:
7588         return IRQ_RETVAL(handled);
7589 }
7590
7591 /* ISR for interrupt test */
7592 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7593 {
7594         struct tg3_napi *tnapi = dev_id;
7595         struct tg3 *tp = tnapi->tp;
7596         struct tg3_hw_status *sblk = tnapi->hw_status;
7597
7598         if ((sblk->status & SD_STATUS_UPDATED) ||
7599             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7600                 tg3_disable_ints(tp);
7601                 return IRQ_RETVAL(1);
7602         }
7603         return IRQ_RETVAL(0);
7604 }
7605
7606 #ifdef CONFIG_NET_POLL_CONTROLLER
7607 static void tg3_poll_controller(struct net_device *dev)
7608 {
7609         int i;
7610         struct tg3 *tp = netdev_priv(dev);
7611
7612         if (tg3_irq_sync(tp))
7613                 return;
7614
7615         for (i = 0; i < tp->irq_cnt; i++)
7616                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7617 }
7618 #endif
7619
7620 static void tg3_tx_timeout(struct net_device *dev)
7621 {
7622         struct tg3 *tp = netdev_priv(dev);
7623
7624         if (netif_msg_tx_err(tp)) {
7625                 netdev_err(dev, "transmit timed out, resetting\n");
7626                 tg3_dump_state(tp);
7627         }
7628
7629         tg3_reset_task_schedule(tp);
7630 }
7631
7632 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7633 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7634 {
7635         u32 base = (u32) mapping & 0xffffffff;
7636
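        /* (base + len + 8 < base) is the 32-bit wrap test.  The
         * base > 0xffffdcc0 check is a cheap pre-filter: only an address
         * in the last 0x2340 (9024) bytes below a 4GB boundary can wrap
         * for the frame sizes this driver maps.
         */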
7637         return (base > 0xffffdcc0) && (base + len + 8 < base);
7638 }
7639
7640 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7641  * of any 4GB boundaries: 4G, 8G, etc
7642  */
7643 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7644                                            u32 len, u32 mss)
7645 {
7646         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7647                 u32 base = (u32) mapping & 0xffffffff;
7648
7649                 return ((base + len + (mss & 0x3fff)) < base);
7650         }
7651         return 0;
7652 }
7653
7654 /* Test for DMA addresses > 40-bit */
7655 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7656                                           int len)
7657 {
7658 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7659         if (tg3_flag(tp, 40BIT_DMA_BUG))
7660                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7661         return 0;
7662 #else
7663         return 0;
7664 #endif
7665 }
7666
7667 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7668                                  dma_addr_t mapping, u32 len, u32 flags,
7669                                  u32 mss, u32 vlan)
7670 {
7671         txbd->addr_hi = ((u64) mapping >> 32);
7672         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7673         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7674         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7675 }
7676
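/* Emit one DMA mapping as one or more tx BDs, chopping it into
 * tp->dma_limit sized pieces when the chip cannot take large single
 * BDs.  Returns true when a hardware bug condition was hit, in which
 * case the caller falls back to tigon3_dma_hwbug_workaround().
 */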
7677 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7678                             dma_addr_t map, u32 len, u32 flags,
7679                             u32 mss, u32 vlan)
7680 {
7681         struct tg3 *tp = tnapi->tp;
7682         bool hwbug = false;
7683
7684         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7685                 hwbug = true;
7686
7687         if (tg3_4g_overflow_test(map, len))
7688                 hwbug = true;
7689
7690         if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7691                 hwbug = true;
7692
7693         if (tg3_40bit_overflow_test(tp, map, len))
7694                 hwbug = true;
7695
7696         if (tp->dma_limit) {
7697                 u32 prvidx = *entry;
7698                 u32 tmp_flag = flags & ~TXD_FLAG_END;
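                /* Worked example, assuming dma_limit = 4096 and a 9000
                 * byte mapping: two 4096-byte BDs are emitted here and
                 * len = 808 is left for the tail BD below.  If the tail
                 * would be <= 8 bytes, the last full chunk is halved so
                 * that both remaining pieces stay above the 8-byte limit.
                 */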
7699                 while (len > tp->dma_limit && *budget) {
7700                         u32 frag_len = tp->dma_limit;
7701                         len -= tp->dma_limit;
7702
7703                         /* Avoid the 8-byte DMA problem */
7704                         if (len <= 8) {
7705                                 len += tp->dma_limit / 2;
7706                                 frag_len = tp->dma_limit / 2;
7707                         }
7708
7709                         tnapi->tx_buffers[*entry].fragmented = true;
7710
7711                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7712                                       frag_len, tmp_flag, mss, vlan);
7713                         *budget -= 1;
7714                         prvidx = *entry;
7715                         *entry = NEXT_TX(*entry);
7716
7717                         map += frag_len;
7718                 }
7719
7720                 if (len) {
7721                         if (*budget) {
7722                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7723                                               len, flags, mss, vlan);
7724                                 *budget -= 1;
7725                                 *entry = NEXT_TX(*entry);
7726                         } else {
7727                                 hwbug = true;
7728                                 tnapi->tx_buffers[prvidx].fragmented = false;
7729                         }
7730                 }
7731         } else {
7732                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7733                               len, flags, mss, vlan);
7734                 *entry = NEXT_TX(*entry);
7735         }
7736
7737         return hwbug;
7738 }
7739
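/* Unmap the linear head and all fragments of the skb stored at tx ring
 * slot 'entry', stepping over the extra BDs that tg3_tx_frag_set()
 * flagged as fragmented.  'last' is the index of the final fragment to
 * unmap, or -1 when only the linear head was mapped.
 */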
7740 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7741 {
7742         int i;
7743         struct sk_buff *skb;
7744         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7745
7746         skb = txb->skb;
7747         txb->skb = NULL;
7748
7749         pci_unmap_single(tnapi->tp->pdev,
7750                          dma_unmap_addr(txb, mapping),
7751                          skb_headlen(skb),
7752                          PCI_DMA_TODEVICE);
7753
7754         while (txb->fragmented) {
7755                 txb->fragmented = false;
7756                 entry = NEXT_TX(entry);
7757                 txb = &tnapi->tx_buffers[entry];
7758         }
7759
7760         for (i = 0; i <= last; i++) {
7761                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7762
7763                 entry = NEXT_TX(entry);
7764                 txb = &tnapi->tx_buffers[entry];
7765
7766                 pci_unmap_page(tnapi->tp->pdev,
7767                                dma_unmap_addr(txb, mapping),
7768                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7769
7770                 while (txb->fragmented) {
7771                         txb->fragmented = false;
7772                         entry = NEXT_TX(entry);
7773                         txb = &tnapi->tx_buffers[entry];
7774                 }
7775         }
7776 }
7777
7778 /* Work around 4GB and 40-bit hardware DMA bugs. */
7779 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7780                                        struct sk_buff **pskb,
7781                                        u32 *entry, u32 *budget,
7782                                        u32 base_flags, u32 mss, u32 vlan)
7783 {
7784         struct tg3 *tp = tnapi->tp;
7785         struct sk_buff *new_skb, *skb = *pskb;
7786         dma_addr_t new_addr = 0;
7787         int ret = 0;
7788
7789         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7790                 new_skb = skb_copy(skb, GFP_ATOMIC);
7791         else {
7792                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7793
7794                 new_skb = skb_copy_expand(skb,
7795                                           skb_headroom(skb) + more_headroom,
7796                                           skb_tailroom(skb), GFP_ATOMIC);
7797         }
7798
7799         if (!new_skb) {
7800                 ret = -1;
7801         } else {
7802                 /* New SKB is guaranteed to be linear. */
7803                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7804                                           PCI_DMA_TODEVICE);
7805                 /* Make sure the mapping succeeded */
7806                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7807                         dev_kfree_skb(new_skb);
7808                         ret = -1;
7809                 } else {
7810                         u32 save_entry = *entry;
7811
7812                         base_flags |= TXD_FLAG_END;
7813
7814                         tnapi->tx_buffers[*entry].skb = new_skb;
7815                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7816                                            mapping, new_addr);
7817
7818                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7819                                             new_skb->len, base_flags,
7820                                             mss, vlan)) {
7821                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7822                                 dev_kfree_skb(new_skb);
7823                                 ret = -1;
7824                         }
7825                 }
7826         }
7827
7828         dev_kfree_skb(skb);
7829         *pskb = new_skb;
7830         return ret;
7831 }
7832
7833 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7834
7835 /* Use GSO to work around a rare TSO bug that may be triggered when the
7836  * TSO header is greater than 80 bytes.
7837  */
7838 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7839 {
7840         struct sk_buff *segs, *nskb;
7841         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7842
7843         /* Estimate the number of fragments in the worst case */
7844         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7845                 netif_stop_queue(tp->dev);
7846
7847                 /* netif_tx_stop_queue() must be done before checking
7848                  * tx index in tg3_tx_avail() below, because in
7849                  * tg3_tx(), we update tx index before checking for
7850                  * netif_tx_queue_stopped().
7851                  */
7852                 smp_mb();
7853                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7854                         return NETDEV_TX_BUSY;
7855
7856                 netif_wake_queue(tp->dev);
7857         }
7858
7859         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7860         if (IS_ERR(segs))
7861                 goto tg3_tso_bug_end;
7862
7863         do {
7864                 nskb = segs;
7865                 segs = segs->next;
7866                 nskb->next = NULL;
7867                 tg3_start_xmit(nskb, tp->dev);
7868         } while (segs);
7869
7870 tg3_tso_bug_end:
7871         dev_kfree_skb(skb);
7872
7873         return NETDEV_TX_OK;
7874 }
7875
7876 /* hard_start_xmit for all devices.  The 4G-boundary and 40-bit DMA
7877  * bug workarounds are applied via tg3_tx_frag_set() below.
7878  */
7879 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7880 {
7881         struct tg3 *tp = netdev_priv(dev);
7882         u32 len, entry, base_flags, mss, vlan = 0;
7883         u32 budget;
7884         int i = -1, would_hit_hwbug;
7885         dma_addr_t mapping;
7886         struct tg3_napi *tnapi;
7887         struct netdev_queue *txq;
7888         unsigned int last;
7889
7890         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7891         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7892         if (tg3_flag(tp, ENABLE_TSS))
7893                 tnapi++;
7894
7895         budget = tg3_tx_avail(tnapi);
7896
7897         /* We are running in BH disabled context with netif_tx_lock
7898          * and TX reclaim runs via tp->napi.poll inside of a software
7899          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7900          * no IRQ context deadlocks to worry about either.  Rejoice!
7901          */
7902         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7903                 if (!netif_tx_queue_stopped(txq)) {
7904                         netif_tx_stop_queue(txq);
7905
7906                         /* This is a hard error, log it. */
7907                         netdev_err(dev,
7908                                    "BUG! Tx Ring full when queue awake!\n");
7909                 }
7910                 return NETDEV_TX_BUSY;
7911         }
7912
7913         entry = tnapi->tx_prod;
7914         base_flags = 0;
7915         if (skb->ip_summed == CHECKSUM_PARTIAL)
7916                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7917
7918         mss = skb_shinfo(skb)->gso_size;
7919         if (mss) {
7920                 struct iphdr *iph;
7921                 u32 tcp_opt_len, hdr_len;
7922
7923                 if (skb_header_cloned(skb) &&
7924                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7925                         goto drop;
7926
7927                 iph = ip_hdr(skb);
7928                 tcp_opt_len = tcp_optlen(skb);
7929
7930                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7931
7932                 if (!skb_is_gso_v6(skb)) {
7933                         iph->check = 0;
7934                         iph->tot_len = htons(mss + hdr_len);
7935                 }
7936
7937                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7938                     tg3_flag(tp, TSO_BUG))
7939                         return tg3_tso_bug(tp, skb);
7940
7941                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7942                                TXD_FLAG_CPU_POST_DMA);
7943
7944                 if (tg3_flag(tp, HW_TSO_1) ||
7945                     tg3_flag(tp, HW_TSO_2) ||
7946                     tg3_flag(tp, HW_TSO_3)) {
7947                         tcp_hdr(skb)->check = 0;
7948                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7949                 } else
7950                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7951                                                                  iph->daddr, 0,
7952                                                                  IPPROTO_TCP,
7953                                                                  0);
7954
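                /* The TSO header length does not fit in a single field on
                 * the HW_TSO_2/3 chips, so spare bits of the mss field and
                 * base_flags carry it (encoding below).
                 */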
7955                 if (tg3_flag(tp, HW_TSO_3)) {
7956                         mss |= (hdr_len & 0xc) << 12;
7957                         if (hdr_len & 0x10)
7958                                 base_flags |= 0x00000010;
7959                         base_flags |= (hdr_len & 0x3e0) << 5;
7960                 } else if (tg3_flag(tp, HW_TSO_2))
7961                         mss |= hdr_len << 9;
7962                 else if (tg3_flag(tp, HW_TSO_1) ||
7963                          tg3_asic_rev(tp) == ASIC_REV_5705) {
7964                         if (tcp_opt_len || iph->ihl > 5) {
7965                                 int tsflags;
7966
7967                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7968                                 mss |= (tsflags << 11);
7969                         }
7970                 } else {
7971                         if (tcp_opt_len || iph->ihl > 5) {
7972                                 int tsflags;
7973
7974                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7975                                 base_flags |= tsflags << 12;
7976                         }
7977                 }
7978         }
7979
7980         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7981             !mss && skb->len > VLAN_ETH_FRAME_LEN)
7982                 base_flags |= TXD_FLAG_JMB_PKT;
7983
7984         if (vlan_tx_tag_present(skb)) {
7985                 base_flags |= TXD_FLAG_VLAN;
7986                 vlan = vlan_tx_tag_get(skb);
7987         }
7988
7989         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7990             tg3_flag(tp, TX_TSTAMP_EN)) {
7991                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7992                 base_flags |= TXD_FLAG_HWTSTAMP;
7993         }
7994
7995         len = skb_headlen(skb);
7996
7997         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7998         if (pci_dma_mapping_error(tp->pdev, mapping))
7999                 goto drop;
8000
8002         tnapi->tx_buffers[entry].skb = skb;
8003         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8004
8005         would_hit_hwbug = 0;
8006
8007         if (tg3_flag(tp, 5701_DMA_BUG))
8008                 would_hit_hwbug = 1;
8009
8010         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8011                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8012                             mss, vlan)) {
8013                 would_hit_hwbug = 1;
8014         } else if (skb_shinfo(skb)->nr_frags > 0) {
8015                 u32 tmp_mss = mss;
8016
8017                 if (!tg3_flag(tp, HW_TSO_1) &&
8018                     !tg3_flag(tp, HW_TSO_2) &&
8019                     !tg3_flag(tp, HW_TSO_3))
8020                         tmp_mss = 0;
8021
8022                 /* Now loop through additional data
8023                  * fragments, and queue them.
8024                  */
8025                 last = skb_shinfo(skb)->nr_frags - 1;
8026                 for (i = 0; i <= last; i++) {
8027                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8028
8029                         len = skb_frag_size(frag);
8030                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8031                                                    len, DMA_TO_DEVICE);
8032
8033                         tnapi->tx_buffers[entry].skb = NULL;
8034                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8035                                            mapping);
8036                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8037                                 goto dma_error;
8038
8039                         if (!budget ||
8040                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8041                                             len, base_flags |
8042                                             ((i == last) ? TXD_FLAG_END : 0),
8043                                             tmp_mss, vlan)) {
8044                                 would_hit_hwbug = 1;
8045                                 break;
8046                         }
8047                 }
8048         }
8049
8050         if (would_hit_hwbug) {
8051                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8052
8053                 /* If the workaround fails due to memory/mapping
8054                  * failure, silently drop this packet.
8055                  */
8056                 entry = tnapi->tx_prod;
8057                 budget = tg3_tx_avail(tnapi);
8058                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8059                                                 base_flags, mss, vlan))
8060                         goto drop_nofree;
8061         }
8062
8063         skb_tx_timestamp(skb);
8064         netdev_tx_sent_queue(txq, skb->len);
8065
8066         /* Sync BD data before updating mailbox */
8067         wmb();
8068
8069         /* Packets are ready, update Tx producer idx local and on card. */
8070         tw32_tx_mbox(tnapi->prodmbox, entry);
8071
8072         tnapi->tx_prod = entry;
8073         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8074                 netif_tx_stop_queue(txq);
8075
8076                 /* netif_tx_stop_queue() must be done before checking
8077                  * tx index in tg3_tx_avail() below, because in
8078                  * tg3_tx(), we update tx index before checking for
8079                  * netif_tx_queue_stopped().
8080                  */
8081                 smp_mb();
8082                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8083                         netif_tx_wake_queue(txq);
8084         }
8085
8086         mmiowb();
8087         return NETDEV_TX_OK;
8088
8089 dma_error:
8090         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8091         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8092 drop:
8093         dev_kfree_skb(skb);
8094 drop_nofree:
8095         tp->tx_dropped++;
8096         return NETDEV_TX_OK;
8097 }
8098
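/* Point the MAC's transmit output back at its own receiver.  Used by
 * the NETIF_F_LOOPBACK path below and the ethtool loopback self test.
 */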
8099 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8100 {
8101         if (enable) {
8102                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8103                                   MAC_MODE_PORT_MODE_MASK);
8104
8105                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8106
8107                 if (!tg3_flag(tp, 5705_PLUS))
8108                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8109
8110                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8111                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8112                 else
8113                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8114         } else {
8115                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8116
8117                 if (tg3_flag(tp, 5705_PLUS) ||
8118                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8119                     tg3_asic_rev(tp) == ASIC_REV_5700)
8120                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8121         }
8122
8123         tw32(MAC_MODE, tp->mac_mode);
8124         udelay(40);
8125 }
8126
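/* Configure the PHY for internal loopback, or for external loopback
 * when extlpbk is set, at the requested speed, and program the MAC port
 * mode to match.
 */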
8127 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8128 {
8129         u32 val, bmcr, mac_mode, ptest = 0;
8130
8131         tg3_phy_toggle_apd(tp, false);
8132         tg3_phy_toggle_automdix(tp, false);
8133
8134         if (extlpbk && tg3_phy_set_extloopbk(tp))
8135                 return -EIO;
8136
8137         bmcr = BMCR_FULLDPLX;
8138         switch (speed) {
8139         case SPEED_10:
8140                 break;
8141         case SPEED_100:
8142                 bmcr |= BMCR_SPEED100;
8143                 break;
8144         case SPEED_1000:
8145         default:
8146                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8147                         speed = SPEED_100;
8148                         bmcr |= BMCR_SPEED100;
8149                 } else {
8150                         speed = SPEED_1000;
8151                         bmcr |= BMCR_SPEED1000;
8152                 }
8153         }
8154
8155         if (extlpbk) {
8156                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8157                         tg3_readphy(tp, MII_CTRL1000, &val);
8158                         val |= CTL1000_AS_MASTER |
8159                                CTL1000_ENABLE_MASTER;
8160                         tg3_writephy(tp, MII_CTRL1000, val);
8161                 } else {
8162                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8163                                 MII_TG3_FET_PTEST_TRIM_2;
8164                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8165                 }
8166         } else
8167                 bmcr |= BMCR_LOOPBACK;
8168
8169         tg3_writephy(tp, MII_BMCR, bmcr);
8170
8171         /* The write needs to be flushed for the FETs */
8172         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8173                 tg3_readphy(tp, MII_BMCR, &bmcr);
8174
8175         udelay(40);
8176
8177         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8178             tg3_asic_rev(tp) == ASIC_REV_5785) {
8179                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8180                              MII_TG3_FET_PTEST_FRC_TX_LINK |
8181                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
8182
8183                 /* The write needs to be flushed for the AC131 */
8184                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8185         }
8186
8187         /* Reset to prevent losing 1st rx packet intermittently */
8188         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8189             tg3_flag(tp, 5780_CLASS)) {
8190                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8191                 udelay(10);
8192                 tw32_f(MAC_RX_MODE, tp->rx_mode);
8193         }
8194
8195         mac_mode = tp->mac_mode &
8196                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8197         if (speed == SPEED_1000)
8198                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8199         else
8200                 mac_mode |= MAC_MODE_PORT_MODE_MII;
8201
8202         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8203                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8204
8205                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8206                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8207                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8208                         mac_mode |= MAC_MODE_LINK_POLARITY;
8209
8210                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8211                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8212         }
8213
8214         tw32(MAC_MODE, mac_mode);
8215         udelay(40);
8216
8217         return 0;
8218 }
8219
8220 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8221 {
8222         struct tg3 *tp = netdev_priv(dev);
8223
8224         if (features & NETIF_F_LOOPBACK) {
8225                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8226                         return;
8227
8228                 spin_lock_bh(&tp->lock);
8229                 tg3_mac_loopback(tp, true);
8230                 netif_carrier_on(tp->dev);
8231                 spin_unlock_bh(&tp->lock);
8232                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8233         } else {
8234                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8235                         return;
8236
8237                 spin_lock_bh(&tp->lock);
8238                 tg3_mac_loopback(tp, false);
8239                 /* Force link status check */
8240                 tg3_setup_phy(tp, true);
8241                 spin_unlock_bh(&tp->lock);
8242                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8243         }
8244 }
8245
8246 static netdev_features_t tg3_fix_features(struct net_device *dev,
8247         netdev_features_t features)
8248 {
8249         struct tg3 *tp = netdev_priv(dev);
8250
8251         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8252                 features &= ~NETIF_F_ALL_TSO;
8253
8254         return features;
8255 }
8256
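/* Of the changeable feature bits, only NETIF_F_LOOPBACK requires action
 * at feature-set time.
 */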
8257 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8258 {
8259         netdev_features_t changed = dev->features ^ features;
8260
8261         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8262                 tg3_set_loopback(dev, features);
8263
8264         return 0;
8265 }
8266
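/* Release every rx buffer still held by a producer ring set.  For the
 * per-vector ring sets only the cons..prod window can hold buffers;
 * vector 0's set (the real hardware ring) is freed in full.
 */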
8267 static void tg3_rx_prodring_free(struct tg3 *tp,
8268                                  struct tg3_rx_prodring_set *tpr)
8269 {
8270         int i;
8271
8272         if (tpr != &tp->napi[0].prodring) {
8273                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8274                      i = (i + 1) & tp->rx_std_ring_mask)
8275                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8276                                         tp->rx_pkt_map_sz);
8277
8278                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8279                         for (i = tpr->rx_jmb_cons_idx;
8280                              i != tpr->rx_jmb_prod_idx;
8281                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8282                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8283                                                 TG3_RX_JMB_MAP_SZ);
8284                         }
8285                 }
8286
8287                 return;
8288         }
8289
8290         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8291                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8292                                 tp->rx_pkt_map_sz);
8293
8294         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8295                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8296                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8297                                         TG3_RX_JMB_MAP_SZ);
8298         }
8299 }
8300
8301 /* Initialize rx rings for packet processing.
8302  *
8303  * The chip has been shut down and the driver detached from
8304  * the networking stack, so no interrupts or new tx packets will
8305  * end up in the driver.  tp->{tx,}lock are held and thus
8306  * we may not sleep.
8307  */
8308 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8309                                  struct tg3_rx_prodring_set *tpr)
8310 {
8311         u32 i, rx_pkt_dma_sz;
8312
8313         tpr->rx_std_cons_idx = 0;
8314         tpr->rx_std_prod_idx = 0;
8315         tpr->rx_jmb_cons_idx = 0;
8316         tpr->rx_jmb_prod_idx = 0;
8317
8318         if (tpr != &tp->napi[0].prodring) {
8319                 memset(&tpr->rx_std_buffers[0], 0,
8320                        TG3_RX_STD_BUFF_RING_SIZE(tp));
8321                 if (tpr->rx_jmb_buffers)
8322                         memset(&tpr->rx_jmb_buffers[0], 0,
8323                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
8324                 goto done;
8325         }
8326
8327         /* Zero out all descriptors. */
8328         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8329
8330         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8331         if (tg3_flag(tp, 5780_CLASS) &&
8332             tp->dev->mtu > ETH_DATA_LEN)
8333                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8334         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8335
8336         /* Initialize invariants of the rings; we only set this
8337          * stuff once.  This works because the card does not
8338          * write into the rx buffer posting rings.
8339          */
8340         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8341                 struct tg3_rx_buffer_desc *rxd;
8342
8343                 rxd = &tpr->rx_std[i];
8344                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8345                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8346                 rxd->opaque = (RXD_OPAQUE_RING_STD |
8347                                (i << RXD_OPAQUE_INDEX_SHIFT));
8348         }
8349
8350         /* Now allocate fresh SKBs for each rx ring. */
8351         for (i = 0; i < tp->rx_pending; i++) {
8352                 unsigned int frag_size;
8353
8354                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8355                                       &frag_size) < 0) {
8356                         netdev_warn(tp->dev,
8357                                     "Using a smaller RX standard ring. Only "
8358                                     "%d out of %d buffers were allocated "
8359                                     "successfully\n", i, tp->rx_pending);
8360                         if (i == 0)
8361                                 goto initfail;
8362                         tp->rx_pending = i;
8363                         break;
8364                 }
8365         }
8366
8367         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8368                 goto done;
8369
8370         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8371
8372         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8373                 goto done;
8374
8375         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8376                 struct tg3_rx_buffer_desc *rxd;
8377
8378                 rxd = &tpr->rx_jmb[i].std;
8379                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8380                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8381                                   RXD_FLAG_JUMBO;
8382                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8383                        (i << RXD_OPAQUE_INDEX_SHIFT));
8384         }
8385
8386         for (i = 0; i < tp->rx_jumbo_pending; i++) {
8387                 unsigned int frag_size;
8388
8389                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8390                                       &frag_size) < 0) {
8391                         netdev_warn(tp->dev,
8392                                     "Using a smaller RX jumbo ring. Only %d "
8393                                     "out of %d buffers were allocated "
8394                                     "successfully\n", i, tp->rx_jumbo_pending);
8395                         if (i == 0)
8396                                 goto initfail;
8397                         tp->rx_jumbo_pending = i;
8398                         break;
8399                 }
8400         }
8401
8402 done:
8403         return 0;
8404
8405 initfail:
8406         tg3_rx_prodring_free(tp, tpr);
8407         return -ENOMEM;
8408 }
8409
8410 static void tg3_rx_prodring_fini(struct tg3 *tp,
8411                                  struct tg3_rx_prodring_set *tpr)
8412 {
8413         kfree(tpr->rx_std_buffers);
8414         tpr->rx_std_buffers = NULL;
8415         kfree(tpr->rx_jmb_buffers);
8416         tpr->rx_jmb_buffers = NULL;
8417         if (tpr->rx_std) {
8418                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8419                                   tpr->rx_std, tpr->rx_std_mapping);
8420                 tpr->rx_std = NULL;
8421         }
8422         if (tpr->rx_jmb) {
8423                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8424                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8425                 tpr->rx_jmb = NULL;
8426         }
8427 }
8428
8429 static int tg3_rx_prodring_init(struct tg3 *tp,
8430                                 struct tg3_rx_prodring_set *tpr)
8431 {
8432         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8433                                       GFP_KERNEL);
8434         if (!tpr->rx_std_buffers)
8435                 return -ENOMEM;
8436
8437         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8438                                          TG3_RX_STD_RING_BYTES(tp),
8439                                          &tpr->rx_std_mapping,
8440                                          GFP_KERNEL);
8441         if (!tpr->rx_std)
8442                 goto err_out;
8443
8444         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8445                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8446                                               GFP_KERNEL);
8447                 if (!tpr->rx_jmb_buffers)
8448                         goto err_out;
8449
8450                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8451                                                  TG3_RX_JMB_RING_BYTES(tp),
8452                                                  &tpr->rx_jmb_mapping,
8453                                                  GFP_KERNEL);
8454                 if (!tpr->rx_jmb)
8455                         goto err_out;
8456         }
8457
8458         return 0;
8459
8460 err_out:
8461         tg3_rx_prodring_fini(tp, tpr);
8462         return -ENOMEM;
8463 }
8464
8465 /* Free up pending packets in all rx/tx rings.
8466  *
8467  * The chip has been shut down and the driver detached from
8468  * the networking stack, so no interrupts or new tx packets will
8469  * end up in the driver.  tp->{tx,}lock is not held and we are not
8470  * in an interrupt context and thus may sleep.
8471  */
8472 static void tg3_free_rings(struct tg3 *tp)
8473 {
8474         int i, j;
8475
8476         for (j = 0; j < tp->irq_cnt; j++) {
8477                 struct tg3_napi *tnapi = &tp->napi[j];
8478
8479                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8480
8481                 if (!tnapi->tx_buffers)
8482                         continue;
8483
8484                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8485                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8486
8487                         if (!skb)
8488                                 continue;
8489
8490                         tg3_tx_skb_unmap(tnapi, i,
8491                                          skb_shinfo(skb)->nr_frags - 1);
8492
8493                         dev_kfree_skb_any(skb);
8494                 }
8495                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8496         }
8497 }
8498
8499 /* Initialize tx/rx rings for packet processing.
8500  *
8501  * The chip has been shut down and the driver detached from
8502  * the networking stack, so no interrupts or new tx packets will
8503  * end up in the driver.  tp->{tx,}lock are held and thus
8504  * we may not sleep.
8505  */
8506 static int tg3_init_rings(struct tg3 *tp)
8507 {
8508         int i;
8509
8510         /* Free up all the SKBs. */
8511         tg3_free_rings(tp);
8512
8513         for (i = 0; i < tp->irq_cnt; i++) {
8514                 struct tg3_napi *tnapi = &tp->napi[i];
8515
8516                 tnapi->last_tag = 0;
8517                 tnapi->last_irq_tag = 0;
8518                 tnapi->hw_status->status = 0;
8519                 tnapi->hw_status->status_tag = 0;
8520                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8521
8522                 tnapi->tx_prod = 0;
8523                 tnapi->tx_cons = 0;
8524                 if (tnapi->tx_ring)
8525                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8526
8527                 tnapi->rx_rcb_ptr = 0;
8528                 if (tnapi->rx_rcb)
8529                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8530
8531                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8532                         tg3_free_rings(tp);
8533                         return -ENOMEM;
8534                 }
8535         }
8536
8537         return 0;
8538 }
8539
8540 static void tg3_mem_tx_release(struct tg3 *tp)
8541 {
8542         int i;
8543
8544         for (i = 0; i < tp->irq_max; i++) {
8545                 struct tg3_napi *tnapi = &tp->napi[i];
8546
8547                 if (tnapi->tx_ring) {
8548                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8549                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8550                         tnapi->tx_ring = NULL;
8551                 }
8552
8553                 kfree(tnapi->tx_buffers);
8554                 tnapi->tx_buffers = NULL;
8555         }
8556 }
8557
8558 static int tg3_mem_tx_acquire(struct tg3 *tp)
8559 {
8560         int i;
8561         struct tg3_napi *tnapi = &tp->napi[0];
8562
8563         /* If multivector TSS is enabled, vector 0 does not handle
8564          * tx interrupts.  Don't allocate any resources for it.
8565          */
8566         if (tg3_flag(tp, ENABLE_TSS))
8567                 tnapi++;
8568
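             /* The tx rings thus occupy vectors 1..tp->txq_cnt when
              * TSS is enabled, or 0..tp->txq_cnt-1 when it is not.
              */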
8569         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8570                 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8571                                             sizeof(struct tg3_tx_ring_info),
                                                 GFP_KERNEL);
8572                 if (!tnapi->tx_buffers)
8573                         goto err_out;
8574
8575                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8576                                                     TG3_TX_RING_BYTES,
8577                                                     &tnapi->tx_desc_mapping,
8578                                                     GFP_KERNEL);
8579                 if (!tnapi->tx_ring)
8580                         goto err_out;
8581         }
8582
8583         return 0;
8584
8585 err_out:
8586         tg3_mem_tx_release(tp);
8587         return -ENOMEM;
8588 }
8589
8590 static void tg3_mem_rx_release(struct tg3 *tp)
8591 {
8592         int i;
8593
8594         for (i = 0; i < tp->irq_max; i++) {
8595                 struct tg3_napi *tnapi = &tp->napi[i];
8596
8597                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8598
8599                 if (!tnapi->rx_rcb)
8600                         continue;
8601
8602                 dma_free_coherent(&tp->pdev->dev,
8603                                   TG3_RX_RCB_RING_BYTES(tp),
8604                                   tnapi->rx_rcb,
8605                                   tnapi->rx_rcb_mapping);
8606                 tnapi->rx_rcb = NULL;
8607         }
8608 }
8609
8610 static int tg3_mem_rx_acquire(struct tg3 *tp)
8611 {
8612         unsigned int i, limit;
8613
8614         limit = tp->rxq_cnt;
8615
8616         /* If RSS is enabled, vector zero still needs a producer
8617          * ring set; it is the one the hardware actually uses.
8618          */
8619         if (tg3_flag(tp, ENABLE_RSS))
8620                 limit++;
8621
8622         for (i = 0; i < limit; i++) {
8623                 struct tg3_napi *tnapi = &tp->napi[i];
8624
8625                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8626                         goto err_out;
8627
8628                 /* If multivector RSS is enabled, vector 0
8629                  * does not handle rx or tx interrupts.
8630                  * Don't allocate any resources for it.
8631                  */
8632                 if (!i && tg3_flag(tp, ENABLE_RSS))
8633                         continue;
8634
8635                 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8636                                                     TG3_RX_RCB_RING_BYTES(tp),
8637                                                     &tnapi->rx_rcb_mapping,
8638                                                     GFP_KERNEL);
8639                 if (!tnapi->rx_rcb)
8640                         goto err_out;
8641         }
8642
8643         return 0;
8644
8645 err_out:
8646         tg3_mem_rx_release(tp);
8647         return -ENOMEM;
8648 }
8649
8650 /*
8651  * Must only be invoked once interrupt sources have been
8652  * disabled and the hardware shut down.
8653  */
8654 static void tg3_free_consistent(struct tg3 *tp)
8655 {
8656         int i;
8657
8658         for (i = 0; i < tp->irq_cnt; i++) {
8659                 struct tg3_napi *tnapi = &tp->napi[i];
8660
8661                 if (tnapi->hw_status) {
8662                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8663                                           tnapi->hw_status,
8664                                           tnapi->status_mapping);
8665                         tnapi->hw_status = NULL;
8666                 }
8667         }
8668
8669         tg3_mem_rx_release(tp);
8670         tg3_mem_tx_release(tp);
8671
8672         if (tp->hw_stats) {
8673                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8674                                   tp->hw_stats, tp->stats_mapping);
8675                 tp->hw_stats = NULL;
8676         }
8677 }
8678
8679 /*
8680  * Must only be invoked once interrupt sources have been
8681  * disabled and the hardware shut down.  Can sleep.
8682  */
8683 static int tg3_alloc_consistent(struct tg3 *tp)
8684 {
8685         int i;
8686
8687         tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8688                                            sizeof(struct tg3_hw_stats),
8689                                            &tp->stats_mapping, GFP_KERNEL);
8690         if (!tp->hw_stats)
8691                 goto err_out;
8692
8693         for (i = 0; i < tp->irq_cnt; i++) {
8694                 struct tg3_napi *tnapi = &tp->napi[i];
8695                 struct tg3_hw_status *sblk;
8696
8697                 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8698                                                        TG3_HW_STATUS_SIZE,
8699                                                        &tnapi->status_mapping,
8700                                                        GFP_KERNEL);
8701                 if (!tnapi->hw_status)
8702                         goto err_out;
8703
8704                 sblk = tnapi->hw_status;
8705
8706                 if (tg3_flag(tp, ENABLE_RSS)) {
8707                         u16 *prodptr = NULL;
8708
8709                         /*
8710                          * When RSS is enabled, the status block format changes
8711                          * slightly.  The "rx_jumbo_consumer", "reserved",
8712                          * and "rx_mini_consumer" members get mapped to the
8713                          * other three rx return ring producer indexes.
8714                          */
8715                         switch (i) {
8716                         case 1:
8717                                 prodptr = &sblk->idx[0].rx_producer;
8718                                 break;
8719                         case 2:
8720                                 prodptr = &sblk->rx_jumbo_consumer;
8721                                 break;
8722                         case 3:
8723                                 prodptr = &sblk->reserved;
8724                                 break;
8725                         case 4:
8726                                 prodptr = &sblk->rx_mini_consumer;
8727                                 break;
8728                         }
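                             /* Vector 0 handles no rx return ring
                              * under RSS, so its rx_rcb_prod_idx
                              * stays NULL.
                              */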
8729                         tnapi->rx_rcb_prod_idx = prodptr;
8730                 } else {
8731                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8732                 }
8733         }
8734
8735         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8736                 goto err_out;
8737
8738         return 0;
8739
8740 err_out:
8741         tg3_free_consistent(tp);
8742         return -ENOMEM;
8743 }
8744
8745 #define MAX_WAIT_CNT 1000
8746
8747 /* To stop a block, clear the enable bit and poll until it
8748  * clears.  tp->lock is held.
8749  */
8750 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8751 {
8752         unsigned int i;
8753         u32 val;
8754
8755         if (tg3_flag(tp, 5705_PLUS)) {
8756                 switch (ofs) {
8757                 case RCVLSC_MODE:
8758                 case DMAC_MODE:
8759                 case MBFREE_MODE:
8760                 case BUFMGR_MODE:
8761                 case MEMARB_MODE:
8762                         /* These bits can't be toggled on the
8763                          * 5705/5750, so just report success.
8764                          */
8765                         return 0;
8766
8767                 default:
8768                         break;
8769                 }
8770         }
8771
8772         val = tr32(ofs);
8773         val &= ~enable_bit;
8774         tw32_f(ofs, val);
8775
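             /* Poll for at most MAX_WAIT_CNT * 100us = 100ms. */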
8776         for (i = 0; i < MAX_WAIT_CNT; i++) {
8777                 if (pci_channel_offline(tp->pdev)) {
8778                         dev_err(&tp->pdev->dev,
8779                                 "tg3_stop_block device offline, "
8780                                 "ofs=%lx enable_bit=%x\n",
8781                                 ofs, enable_bit);
8782                         return -ENODEV;
8783                 }
8784
8785                 udelay(100);
8786                 val = tr32(ofs);
8787                 if ((val & enable_bit) == 0)
8788                         break;
8789         }
8790
8791         if (i == MAX_WAIT_CNT && !silent) {
8792                 dev_err(&tp->pdev->dev,
8793                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8794                         ofs, enable_bit);
8795                 return -ENODEV;
8796         }
8797
8798         return 0;
8799 }
8800
8801 /* tp->lock is held. */
8802 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8803 {
8804         int i, err;
8805
8806         tg3_disable_ints(tp);
8807
8808         if (pci_channel_offline(tp->pdev)) {
8809                 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8810                 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8811                 err = -ENODEV;
8812                 goto err_no_dev;
8813         }
8814
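             /* Quiesce the receive side first so no new frames enter
              * the chip, then the send side and DMA engines; host
              * coalescing, the buffer manager and the memory arbiter
              * go down last.  Errors are OR-ed together so one hung
              * block does not abort the rest of the teardown.
              */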
8815         tp->rx_mode &= ~RX_MODE_ENABLE;
8816         tw32_f(MAC_RX_MODE, tp->rx_mode);
8817         udelay(10);
8818
8819         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8820         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8821         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8822         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8823         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8824         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8825
8826         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8827         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8828         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8829         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8830         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8831         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8832         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8833
8834         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8835         tw32_f(MAC_MODE, tp->mac_mode);
8836         udelay(40);
8837
8838         tp->tx_mode &= ~TX_MODE_ENABLE;
8839         tw32_f(MAC_TX_MODE, tp->tx_mode);
8840
8841         for (i = 0; i < MAX_WAIT_CNT; i++) {
8842                 udelay(100);
8843                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8844                         break;
8845         }
8846         if (i >= MAX_WAIT_CNT) {
8847                 dev_err(&tp->pdev->dev,
8848                         "%s timed out, TX_MODE_ENABLE will not clear "
8849                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8850                 err |= -ENODEV;
8851         }
8852
8853         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8854         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8855         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8856
8857         tw32(FTQ_RESET, 0xffffffff);
8858         tw32(FTQ_RESET, 0x00000000);
8859
8860         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8861         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8862
8863 err_no_dev:
8864         for (i = 0; i < tp->irq_cnt; i++) {
8865                 struct tg3_napi *tnapi = &tp->napi[i];
8866                 if (tnapi->hw_status)
8867                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8868         }
8869
8870         return err;
8871 }
8872
8873 /* Save PCI command register before chip reset */
8874 static void tg3_save_pci_state(struct tg3 *tp)
8875 {
8876         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8877 }
8878
8879 /* Restore PCI state after chip reset */
8880 static void tg3_restore_pci_state(struct tg3 *tp)
8881 {
8882         u32 val;
8883
8884         /* Re-enable indirect register accesses. */
8885         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8886                                tp->misc_host_ctrl);
8887
8888         /* Set MAX PCI retry to zero. */
8889         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8890         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8891             tg3_flag(tp, PCIX_MODE))
8892                 val |= PCISTATE_RETRY_SAME_DMA;
8893         /* Allow reads and writes to the APE register and memory space. */
8894         if (tg3_flag(tp, ENABLE_APE))
8895                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8896                        PCISTATE_ALLOW_APE_SHMEM_WR |
8897                        PCISTATE_ALLOW_APE_PSPACE_WR;
8898         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8899
8900         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8901
8902         if (!tg3_flag(tp, PCI_EXPRESS)) {
8903                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8904                                       tp->pci_cacheline_sz);
8905                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8906                                       tp->pci_lat_timer);
8907         }
8908
8909         /* Make sure PCI-X relaxed ordering bit is clear. */
8910         if (tg3_flag(tp, PCIX_MODE)) {
8911                 u16 pcix_cmd;
8912
8913                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8914                                      &pcix_cmd);
8915                 pcix_cmd &= ~PCI_X_CMD_ERO;
8916                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8917                                       pcix_cmd);
8918         }
8919
8920         if (tg3_flag(tp, 5780_CLASS)) {
8921
8922                 /* Chip reset on 5780 will reset the MSI enable bit,
8923                  * so we need to restore it.
8924                  */
8925                 if (tg3_flag(tp, USING_MSI)) {
8926                         u16 ctrl;
8927
8928                         pci_read_config_word(tp->pdev,
8929                                              tp->msi_cap + PCI_MSI_FLAGS,
8930                                              &ctrl);
8931                         pci_write_config_word(tp->pdev,
8932                                               tp->msi_cap + PCI_MSI_FLAGS,
8933                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8934                         val = tr32(MSGINT_MODE);
8935                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8936                 }
8937         }
8938 }
8939
8940 /* tp->lock is held. */
8941 static int tg3_chip_reset(struct tg3 *tp)
8942 {
8943         u32 val;
8944         void (*write_op)(struct tg3 *, u32, u32);
8945         int i, err;
8946
8947         if (!pci_device_is_present(tp->pdev))
8948                 return -ENODEV;
8949
8950         tg3_nvram_lock(tp);
8951
8952         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8953
8954         /* No matching tg3_nvram_unlock() after this because
8955          * chip reset below will undo the nvram lock.
8956          */
8957         tp->nvram_lock_cnt = 0;
8958
8959         /* GRC_MISC_CFG core clock reset will clear the memory
8960          * enable bit in PCI register 4 and the MSI enable bit
8961          * on some chips, so we save relevant registers here.
8962          */
8963         tg3_save_pci_state(tp);
8964
8965         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8966             tg3_flag(tp, 5755_PLUS))
8967                 tw32(GRC_FASTBOOT_PC, 0);
8968
8969         /*
8970          * We must avoid the readl() that normally takes place.
8971          * It locks up machines, causes machine checks, and other
8972          * fun things.  So, temporarily disable the 5701
8973          * hardware workaround while we do the reset.
8974          */
8975         write_op = tp->write32;
8976         if (write_op == tg3_write_flush_reg32)
8977                 tp->write32 = tg3_write32;
8978
8979         /* Prevent the irq handler from reading or writing PCI registers
8980          * during chip reset when the memory enable bit in the PCI command
8981          * register may be cleared.  The chip does not generate interrupts
8982          * at this time, but the irq handler may still be called due to irq
8983          * sharing or irqpoll.
8984          */
8985         tg3_flag_set(tp, CHIP_RESETTING);
8986         for (i = 0; i < tp->irq_cnt; i++) {
8987                 struct tg3_napi *tnapi = &tp->napi[i];
8988                 if (tnapi->hw_status) {
8989                         tnapi->hw_status->status = 0;
8990                         tnapi->hw_status->status_tag = 0;
8991                 }
8992                 tnapi->last_tag = 0;
8993                 tnapi->last_irq_tag = 0;
8994         }
8995         smp_mb();
8996
8997         for (i = 0; i < tp->irq_cnt; i++)
8998                 synchronize_irq(tp->napi[i].irq_vec);
8999
9000         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9001                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9002                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9003         }
9004
9005         /* do the reset */
9006         val = GRC_MISC_CFG_CORECLK_RESET;
9007
9008         if (tg3_flag(tp, PCI_EXPRESS)) {
9009                 /* Force PCIe 1.0a mode */
9010                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9011                     !tg3_flag(tp, 57765_PLUS) &&
9012                     tr32(TG3_PCIE_PHY_TSTCTL) ==
9013                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9014                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9015
9016                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9017                         tw32(GRC_MISC_CFG, (1 << 29));
9018                         val |= (1 << 29);
9019                 }
9020         }
9021
9022         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9023                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9024                 tw32(GRC_VCPU_EXT_CTRL,
9025                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9026         }
9027
9028         /* Manage gphy power on all PCIe devices lacking a CPMU. */
9029         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9030                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9031
9032         tw32(GRC_MISC_CFG, val);
9033
9034         /* restore 5701 hardware bug workaround write method */
9035         tp->write32 = write_op;
9036
9037         /* Unfortunately, we have to delay before the PCI read back.
9038          * Some 575X chips even will not respond to a PCI cfg access
9039          * when the reset command is given to the chip.
9040          *
9041          * How do these hardware designers expect things to work
9042          * properly if the PCI write is posted for a long period
9043          * of time?  It is always necessary to have some method by
9044          * which a register read back can occur to push the write
9045          * out which does the reset.
9046          *
9047          * For most tg3 variants the trick below was working.
9048          * Ho hum...
9049          */
9050         udelay(120);
9051
9052         /* Flush PCI posted writes.  The normal MMIO registers
9053          * are inaccessible at this time so this is the only
9054          * way to do this reliably (actually, this is no longer
9055          * the case, see above).  I tried to use indirect
9056          * register read/write but this upset some 5701 variants.
9057          */
9058         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9059
9060         udelay(120);
9061
9062         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9063                 u16 val16;
9064
9065                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9066                         int j;
9067                         u32 cfg_val;
9068
9069                         /* Wait for link training to complete.  */
9070                         for (j = 0; j < 5000; j++)
9071                                 udelay(100);
9072
9073                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9074                         pci_write_config_dword(tp->pdev, 0xc4,
9075                                                cfg_val | (1 << 15));
9076                 }
9077
9078                 /* Clear the "no snoop" and "relaxed ordering" bits. */
9079                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9080                 /*
9081                  * Older PCIe devices only support the 128 byte
9082                  * MPS setting.  Enforce the restriction.
9083                  */
9084                 if (!tg3_flag(tp, CPMU_PRESENT))
9085                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9086                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9087
9088                 /* Clear error status */
9089                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9090                                       PCI_EXP_DEVSTA_CED |
9091                                       PCI_EXP_DEVSTA_NFED |
9092                                       PCI_EXP_DEVSTA_FED |
9093                                       PCI_EXP_DEVSTA_URD);
9094         }
9095
9096         tg3_restore_pci_state(tp);
9097
9098         tg3_flag_clear(tp, CHIP_RESETTING);
9099         tg3_flag_clear(tp, ERROR_PROCESSED);
9100
9101         val = 0;
9102         if (tg3_flag(tp, 5780_CLASS))
9103                 val = tr32(MEMARB_MODE);
9104         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9105
9106         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9107                 tg3_stop_fw(tp);
9108                 tw32(0x5000, 0x400);
9109         }
9110
9111         if (tg3_flag(tp, IS_SSB_CORE)) {
9112                 /*
9113                  * BCM4785: Stop the Rx RISC CPU, which is not required
9114                  * for operation, to avoid problems from its potentially
9115                  * defective internal ROM.
9116                  */
9117                 tg3_stop_fw(tp);
9118                 tg3_halt_cpu(tp, RX_CPU_BASE);
9119         }
9120
9121         err = tg3_poll_fw(tp);
9122         if (err)
9123                 return err;
9124
9125         tw32(GRC_MODE, tp->grc_mode);
9126
9127         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9128                 val = tr32(0xc4);
9129
9130                 tw32(0xc4, val | (1 << 15));
9131         }
9132
9133         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9134             tg3_asic_rev(tp) == ASIC_REV_5705) {
9135                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9136                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9137                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9138                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9139         }
9140
9141         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9142                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9143                 val = tp->mac_mode;
9144         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9145                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9146                 val = tp->mac_mode;
9147         } else
9148                 val = 0;
9149
9150         tw32_f(MAC_MODE, val);
9151         udelay(40);
9152
9153         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9154
9155         tg3_mdio_start(tp);
9156
9157         if (tg3_flag(tp, PCI_EXPRESS) &&
9158             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9159             tg3_asic_rev(tp) != ASIC_REV_5785 &&
9160             !tg3_flag(tp, 57765_PLUS)) {
9161                 val = tr32(0x7c00);
9162
9163                 tw32(0x7c00, val | (1 << 25));
9164         }
9165
9166         if (tg3_asic_rev(tp) == ASIC_REV_5720) {
9167                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9168                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9169         }
9170
9171         /* Reprobe ASF enable state.  */
9172         tg3_flag_clear(tp, ENABLE_ASF);
9173         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9174                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9175
9176         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9177         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9178         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9179                 u32 nic_cfg;
9180
9181                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9182                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9183                         tg3_flag_set(tp, ENABLE_ASF);
9184                         tp->last_event_jiffies = jiffies;
9185                         if (tg3_flag(tp, 5750_PLUS))
9186                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9187
9188                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9189                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9190                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9191                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9192                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9193                 }
9194         }
9195
9196         return 0;
9197 }
9198
9199 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9200 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9201
9202 /* tp->lock is held. */
9203 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9204 {
9205         int err;
9206
9207         tg3_stop_fw(tp);
9208
9209         tg3_write_sig_pre_reset(tp, kind);
9210
9211         tg3_abort_hw(tp, silent);
9212         err = tg3_chip_reset(tp);
9213
9214         __tg3_set_mac_addr(tp, false);
9215
9216         tg3_write_sig_legacy(tp, kind);
9217         tg3_write_sig_post_reset(tp, kind);
9218
9219         if (tp->hw_stats) {
9220                 /* Save the stats across chip resets... */
9221                 tg3_get_nstats(tp, &tp->net_stats_prev);
9222                 tg3_get_estats(tp, &tp->estats_prev);
9223
9224                 /* And make sure the next sample is new data */
9225                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9226         }
9227
9228         return err;
9229 }
9230
9231 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9232 {
9233         struct tg3 *tp = netdev_priv(dev);
9234         struct sockaddr *addr = p;
9235         int err = 0;
9236         bool skip_mac_1 = false;
9237
9238         if (!is_valid_ether_addr(addr->sa_data))
9239                 return -EADDRNOTAVAIL;
9240
9241         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9242
9243         if (!netif_running(dev))
9244                 return 0;
9245
9246         if (tg3_flag(tp, ENABLE_ASF)) {
9247                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9248
9249                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9250                 addr0_low = tr32(MAC_ADDR_0_LOW);
9251                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9252                 addr1_low = tr32(MAC_ADDR_1_LOW);
9253
9254                 /* Skip MAC addr 1 if ASF is using it. */
9255                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9256                     !(addr1_high == 0 && addr1_low == 0))
9257                         skip_mac_1 = true;
9258         }
9259         spin_lock_bh(&tp->lock);
9260         __tg3_set_mac_addr(tp, skip_mac_1);
9261         spin_unlock_bh(&tp->lock);
9262
9263         return err;
9264 }
9265
9266 /* tp->lock is held. */
9267 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9268                            dma_addr_t mapping, u32 maxlen_flags,
9269                            u32 nic_addr)
9270 {
9271         tg3_write_mem(tp,
9272                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9273                       ((u64) mapping >> 32));
9274         tg3_write_mem(tp,
9275                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9276                       ((u64) mapping & 0xffffffff));
9277         tg3_write_mem(tp,
9278                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9279                        maxlen_flags);
9280
9281         if (!tg3_flag(tp, 5705_PLUS))
9282                 tg3_write_mem(tp,
9283                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9284                               nic_addr);
9285 }
9286
9287
9288 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9289 {
9290         int i = 0;
9291
9292         if (!tg3_flag(tp, ENABLE_TSS)) {
9293                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9294                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9295                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9296         } else {
9297                 tw32(HOSTCC_TXCOL_TICKS, 0);
9298                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9299                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9300
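                     /* The per-vector coalescing registers are laid
                      * out in 0x18-byte blocks starting at the _VEC1
                      * offsets, hence the i * 0x18 stride.
                      */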
9301                 for (; i < tp->txq_cnt; i++) {
9302                         u32 reg;
9303
9304                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9305                         tw32(reg, ec->tx_coalesce_usecs);
9306                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9307                         tw32(reg, ec->tx_max_coalesced_frames);
9308                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9309                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9310                 }
9311         }
9312
9313         for (; i < tp->irq_max - 1; i++) {
9314                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9315                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9316                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9317         }
9318 }
9319
9320 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9321 {
9322         int i = 0;
9323         u32 limit = tp->rxq_cnt;
9324
9325         if (!tg3_flag(tp, ENABLE_RSS)) {
9326                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9327                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9328                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9329                 limit--;
9330         } else {
9331                 tw32(HOSTCC_RXCOL_TICKS, 0);
9332                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9333                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9334         }
9335
9336         for (; i < limit; i++) {
9337                 u32 reg;
9338
9339                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9340                 tw32(reg, ec->rx_coalesce_usecs);
9341                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9342                 tw32(reg, ec->rx_max_coalesced_frames);
9343                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9344                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9345         }
9346
9347         for (; i < tp->irq_max - 1; i++) {
9348                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9349                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9350                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9351         }
9352 }
9353
9354 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9355 {
9356         tg3_coal_tx_init(tp, ec);
9357         tg3_coal_rx_init(tp, ec);
9358
9359         if (!tg3_flag(tp, 5705_PLUS)) {
9360                 u32 val = ec->stats_block_coalesce_usecs;
9361
9362                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9363                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9364
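                     /* Writing zero here stops the periodic
                      * statistics block updates while the link
                      * is down.
                      */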
9365                 if (!tp->link_up)
9366                         val = 0;
9367
9368                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9369         }
9370 }
9371
9372 /* tp->lock is held. */
9373 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9374 {
9375         u32 txrcb, limit;
9376
9377         /* Disable all transmit rings but the first. */
9378         if (!tg3_flag(tp, 5705_PLUS))
9379                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9380         else if (tg3_flag(tp, 5717_PLUS))
9381                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9382         else if (tg3_flag(tp, 57765_CLASS) ||
9383                  tg3_asic_rev(tp) == ASIC_REV_5762)
9384                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9385         else
9386                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9387
9388         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9389              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9390                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9391                               BDINFO_FLAGS_DISABLED);
9392 }
9393
9394 /* tp->lock is held. */
9395 static void tg3_tx_rcbs_init(struct tg3 *tp)
9396 {
9397         int i = 0;
9398         u32 txrcb = NIC_SRAM_SEND_RCB;
9399
9400         if (tg3_flag(tp, ENABLE_TSS))
9401                 i++;
9402
9403         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9404                 struct tg3_napi *tnapi = &tp->napi[i];
9405
9406                 if (!tnapi->tx_ring)
9407                         continue;
9408
9409                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9410                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9411                                NIC_SRAM_TX_BUFFER_DESC);
9412         }
9413 }
9414
9415 /* tp->lock is held. */
9416 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9417 {
9418         u32 rxrcb, limit;
9419
9420         /* Disable all receive return rings but the first. */
9421         if (tg3_flag(tp, 5717_PLUS))
9422                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9423         else if (!tg3_flag(tp, 5705_PLUS))
9424                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9425         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9426                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9427                  tg3_flag(tp, 57765_CLASS))
9428                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9429         else
9430                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9431
9432         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9433              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9434                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9435                               BDINFO_FLAGS_DISABLED);
9436 }
9437
9438 /* tp->lock is held. */
9439 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9440 {
9441         int i = 0;
9442         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9443
9444         if (tg3_flag(tp, ENABLE_RSS))
9445                 i++;
9446
9447         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9448                 struct tg3_napi *tnapi = &tp->napi[i];
9449
9450                 if (!tnapi->rx_rcb)
9451                         continue;
9452
9453                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9454                                (tp->rx_ret_ring_mask + 1) <<
9455                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9456         }
9457 }
9458
9459 /* tp->lock is held. */
9460 static void tg3_rings_reset(struct tg3 *tp)
9461 {
9462         int i;
9463         u32 stblk;
9464         struct tg3_napi *tnapi = &tp->napi[0];
9465
9466         tg3_tx_rcbs_disable(tp);
9467
9468         tg3_rx_ret_rcbs_disable(tp);
9469
9470         /* Disable interrupts */
9471         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9472         tp->napi[0].chk_msi_cnt = 0;
9473         tp->napi[0].last_rx_cons = 0;
9474         tp->napi[0].last_tx_cons = 0;
9475
9476         /* Zero mailbox registers. */
9477         if (tg3_flag(tp, SUPPORT_MSIX)) {
9478                 for (i = 1; i < tp->irq_max; i++) {
9479                         tp->napi[i].tx_prod = 0;
9480                         tp->napi[i].tx_cons = 0;
9481                         if (tg3_flag(tp, ENABLE_TSS))
9482                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
9483                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
9484                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9485                         tp->napi[i].chk_msi_cnt = 0;
9486                         tp->napi[i].last_rx_cons = 0;
9487                         tp->napi[i].last_tx_cons = 0;
9488                 }
9489                 if (!tg3_flag(tp, ENABLE_TSS))
9490                         tw32_mailbox(tp->napi[0].prodmbox, 0);
9491         } else {
9492                 tp->napi[0].tx_prod = 0;
9493                 tp->napi[0].tx_cons = 0;
9494                 tw32_mailbox(tp->napi[0].prodmbox, 0);
9495                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9496         }
9497
9498         /* Make sure the NIC-based send BD rings are disabled. */
9499         if (!tg3_flag(tp, 5705_PLUS)) {
9500                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9501                 for (i = 0; i < 16; i++)
9502                         tw32_tx_mbox(mbox + i * 8, 0);
9503         }
9504
9505         /* Clear status block in RAM. */
9506         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9507
9508         /* Set status block DMA address */
9509         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9510              ((u64) tnapi->status_mapping >> 32));
9511         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9512              ((u64) tnapi->status_mapping & 0xffffffff));
9513
9514         stblk = HOSTCC_STATBLCK_RING1;
9515
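             /* Each additional vector's status block address is
              * programmed into an 8-byte (high/low) register pair,
              * starting at HOSTCC_STATBLCK_RING1.
              */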
9516         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9517                 u64 mapping = (u64)tnapi->status_mapping;
9518                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9519                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9520                 stblk += 8;
9521
9522                 /* Clear status block in RAM. */
9523                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9524         }
9525
9526         tg3_tx_rcbs_init(tp);
9527         tg3_rx_ret_rcbs_init(tp);
9528 }
9529
9530 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9531 {
9532         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9533
9534         if (!tg3_flag(tp, 5750_PLUS) ||
9535             tg3_flag(tp, 5780_CLASS) ||
9536             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9537             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9538             tg3_flag(tp, 57765_PLUS))
9539                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9540         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9541                  tg3_asic_rev(tp) == ASIC_REV_5787)
9542                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9543         else
9544                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9545
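             /* The replenish threshold is the smaller of the NIC-side
              * limit (half the BD cache, capped at rx_std_max_post)
              * and the host-side limit (1/8 of the configured ring,
              * with a floor of one).
              */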
9546         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9547         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9548
9549         val = min(nic_rep_thresh, host_rep_thresh);
9550         tw32(RCVBDI_STD_THRESH, val);
9551
9552         if (tg3_flag(tp, 57765_PLUS))
9553                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9554
9555         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9556                 return;
9557
9558         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9559
9560         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9561
9562         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9563         tw32(RCVBDI_JUMBO_THRESH, val);
9564
9565         if (tg3_flag(tp, 57765_PLUS))
9566                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9567 }
9568
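     /* Bit-wise CRC-32 using the reflected polynomial 0xedb88320, the
      * same CRC the Ethernet FCS uses.  __tg3_set_rx_mode() below uses
      * it to pick bits in the MAC's 128-bit multicast hash filter.
      */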
9569 static inline u32 calc_crc(unsigned char *buf, int len)
9570 {
9571         u32 reg;
9572         u32 tmp;
9573         int j, k;
9574
9575         reg = 0xffffffff;
9576
9577         for (j = 0; j < len; j++) {
9578                 reg ^= buf[j];
9579
9580                 for (k = 0; k < 8; k++) {
9581                         tmp = reg & 0x01;
9582
9583                         reg >>= 1;
9584
9585                         if (tmp)
9586                                 reg ^= 0xedb88320;
9587                 }
9588         }
9589
9590         return ~reg;
9591 }
9592
9593 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9594 {
9595         /* accept or reject all multicast frames */
9596         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9597         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9598         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9599         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9600 }
9601
9602 static void __tg3_set_rx_mode(struct net_device *dev)
9603 {
9604         struct tg3 *tp = netdev_priv(dev);
9605         u32 rx_mode;
9606
9607         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9608                                   RX_MODE_KEEP_VLAN_TAG);
9609
9610 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9611         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9612          * flag clear.
9613          */
9614         if (!tg3_flag(tp, ENABLE_ASF))
9615                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9616 #endif
9617
9618         if (dev->flags & IFF_PROMISC) {
9619                 /* Promiscuous mode. */
9620                 rx_mode |= RX_MODE_PROMISC;
9621         } else if (dev->flags & IFF_ALLMULTI) {
9622                 /* Accept all multicast. */
9623                 tg3_set_multi(tp, 1);
9624         } else if (netdev_mc_empty(dev)) {
9625                 /* Reject all multicast. */
9626                 tg3_set_multi(tp, 0);
9627         } else {
9628                 /* Accept one or more multicast addresses. */
9629                 struct netdev_hw_addr *ha;
9630                 u32 mc_filter[4] = { 0, };
9631                 u32 regidx;
9632                 u32 bit;
9633                 u32 crc;
9634
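                     /* The low 7 bits of the inverted CRC select one
                      * of 128 filter bits: bits 6:5 pick one of the
                      * four hash registers and bits 4:0 the bit
                      * within it.  E.g. a (hypothetical) value of
                      * 0x47 sets bit 7 of MAC_HASH_REG_2.
                      */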
9635                 netdev_for_each_mc_addr(ha, dev) {
9636                         crc = calc_crc(ha->addr, ETH_ALEN);
9637                         bit = ~crc & 0x7f;
9638                         regidx = (bit & 0x60) >> 5;
9639                         bit &= 0x1f;
9640                         mc_filter[regidx] |= (1 << bit);
9641                 }
9642
9643                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9644                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9645                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9646                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9647         }
9648
9649         if (rx_mode != tp->rx_mode) {
9650                 tp->rx_mode = rx_mode;
9651                 tw32_f(MAC_RX_MODE, rx_mode);
9652                 udelay(10);
9653         }
9654 }
9655
9656 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9657 {
9658         int i;
9659
9660         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9661                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9662 }
9663
9664 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9665 {
9666         int i;
9667
9668         if (!tg3_flag(tp, SUPPORT_MSIX))
9669                 return;
9670
9671         if (tp->rxq_cnt == 1) {
9672                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9673                 return;
9674         }
9675
9676         /* Validate table against current IRQ count */
9677         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9678                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9679                         break;
9680         }
9681
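             /* Any entry referencing a queue that no longer exists
              * (e.g. after the rx queue count shrank) invalidates the
              * table, so rebuild it with the default spread.
              */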
9682         if (i != TG3_RSS_INDIR_TBL_SIZE)
9683                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9684 }
9685
9686 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9687 {
9688         int i = 0;
9689         u32 reg = MAC_RSS_INDIR_TBL_0;
9690
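             /* Eight 4-bit entries are packed per 32-bit register,
              * first entry in the most significant nibble, walking up
              * from MAC_RSS_INDIR_TBL_0 in 4-byte steps.
              */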
9691         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9692                 u32 val = tp->rss_ind_tbl[i];
9693                 i++;
9694                 for (; i % 8; i++) {
9695                         val <<= 4;
9696                         val |= tp->rss_ind_tbl[i];
9697                 }
9698                 tw32(reg, val);
9699                 reg += 4;
9700         }
9701 }
9702
9703 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9704 {
9705         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9706                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9707         else
9708                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9709 }
9710
9711 /* tp->lock is held. */
9712 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9713 {
9714         u32 val, rdmac_mode;
9715         int i, err, limit;
9716         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9717
9718         tg3_disable_ints(tp);
9719
9720         tg3_stop_fw(tp);
9721
9722         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9723
9724         if (tg3_flag(tp, INIT_COMPLETE))
9725                 tg3_abort_hw(tp, 1);
9726
9727         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9728             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9729                 tg3_phy_pull_config(tp);
9730                 tg3_eee_pull_config(tp, NULL);
9731                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9732         }
9733
9734         /* Enable MAC control of LPI */
9735         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9736                 tg3_setup_eee(tp);
9737
9738         if (reset_phy)
9739                 tg3_phy_reset(tp);
9740
9741         err = tg3_chip_reset(tp);
9742         if (err)
9743                 return err;
9744
9745         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9746
9747         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9748                 val = tr32(TG3_CPMU_CTRL);
9749                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9750                 tw32(TG3_CPMU_CTRL, val);
9751
9752                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9753                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9754                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9755                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9756
9757                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9758                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9759                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9760                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9761
9762                 val = tr32(TG3_CPMU_HST_ACC);
9763                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9764                 val |= CPMU_HST_ACC_MACCLK_6_25;
9765                 tw32(TG3_CPMU_HST_ACC, val);
9766         }
9767
9768         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9769                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9770                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9771                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9772                 tw32(PCIE_PWR_MGMT_THRESH, val);
9773
9774                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9775                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9776
9777                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9778
9779                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9780                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9781         }
9782
9783         if (tg3_flag(tp, L1PLLPD_EN)) {
9784                 u32 grc_mode = tr32(GRC_MODE);
9785
9786                 /* Access the lower 1K of PL PCIE block registers. */
9787                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9788                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9789
9790                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9791                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9792                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9793
9794                 tw32(GRC_MODE, grc_mode);
9795         }
9796
9797         if (tg3_flag(tp, 57765_CLASS)) {
9798                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9799                         u32 grc_mode = tr32(GRC_MODE);
9800
9801                         /* Access the lower 1K of PL PCIE block registers. */
9802                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9803                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9804
9805                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9806                                    TG3_PCIE_PL_LO_PHYCTL5);
9807                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9808                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9809
9810                         tw32(GRC_MODE, grc_mode);
9811                 }
9812
9813                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9814                         u32 grc_mode;
9815
9816                         /* Fix transmit hangs */
9817                         val = tr32(TG3_CPMU_PADRNG_CTL);
9818                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9819                         tw32(TG3_CPMU_PADRNG_CTL, val);
9820
9821                         grc_mode = tr32(GRC_MODE);
9822
9823                         /* Access the lower 1K of DL PCIE block registers. */
9824                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9825                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9826
9827                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9828                                    TG3_PCIE_DL_LO_FTSMAX);
9829                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9830                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9831                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9832
9833                         tw32(GRC_MODE, grc_mode);
9834                 }
9835
9836                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9837                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9838                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9839                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9840         }
9841
9842         /* This works around an issue with Athlon chipsets on
9843          * B3 tigon3 silicon.  This bit has no effect on any
9844          * other revision.  But do not set this on PCI Express
9845          * chips and don't even touch the clocks if the CPMU is present.
9846          */
9847         if (!tg3_flag(tp, CPMU_PRESENT)) {
9848                 if (!tg3_flag(tp, PCI_EXPRESS))
9849                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9850                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9851         }
9852
9853         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9854             tg3_flag(tp, PCIX_MODE)) {
9855                 val = tr32(TG3PCI_PCISTATE);
9856                 val |= PCISTATE_RETRY_SAME_DMA;
9857                 tw32(TG3PCI_PCISTATE, val);
9858         }
9859
9860         if (tg3_flag(tp, ENABLE_APE)) {
9861                 /* Allow reads and writes to the
9862                  * APE register and memory space.
9863                  */
9864                 val = tr32(TG3PCI_PCISTATE);
9865                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9866                        PCISTATE_ALLOW_APE_SHMEM_WR |
9867                        PCISTATE_ALLOW_APE_PSPACE_WR;
9868                 tw32(TG3PCI_PCISTATE, val);
9869         }
9870
9871         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9872                 /* Enable some hw fixes.  */
9873                 val = tr32(TG3PCI_MSI_DATA);
9874                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9875                 tw32(TG3PCI_MSI_DATA, val);
9876         }
9877
9878         /* Descriptor ring init may make accesses to the
9879          * NIC SRAM area to set up the TX descriptors, so we
9880          * can only do this after the hardware has been
9881          * successfully reset.
9882          */
9883         err = tg3_init_rings(tp);
9884         if (err)
9885                 return err;
9886
9887         if (tg3_flag(tp, 57765_PLUS)) {
9888                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9889                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9890                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9891                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9892                 if (!tg3_flag(tp, 57765_CLASS) &&
9893                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
9894                     tg3_asic_rev(tp) != ASIC_REV_5762)
9895                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9896                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9897         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9898                    tg3_asic_rev(tp) != ASIC_REV_5761) {
9899                 /* This value is determined during the probe time DMA
9900                  * engine test, tg3_test_dma.
9901                  */
9902                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9903         }
9904
9905         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9906                           GRC_MODE_4X_NIC_SEND_RINGS |
9907                           GRC_MODE_NO_TX_PHDR_CSUM |
9908                           GRC_MODE_NO_RX_PHDR_CSUM);
9909         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9910
9911         /* Pseudo-header checksum is done by hardware logic and not
9912          * the offload processors, so make the chip do the pseudo-
9913          * header checksums on receive.  For transmit it is more
9914          * convenient to do the pseudo-header checksum in software
9915          * as Linux does that on transmit for us in all cases.
9916          */
9917         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9918
9919         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9920         if (tp->rxptpctl)
9921                 tw32(TG3_RX_PTP_CTL,
9922                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9923
9924         if (tg3_flag(tp, PTP_CAPABLE))
9925                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9926
9927         tw32(GRC_MODE, tp->grc_mode | val);
9928
9929         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
9930         val = tr32(GRC_MISC_CFG);
9931         val &= ~0xff;
9932         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9933         tw32(GRC_MISC_CFG, val);
9934
9935         /* Initialize MBUF/DESC pool. */
9936         if (tg3_flag(tp, 5750_PLUS)) {
9937                 /* Do nothing.  */
9938         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9939                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9940                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9941                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9942                 else
9943                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9944                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9945                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9946         } else if (tg3_flag(tp, TSO_CAPABLE)) {
9947                 int fw_len;
9948
9949                 fw_len = tp->fw_len;
9950                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9951                 tw32(BUFMGR_MB_POOL_ADDR,
9952                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9953                 tw32(BUFMGR_MB_POOL_SIZE,
9954                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9955         }
9956
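        /* Buffer manager watermarks: pick the standard or jumbo set based on
         * the configured MTU.  These thresholds govern when the chip starts
         * flow control and throttles receive DMA as mbuf memory runs low.
         */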
9957         if (tp->dev->mtu <= ETH_DATA_LEN) {
9958                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9959                      tp->bufmgr_config.mbuf_read_dma_low_water);
9960                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9961                      tp->bufmgr_config.mbuf_mac_rx_low_water);
9962                 tw32(BUFMGR_MB_HIGH_WATER,
9963                      tp->bufmgr_config.mbuf_high_water);
9964         } else {
9965                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9966                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9967                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9968                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9969                 tw32(BUFMGR_MB_HIGH_WATER,
9970                      tp->bufmgr_config.mbuf_high_water_jumbo);
9971         }
9972         tw32(BUFMGR_DMA_LOW_WATER,
9973              tp->bufmgr_config.dma_low_water);
9974         tw32(BUFMGR_DMA_HIGH_WATER,
9975              tp->bufmgr_config.dma_high_water);
9976
9977         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9978         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9979                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9980         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9981             tg3_asic_rev(tp) == ASIC_REV_5762 ||
9982             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9983             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9984                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9985         tw32(BUFMGR_MODE, val);
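        /* Poll up to 2000 * 10 usec (20 ms) for the buffer manager to
         * report itself enabled before giving up.
         */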
9986         for (i = 0; i < 2000; i++) {
9987                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9988                         break;
9989                 udelay(10);
9990         }
9991         if (i >= 2000) {
9992                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9993                 return -ENODEV;
9994         }
9995
9996         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9997                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9998
9999         tg3_setup_rxbd_thresholds(tp);
10000
10001         /* Initialize TG3_BDINFO's at:
10002          *  RCVDBDI_STD_BD:     standard eth size rx ring
10003          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10004          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10005          *
10006          * like so:
10007          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10008          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10009          *                              ring attribute flags
10010          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10011          *
10012          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10013          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10014          *
10015          * The size of each ring is fixed in the firmware, but the location is
10016          * configurable.
10017          */
10018         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10019              ((u64) tpr->rx_std_mapping >> 32));
10020         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10021              ((u64) tpr->rx_std_mapping & 0xffffffff));
10022         if (!tg3_flag(tp, 5717_PLUS))
10023                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10024                      NIC_SRAM_RX_BUFFER_DESC);
10025
10026         /* Disable the mini ring */
10027         if (!tg3_flag(tp, 5705_PLUS))
10028                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10029                      BDINFO_FLAGS_DISABLED);
10030
10031         /* Program the jumbo buffer descriptor ring control
10032          * blocks on those devices that have them.
10033          */
10034         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10035             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10036
10037                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10038                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10039                              ((u64) tpr->rx_jmb_mapping >> 32));
10040                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10041                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10042                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10043                               BDINFO_FLAGS_MAXLEN_SHIFT;
10044                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10045                              val | BDINFO_FLAGS_USE_EXT_RECV);
10046                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10047                             tg3_flag(tp, 57765_CLASS) ||
10048                             tg3_asic_rev(tp) == ASIC_REV_5762)
10049                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10050                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10051                 } else {
10052                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10053                              BDINFO_FLAGS_DISABLED);
10054                 }
10055
10056                 if (tg3_flag(tp, 57765_PLUS)) {
10057                         val = TG3_RX_STD_RING_SIZE(tp);
10058                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10059                         val |= (TG3_RX_STD_DMA_SZ << 2);
10060                 } else
10061                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10062         } else
10063                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10064
10065         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10066
10067         tpr->rx_std_prod_idx = tp->rx_pending;
10068         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10069
10070         tpr->rx_jmb_prod_idx =
10071                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10072         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10073
10074         tg3_rings_reset(tp);
10075
10076         /* Initialize MAC address and backoff seed. */
10077         __tg3_set_mac_addr(tp, false);
10078
10079         /* MTU + ethernet header + FCS + optional VLAN tag */
10080         tw32(MAC_RX_MTU_SIZE,
10081              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10082
10083         /* The slot time is changed by tg3_setup_phy if we
10084          * run at gigabit with half duplex.
10085          */
10086         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10087               (6 << TX_LENGTHS_IPG_SHIFT) |
10088               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10089
10090         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10091             tg3_asic_rev(tp) == ASIC_REV_5762)
10092                 val |= tr32(MAC_TX_LENGTHS) &
10093                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10094                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10095
10096         tw32(MAC_TX_LENGTHS, val);
10097
10098         /* Receive rules. */
10099         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10100         tw32(RCVLPC_CONFIG, 0x0181);
10101
10102         /* Calculate the RDMAC_MODE setting early; we need it to determine
10103          * the RCVLPC_STATE_ENABLE mask.
10104          */
10105         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10106                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10107                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10108                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10109                       RDMAC_MODE_LNGREAD_ENAB);
10110
10111         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10112                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10113
10114         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10115             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10116             tg3_asic_rev(tp) == ASIC_REV_57780)
10117                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10118                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10119                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10120
10121         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10122             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10123                 if (tg3_flag(tp, TSO_CAPABLE) &&
10124                     tg3_asic_rev(tp) == ASIC_REV_5705) {
10125                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10126                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10127                            !tg3_flag(tp, IS_5788)) {
10128                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10129                 }
10130         }
10131
10132         if (tg3_flag(tp, PCI_EXPRESS))
10133                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10134
10135         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10136                 tp->dma_limit = 0;
10137                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10138                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10139                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10140                 }
10141         }
10142
10143         if (tg3_flag(tp, HW_TSO_1) ||
10144             tg3_flag(tp, HW_TSO_2) ||
10145             tg3_flag(tp, HW_TSO_3))
10146                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10147
10148         if (tg3_flag(tp, 57765_PLUS) ||
10149             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10150             tg3_asic_rev(tp) == ASIC_REV_57780)
10151                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10152
10153         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10154             tg3_asic_rev(tp) == ASIC_REV_5762)
10155                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10156
10157         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10158             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10159             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10160             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10161             tg3_flag(tp, 57765_PLUS)) {
10162                 u32 tgtreg;
10163
10164                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10165                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10166                 else
10167                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10168
10169                 val = tr32(tgtreg);
10170                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10171                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10172                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10173                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10174                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10175                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10176                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10177                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10178                 }
10179                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10180         }
10181
10182         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10183             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10184             tg3_asic_rev(tp) == ASIC_REV_5762) {
10185                 u32 tgtreg;
10186
10187                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10188                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10189                 else
10190                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10191
10192                 val = tr32(tgtreg);
10193                 tw32(tgtreg, val |
10194                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10195                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10196         }
10197
10198         /* Receive/send statistics. */
10199         if (tg3_flag(tp, 5750_PLUS)) {
10200                 val = tr32(RCVLPC_STATS_ENABLE);
10201                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10202                 tw32(RCVLPC_STATS_ENABLE, val);
10203         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10204                    tg3_flag(tp, TSO_CAPABLE)) {
10205                 val = tr32(RCVLPC_STATS_ENABLE);
10206                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10207                 tw32(RCVLPC_STATS_ENABLE, val);
10208         } else {
10209                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10210         }
10211         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10212         tw32(SNDDATAI_STATSENAB, 0xffffff);
10213         tw32(SNDDATAI_STATSCTRL,
10214              (SNDDATAI_SCTRL_ENABLE |
10215               SNDDATAI_SCTRL_FASTUPD));
10216
10217         /* Setup host coalescing engine. */
10218         tw32(HOSTCC_MODE, 0);
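        /* Writing 0 disables the coalescing engine; poll up to 20 ms for the
         * ENABLE bit to clear before reprogramming the coalescing parameters.
         */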
10219         for (i = 0; i < 2000; i++) {
10220                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10221                         break;
10222                 udelay(10);
10223         }
10224
10225         __tg3_set_coalesce(tp, &tp->coal);
10226
10227         if (!tg3_flag(tp, 5705_PLUS)) {
10228                 /* Status/statistics block address.  See tg3_timer,
10229                  * the tg3_periodic_fetch_stats call there, and
10230                  * tg3_get_stats to see how this works for 5705/5750 chips.
10231                  */
10232                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10233                      ((u64) tp->stats_mapping >> 32));
10234                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10235                      ((u64) tp->stats_mapping & 0xffffffff));
10236                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10237
10238                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10239
10240                 /* Clear statistics and status block memory areas */
10241                 for (i = NIC_SRAM_STATS_BLK;
10242                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10243                      i += sizeof(u32)) {
10244                         tg3_write_mem(tp, i, 0);
10245                         udelay(40);
10246                 }
10247         }
10248
10249         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10250
10251         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10252         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10253         if (!tg3_flag(tp, 5705_PLUS))
10254                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10255
10256         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10257                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10258                 /* Reset to prevent intermittently losing the first rx packet. */
10259                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10260                 udelay(10);
10261         }
10262
10263         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10264                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10265                         MAC_MODE_FHDE_ENABLE;
10266         if (tg3_flag(tp, ENABLE_APE))
10267                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10268         if (!tg3_flag(tp, 5705_PLUS) &&
10269             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10270             tg3_asic_rev(tp) != ASIC_REV_5700)
10271                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10272         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10273         udelay(40);
10274
10275         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10276          * If TG3_FLAG_IS_NIC is zero, we should read the
10277          * register to preserve the GPIO settings for LOMs. The GPIOs,
10278          * whether used as inputs or outputs, are set by boot code after
10279          * reset.
10280          */
10281         if (!tg3_flag(tp, IS_NIC)) {
10282                 u32 gpio_mask;
10283
10284                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10285                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10286                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10287
10288                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10289                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10290                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10291
10292                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10293                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10294
10295                 tp->grc_local_ctrl &= ~gpio_mask;
10296                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10297
10298                 /* GPIO1 must be driven high for EEPROM write protect. */
10299                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10300                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10301                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10302         }
10303         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10304         udelay(100);
10305
10306         if (tg3_flag(tp, USING_MSIX)) {
10307                 val = tr32(MSGINT_MODE);
10308                 val |= MSGINT_MODE_ENABLE;
10309                 if (tp->irq_cnt > 1)
10310                         val |= MSGINT_MODE_MULTIVEC_EN;
10311                 if (!tg3_flag(tp, 1SHOT_MSI))
10312                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10313                 tw32(MSGINT_MODE, val);
10314         }
10315
10316         if (!tg3_flag(tp, 5705_PLUS)) {
10317                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10318                 udelay(40);
10319         }
10320
10321         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10322                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10323                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10324                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10325                WDMAC_MODE_LNGREAD_ENAB);
10326
10327         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10328             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10329                 if (tg3_flag(tp, TSO_CAPABLE) &&
10330                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10331                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10332                         /* nothing */
10333                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10334                            !tg3_flag(tp, IS_5788)) {
10335                         val |= WDMAC_MODE_RX_ACCEL;
10336                 }
10337         }
10338
10339         /* Enable host coalescing bug fix */
10340         if (tg3_flag(tp, 5755_PLUS))
10341                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10342
10343         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10344                 val |= WDMAC_MODE_BURST_ALL_DATA;
10345
10346         tw32_f(WDMAC_MODE, val);
10347         udelay(40);
10348
10349         if (tg3_flag(tp, PCIX_MODE)) {
10350                 u16 pcix_cmd;
10351
10352                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10353                                      &pcix_cmd);
10354                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10355                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10356                         pcix_cmd |= PCI_X_CMD_READ_2K;
10357                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10358                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10359                         pcix_cmd |= PCI_X_CMD_READ_2K;
10360                 }
10361                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10362                                       pcix_cmd);
10363         }
10364
10365         tw32_f(RDMAC_MODE, rdmac_mode);
10366         udelay(40);
10367
10368         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10369             tg3_asic_rev(tp) == ASIC_REV_5720) {
10370                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10371                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10372                                 break;
10373                 }
10374                 if (i < TG3_NUM_RDMA_CHANNELS) {
10375                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10376                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10377                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10378                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10379                 }
10380         }
10381
10382         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10383         if (!tg3_flag(tp, 5705_PLUS))
10384                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10385
10386         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10387                 tw32(SNDDATAC_MODE,
10388                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10389         else
10390                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10391
10392         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10393         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10394         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10395         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10396                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10397         tw32(RCVDBDI_MODE, val);
10398         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10399         if (tg3_flag(tp, HW_TSO_1) ||
10400             tg3_flag(tp, HW_TSO_2) ||
10401             tg3_flag(tp, HW_TSO_3))
10402                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10403         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10404         if (tg3_flag(tp, ENABLE_TSS))
10405                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10406         tw32(SNDBDI_MODE, val);
10407         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10408
10409         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10410                 err = tg3_load_5701_a0_firmware_fix(tp);
10411                 if (err)
10412                         return err;
10413         }
10414
10415         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10416                 /* Ignore any errors from the firmware download.  If the
10417                  * download fails, the device will operate with EEE disabled.
10418                  */
10419                 tg3_load_57766_firmware(tp);
10420         }
10421
10422         if (tg3_flag(tp, TSO_CAPABLE)) {
10423                 err = tg3_load_tso_firmware(tp);
10424                 if (err)
10425                         return err;
10426         }
10427
10428         tp->tx_mode = TX_MODE_ENABLE;
10429
10430         if (tg3_flag(tp, 5755_PLUS) ||
10431             tg3_asic_rev(tp) == ASIC_REV_5906)
10432                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10433
10434         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10435             tg3_asic_rev(tp) == ASIC_REV_5762) {
10436                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10437                 tp->tx_mode &= ~val;
10438                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10439         }
10440
10441         tw32_f(MAC_TX_MODE, tp->tx_mode);
10442         udelay(100);
10443
10444         if (tg3_flag(tp, ENABLE_RSS)) {
10445                 tg3_rss_write_indir_tbl(tp);
10446
10447                 /* Set up the "secret" RSS hash key. */
10448                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10449                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10450                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10451                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10452                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10453                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10454                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10455                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10456                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10457                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10458         }
10459
10460         tp->rx_mode = RX_MODE_ENABLE;
10461         if (tg3_flag(tp, 5755_PLUS))
10462                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10463
10464         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10465                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10466
10467         if (tg3_flag(tp, ENABLE_RSS))
10468                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10469                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10470                                RX_MODE_RSS_IPV6_HASH_EN |
10471                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10472                                RX_MODE_RSS_IPV4_HASH_EN |
10473                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10474
10475         tw32_f(MAC_RX_MODE, tp->rx_mode);
10476         udelay(10);
10477
10478         tw32(MAC_LED_CTRL, tp->led_ctrl);
10479
10480         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10481         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10482                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10483                 udelay(10);
10484         }
10485         tw32_f(MAC_RX_MODE, tp->rx_mode);
10486         udelay(10);
10487
10488         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10489                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10490                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10491                         /* Set the drive transmission level to 1.2V, but only
10492                          * if the signal pre-emphasis bit is not set. */
10493                         val = tr32(MAC_SERDES_CFG);
10494                         val &= 0xfffff000;
10495                         val |= 0x880;
10496                         tw32(MAC_SERDES_CFG, val);
10497                 }
10498                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10499                         tw32(MAC_SERDES_CFG, 0x616000);
10500         }
10501
10502         /* Prevent the chip from dropping frames when flow control
10503          * is enabled.
10504          */
10505         if (tg3_flag(tp, 57765_CLASS))
10506                 val = 1;
10507         else
10508                 val = 2;
10509         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10510
10511         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10512             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10513                 /* Use hardware link auto-negotiation */
10514                 tg3_flag_set(tp, HW_AUTONEG);
10515         }
10516
10517         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10518             tg3_asic_rev(tp) == ASIC_REV_5714) {
10519                 u32 tmp;
10520
10521                 tmp = tr32(SERDES_RX_CTRL);
10522                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10523                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10524                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10525                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10526         }
10527
10528         if (!tg3_flag(tp, USE_PHYLIB)) {
10529                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10530                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10531
10532                 err = tg3_setup_phy(tp, false);
10533                 if (err)
10534                         return err;
10535
10536                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10537                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10538                         u32 tmp;
10539
10540                         /* Clear CRC stats. */
10541                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10542                                 tg3_writephy(tp, MII_TG3_TEST1,
10543                                              tmp | MII_TG3_TEST1_CRC_EN);
10544                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10545                         }
10546                 }
10547         }
10548
10549         __tg3_set_rx_mode(tp->dev);
10550
10551         /* Initialize receive rules. */
10552         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10553         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10554         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10555         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10556
10557         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10558                 limit = 8;
10559         else
10560                 limit = 16;
10561         if (tg3_flag(tp, ENABLE_ASF))
10562                 limit -= 4;
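        /* Each case below intentionally falls through to the next, so
         * starting at 'limit' every rule/value pair from limit - 1 down to 4
         * is cleared.  Rules 0 and 1 were programmed above; the slots for
         * rules 2 and 3 are deliberately left commented out.
         */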
10563         switch (limit) {
10564         case 16:
10565                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10566         case 15:
10567                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10568         case 14:
10569                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10570         case 13:
10571                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10572         case 12:
10573                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10574         case 11:
10575                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10576         case 10:
10577                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10578         case 9:
10579                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10580         case 8:
10581                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10582         case 7:
10583                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10584         case 6:
10585                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10586         case 5:
10587                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10588         case 4:
10589                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10590         case 3:
10591                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10592         case 2:
10593         case 1:
10594
10595         default:
10596                 break;
10597         }
10598
10599         if (tg3_flag(tp, ENABLE_APE))
10600                 /* Write our heartbeat update interval to APE. */
10601                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10602                                 APE_HOST_HEARTBEAT_INT_DISABLE);
10603
10604         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10605
10606         return 0;
10607 }
10608
10609 /* Called at device open time to get the chip ready for
10610  * packet processing.  Invoked with tp->lock held.
10611  */
10612 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10613 {
10614         /* The chip may have just been powered on. If so, the boot code may still
10615          * be running initialization. Wait for it to finish to avoid races in
10616          * accessing the hardware.
10617          */
10618         tg3_enable_register_access(tp);
10619         tg3_poll_fw(tp);
10620
10621         tg3_switch_clocks(tp);
10622
10623         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10624
10625         return tg3_reset_hw(tp, reset_phy);
10626 }
10627
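/* Walk the OCIR records in the APE scratchpad.  Any record with a bad
 * signature or without the ACTIVE flag is zeroed so that callers such as
 * tg3_hwmon_open() can simply skip it.
 */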
10628 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10629 {
10630         int i;
10631
10632         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10633                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10634
10635                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10636                 off += len;
10637
10638                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10639                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10640                         memset(ocir, 0, TG3_OCIR_LEN);
10641         }
10642 }
10643
10644 /* sysfs attributes for hwmon */
10645 static ssize_t tg3_show_temp(struct device *dev,
10646                              struct device_attribute *devattr, char *buf)
10647 {
10648         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10649         struct tg3 *tp = dev_get_drvdata(dev);
10650         u32 temperature;
10651
10652         spin_lock_bh(&tp->lock);
10653         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10654                                 sizeof(temperature));
10655         spin_unlock_bh(&tp->lock);
10656         return sprintf(buf, "%u\n", temperature);
10657 }
10658
10659
10660 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10661                           TG3_TEMP_SENSOR_OFFSET);
10662 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10663                           TG3_TEMP_CAUTION_OFFSET);
10664 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10665                           TG3_TEMP_MAX_OFFSET);
10666
10667 static struct attribute *tg3_attrs[] = {
10668         &sensor_dev_attr_temp1_input.dev_attr.attr,
10669         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10670         &sensor_dev_attr_temp1_max.dev_attr.attr,
10671         NULL
10672 };
10673 ATTRIBUTE_GROUPS(tg3);
10674
10675 static void tg3_hwmon_close(struct tg3 *tp)
10676 {
10677         if (tp->hwmon_dev) {
10678                 hwmon_device_unregister(tp->hwmon_dev);
10679                 tp->hwmon_dev = NULL;
10680         }
10681 }
10682
10683 static void tg3_hwmon_open(struct tg3 *tp)
10684 {
10685         int i;
10686         u32 size = 0;
10687         struct pci_dev *pdev = tp->pdev;
10688         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10689
10690         tg3_sd_scan_scratchpad(tp, ocirs);
10691
10692         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10693                 if (!ocirs[i].src_data_length)
10694                         continue;
10695
10696                 size += ocirs[i].src_hdr_length;
10697                 size += ocirs[i].src_data_length;
10698         }
10699
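        /* No active sensor records were found; skip hwmon registration. */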
10700         if (!size)
10701                 return;
10702
10703         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10704                                                           tp, tg3_groups);
10705         if (IS_ERR(tp->hwmon_dev)) {
10706                 tp->hwmon_dev = NULL;
10707                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10708         }
10709 }
10710
10711
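/* Accumulate a 32-bit hardware statistics register into a 64-bit
 * { high, low } software counter.  If the 32-bit addition wraps, the new
 * low word is smaller than the value just added, and a carry is propagated
 * into the high word.
 */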
10712 #define TG3_STAT_ADD32(PSTAT, REG) \
10713 do {    u32 __val = tr32(REG); \
10714         (PSTAT)->low += __val; \
10715         if ((PSTAT)->low < __val) \
10716                 (PSTAT)->high += 1; \
10717 } while (0)
10718
10719 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10720 {
10721         struct tg3_hw_stats *sp = tp->hw_stats;
10722
10723         if (!tp->link_up)
10724                 return;
10725
10726         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10727         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10728         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10729         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10730         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10731         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10732         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10733         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10734         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10735         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10736         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10737         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10738         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10739         if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10740                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10741                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10742                 u32 val;
10743
10744                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10745                 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10746                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10747                 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10748         }
10749
10750         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10751         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10752         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10753         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10754         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10755         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10756         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10757         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10758         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10759         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10760         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10761         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10762         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10763         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10764
10765         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10766         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10767             tg3_asic_rev(tp) != ASIC_REV_5762 &&
10768             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10769             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10770                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10771         } else {
10772                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10773                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10774                 if (val) {
10775                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10776                         sp->rx_discards.low += val;
10777                         if (sp->rx_discards.low < val)
10778                                 sp->rx_discards.high += 1;
10779                 }
10780                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10781         }
10782         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10783 }
10784
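/* Work around occasionally dropped MSIs on some chips: if a vector has had
 * work pending with unmoving consumer indices for two consecutive checks,
 * assume the MSI was lost and invoke the handler by hand.
 */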
10785 static void tg3_chk_missed_msi(struct tg3 *tp)
10786 {
10787         u32 i;
10788
10789         for (i = 0; i < tp->irq_cnt; i++) {
10790                 struct tg3_napi *tnapi = &tp->napi[i];
10791
10792                 if (tg3_has_work(tnapi)) {
10793                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10794                             tnapi->last_tx_cons == tnapi->tx_cons) {
10795                                 if (tnapi->chk_msi_cnt < 1) {
10796                                         tnapi->chk_msi_cnt++;
10797                                         return;
10798                                 }
10799                                 tg3_msi(0, tnapi);
10800                         }
10801                 }
10802                 tnapi->chk_msi_cnt = 0;
10803                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10804                 tnapi->last_tx_cons = tnapi->tx_cons;
10805         }
10806 }
10807
10808 static void tg3_timer(unsigned long __opaque)
10809 {
10810         struct tg3 *tp = (struct tg3 *) __opaque;
10811
10812         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10813                 goto restart_timer;
10814
10815         spin_lock(&tp->lock);
10816
10817         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10818             tg3_flag(tp, 57765_CLASS))
10819                 tg3_chk_missed_msi(tp);
10820
10821         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10822                 /* BCM4785: Flush posted writes from GbE to host memory. */
10823                 tr32(HOSTCC_MODE);
10824         }
10825
10826         if (!tg3_flag(tp, TAGGED_STATUS)) {
10827                 /* All of this garbage is because, when using non-tagged
10828                  * IRQ status, the mailbox/status_block protocol the chip
10829                  * uses with the CPU is race prone.
10830                  */
10831                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10832                         tw32(GRC_LOCAL_CTRL,
10833                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10834                 } else {
10835                         tw32(HOSTCC_MODE, tp->coalesce_mode |
10836                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10837                 }
10838
10839                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10840                         spin_unlock(&tp->lock);
10841                         tg3_reset_task_schedule(tp);
10842                         goto restart_timer;
10843                 }
10844         }
10845
10846         /* This part only runs once per second. */
10847         if (!--tp->timer_counter) {
10848                 if (tg3_flag(tp, 5705_PLUS))
10849                         tg3_periodic_fetch_stats(tp);
10850
10851                 if (tp->setlpicnt && !--tp->setlpicnt)
10852                         tg3_phy_eee_enable(tp);
10853
10854                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10855                         u32 mac_stat;
10856                         int phy_event;
10857
10858                         mac_stat = tr32(MAC_STATUS);
10859
10860                         phy_event = 0;
10861                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10862                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10863                                         phy_event = 1;
10864                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10865                                 phy_event = 1;
10866
10867                         if (phy_event)
10868                                 tg3_setup_phy(tp, false);
10869                 } else if (tg3_flag(tp, POLL_SERDES)) {
10870                         u32 mac_stat = tr32(MAC_STATUS);
10871                         int need_setup = 0;
10872
10873                         if (tp->link_up &&
10874                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10875                                 need_setup = 1;
10876                         }
10877                         if (!tp->link_up &&
10878                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
10879                                          MAC_STATUS_SIGNAL_DET))) {
10880                                 need_setup = 1;
10881                         }
10882                         if (need_setup) {
10883                                 if (!tp->serdes_counter) {
10884                                         tw32_f(MAC_MODE,
10885                                              (tp->mac_mode &
10886                                               ~MAC_MODE_PORT_MODE_MASK));
10887                                         udelay(40);
10888                                         tw32_f(MAC_MODE, tp->mac_mode);
10889                                         udelay(40);
10890                                 }
10891                                 tg3_setup_phy(tp, false);
10892                         }
10893                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10894                            tg3_flag(tp, 5780_CLASS)) {
10895                         tg3_serdes_parallel_detect(tp);
10896                 }
10897
10898                 tp->timer_counter = tp->timer_multiplier;
10899         }
10900
10901         /* The heartbeat is sent only once every 2 seconds.
10902          *
10903          * The heartbeat is to tell the ASF firmware that the host
10904          * driver is still alive.  In the event that the OS crashes,
10905          * ASF needs to reset the hardware to free up the FIFO space
10906          * that may be filled with rx packets destined for the host.
10907          * If the FIFO is full, ASF will no longer function properly.
10908          *
10909          * Unintended resets have been reported on real time kernels
10910          * where the timer doesn't run on time.  Netpoll will also have
10911          * the same problem.
10912          *
10913          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10914          * to check the ring condition when the heartbeat is expiring
10915          * before doing the reset.  This will prevent most unintended
10916          * resets.
10917          */
10918         if (!--tp->asf_counter) {
10919                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10920                         tg3_wait_for_event_ack(tp);
10921
10922                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10923                                       FWCMD_NICDRV_ALIVE3);
10924                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10925                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10926                                       TG3_FW_UPDATE_TIMEOUT_SEC);
10927
10928                         tg3_generate_fw_event(tp);
10929                 }
10930                 tp->asf_counter = tp->asf_multiplier;
10931         }
10932
10933         spin_unlock(&tp->lock);
10934
10935 restart_timer:
10936         tp->timer.expires = jiffies + tp->timer_offset;
10937         add_timer(&tp->timer);
10938 }
10939
10940 static void tg3_timer_init(struct tg3 *tp)
10941 {
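        /* Tagged-status chips only need a 1 Hz housekeeping timer; everything
         * else is polled at 10 Hz, which also drives the missed-MSI check on
         * the chips that need it.
         */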
10942         if (tg3_flag(tp, TAGGED_STATUS) &&
10943             tg3_asic_rev(tp) != ASIC_REV_5717 &&
10944             !tg3_flag(tp, 57765_CLASS))
10945                 tp->timer_offset = HZ;
10946         else
10947                 tp->timer_offset = HZ / 10;
10948
10949         BUG_ON(tp->timer_offset > HZ);
10950
10951         tp->timer_multiplier = (HZ / tp->timer_offset);
10952         tp->asf_multiplier = (HZ / tp->timer_offset) *
10953                              TG3_FW_UPDATE_FREQ_SEC;
10954
10955         init_timer(&tp->timer);
10956         tp->timer.data = (unsigned long) tp;
10957         tp->timer.function = tg3_timer;
10958 }
10959
10960 static void tg3_timer_start(struct tg3 *tp)
10961 {
10962         tp->asf_counter   = tp->asf_multiplier;
10963         tp->timer_counter = tp->timer_multiplier;
10964
10965         tp->timer.expires = jiffies + tp->timer_offset;
10966         add_timer(&tp->timer);
10967 }
10968
10969 static void tg3_timer_stop(struct tg3 *tp)
10970 {
10971         del_timer_sync(&tp->timer);
10972 }
10973
10974 /* Restart hardware after configuration changes, self-test, etc.
10975  * Invoked with tp->lock held.
10976  */
10977 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10978         __releases(tp->lock)
10979         __acquires(tp->lock)
10980 {
10981         int err;
10982
10983         err = tg3_init_hw(tp, reset_phy);
10984         if (err) {
10985                 netdev_err(tp->dev,
10986                            "Failed to re-initialize device, aborting\n");
10987                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10988                 tg3_full_unlock(tp);
10989                 tg3_timer_stop(tp);
10990                 tp->irq_sync = 0;
10991                 tg3_napi_enable(tp);
10992                 dev_close(tp->dev);
10993                 tg3_full_lock(tp, 0);
10994         }
10995         return err;
10996 }
10997
10998 static void tg3_reset_task(struct work_struct *work)
10999 {
11000         struct tg3 *tp = container_of(work, struct tg3, reset_task);
11001         int err;
11002
11003         tg3_full_lock(tp, 0);
11004
11005         if (!netif_running(tp->dev)) {
11006                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11007                 tg3_full_unlock(tp);
11008                 return;
11009         }
11010
11011         tg3_full_unlock(tp);
11012
11013         tg3_phy_stop(tp);
11014
11015         tg3_netif_stop(tp);
11016
11017         tg3_full_lock(tp, 1);
11018
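        /* A TX timeout may have been caused by the host bridge reordering
         * posted mailbox writes; switch to the flushed mailbox write methods
         * used for such chipsets before reinitializing the hardware.
         */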
11019         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11020                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11021                 tp->write32_rx_mbox = tg3_write_flush_reg32;
11022                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11023                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11024         }
11025
11026         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11027         err = tg3_init_hw(tp, true);
11028         if (err)
11029                 goto out;
11030
11031         tg3_netif_start(tp);
11032
11033 out:
11034         tg3_full_unlock(tp);
11035
11036         if (!err)
11037                 tg3_phy_start(tp);
11038
11039         tg3_flag_clear(tp, RESET_TASK_PENDING);
11040 }
11041
11042 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11043 {
11044         irq_handler_t fn;
11045         unsigned long flags;
11046         char *name;
11047         struct tg3_napi *tnapi = &tp->napi[irq_num];
11048
11049         if (tp->irq_cnt == 1)
11050                 name = tp->dev->name;
11051         else {
11052                 name = &tnapi->irq_lbl[0];
11053                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11054                         snprintf(name, IFNAMSIZ,
11055                                  "%s-txrx-%d", tp->dev->name, irq_num);
11056                 else if (tnapi->tx_buffers)
11057                         snprintf(name, IFNAMSIZ,
11058                                  "%s-tx-%d", tp->dev->name, irq_num);
11059                 else if (tnapi->rx_rcb)
11060                         snprintf(name, IFNAMSIZ,
11061                                  "%s-rx-%d", tp->dev->name, irq_num);
11062                 else
11063                         snprintf(name, IFNAMSIZ,
11064                                  "%s-%d", tp->dev->name, irq_num);
11065                 name[IFNAMSIZ-1] = 0;
11066         }
11067
11068         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11069                 fn = tg3_msi;
11070                 if (tg3_flag(tp, 1SHOT_MSI))
11071                         fn = tg3_msi_1shot;
11072                 flags = 0;
11073         } else {
11074                 fn = tg3_interrupt;
11075                 if (tg3_flag(tp, TAGGED_STATUS))
11076                         fn = tg3_interrupt_tagged;
11077                 flags = IRQF_SHARED;
11078         }
11079
11080         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11081 }
11082
11083 static int tg3_test_interrupt(struct tg3 *tp)
11084 {
11085         struct tg3_napi *tnapi = &tp->napi[0];
11086         struct net_device *dev = tp->dev;
11087         int err, i, intr_ok = 0;
11088         u32 val;
11089
11090         if (!netif_running(dev))
11091                 return -ENODEV;
11092
11093         tg3_disable_ints(tp);
11094
11095         free_irq(tnapi->irq_vec, tnapi);
11096
11097         /*
11098          * Turn off MSI one-shot mode.  Otherwise this test has no
11099          * observable way to know whether the interrupt was delivered.
11100          */
11101         if (tg3_flag(tp, 57765_PLUS)) {
11102                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11103                 tw32(MSGINT_MODE, val);
11104         }
11105
11106         err = request_irq(tnapi->irq_vec, tg3_test_isr,
11107                           IRQF_SHARED, dev->name, tnapi);
11108         if (err)
11109                 return err;
11110
11111         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11112         tg3_enable_ints(tp);
11113
11114         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11115                tnapi->coal_now);
11116
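        /* Poll for up to ~50 ms (5 * 10 ms) for evidence that the test
         * interrupt was delivered: either the interrupt mailbox was written
         * or the ISR masked PCI interrupts via MISC_HOST_CTRL.
         */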
11117         for (i = 0; i < 5; i++) {
11118                 u32 int_mbox, misc_host_ctrl;
11119
11120                 int_mbox = tr32_mailbox(tnapi->int_mbox);
11121                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11122
11123                 if ((int_mbox != 0) ||
11124                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11125                         intr_ok = 1;
11126                         break;
11127                 }
11128
11129                 if (tg3_flag(tp, 57765_PLUS) &&
11130                     tnapi->hw_status->status_tag != tnapi->last_tag)
11131                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11132
11133                 msleep(10);
11134         }
11135
11136         tg3_disable_ints(tp);
11137
11138         free_irq(tnapi->irq_vec, tnapi);
11139
11140         err = tg3_request_irq(tp, 0);
11141
11142         if (err)
11143                 return err;
11144
11145         if (intr_ok) {
11146                 /* Re-enable MSI one-shot mode. */
11147                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11148                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11149                         tw32(MSGINT_MODE, val);
11150                 }
11151                 return 0;
11152         }
11153
11154         return -EIO;
11155 }
11156
11157 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11158  * INTx mode is successfully restored.
11159  */
11160 static int tg3_test_msi(struct tg3 *tp)
11161 {
11162         int err;
11163         u16 pci_cmd;
11164
11165         if (!tg3_flag(tp, USING_MSI))
11166                 return 0;
11167
11168         /* Turn off SERR reporting in case MSI terminates with Master
11169          * Abort.
11170          */
11171         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11172         pci_write_config_word(tp->pdev, PCI_COMMAND,
11173                               pci_cmd & ~PCI_COMMAND_SERR);
11174
11175         err = tg3_test_interrupt(tp);
11176
11177         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11178
11179         if (!err)
11180                 return 0;
11181
11182         /* other failures */
11183         if (err != -EIO)
11184                 return err;
11185
11186         /* MSI test failed, go back to INTx mode */
11187         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11188                     "to INTx mode. Please report this failure to the PCI "
11189                     "maintainer and include system chipset information\n");
11190
11191         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11192
11193         pci_disable_msi(tp->pdev);
11194
11195         tg3_flag_clear(tp, USING_MSI);
11196         tp->napi[0].irq_vec = tp->pdev->irq;
11197
11198         err = tg3_request_irq(tp, 0);
11199         if (err)
11200                 return err;
11201
11202         /* Need to reset the chip because the MSI cycle may have terminated
11203          * with Master Abort.
11204          */
11205         tg3_full_lock(tp, 1);
11206
11207         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11208         err = tg3_init_hw(tp, true);
11209
11210         tg3_full_unlock(tp);
11211
11212         if (err)
11213                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11214
11215         return err;
11216 }
11217
11218 static int tg3_request_firmware(struct tg3 *tp)
11219 {
11220         const struct tg3_firmware_hdr *fw_hdr;
11221
11222         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11223                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11224                            tp->fw_needed);
11225                 return -ENOENT;
11226         }
11227
11228         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11229
11230         /* Firmware blob starts with version numbers, followed by
11231          * start address and _full_ length including BSS sections
11232          * (which must be longer than the actual data, of course).
11233          */
11234
11235         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11236         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11237                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11238                            tp->fw_len, tp->fw_needed);
11239                 release_firmware(tp->fw);
11240                 tp->fw = NULL;
11241                 return -EINVAL;
11242         }
11243
11244         /* We no longer need firmware; we have it. */
11245         tp->fw_needed = NULL;
11246         return 0;
11247 }
11248
11249 static u32 tg3_irq_count(struct tg3 *tp)
11250 {
11251         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11252
11253         if (irq_cnt > 1) {
11254                 /* We want as many rx rings enabled as there are cpus.
11255                  * In multiqueue MSI-X mode, the first MSI-X vector
11256                  * only deals with link interrupts, etc, so we add
11257                  * one to the number of vectors we are requesting.
11258                  */
11259                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11260         }
11261
11262         return irq_cnt;
11263 }
11264
11265 static bool tg3_enable_msix(struct tg3 *tp)
11266 {
11267         int i, rc;
11268         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11269
11270         tp->txq_cnt = tp->txq_req;
11271         tp->rxq_cnt = tp->rxq_req;
11272         if (!tp->rxq_cnt)
11273                 tp->rxq_cnt = netif_get_num_default_rss_queues();
11274         if (tp->rxq_cnt > tp->rxq_max)
11275                 tp->rxq_cnt = tp->rxq_max;
11276
11277         /* Disable multiple TX rings by default.  Simple round-robin hardware
11278          * scheduling of the TX rings can cause starvation of rings with
11279          * small packets when other rings have TSO or jumbo packets.
11280          */
11281         if (!tp->txq_req)
11282                 tp->txq_cnt = 1;
11283
11284         tp->irq_cnt = tg3_irq_count(tp);
11285
11286         for (i = 0; i < tp->irq_max; i++) {
11287                 msix_ent[i].entry  = i;
11288                 msix_ent[i].vector = 0;
11289         }
11290
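        /* With the legacy pci_enable_msix() interface, a positive return
         * value is the number of vectors that could have been allocated,
         * so a single retry with that smaller count is attempted below.
         */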
11291         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11292         if (rc < 0) {
11293                 return false;
11294         } else if (rc != 0) {
11295                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
11296                         return false;
11297                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11298                               tp->irq_cnt, rc);
11299                 tp->irq_cnt = rc;
11300                 tp->rxq_cnt = max(rc - 1, 1);
11301                 if (tp->txq_cnt)
11302                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11303         }
11304
11305         for (i = 0; i < tp->irq_max; i++)
11306                 tp->napi[i].irq_vec = msix_ent[i].vector;
11307
11308         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11309                 pci_disable_msix(tp->pdev);
11310                 return false;
11311         }
11312
11313         if (tp->irq_cnt == 1)
11314                 return true;
11315
11316         tg3_flag_set(tp, ENABLE_RSS);
11317
11318         if (tp->txq_cnt > 1)
11319                 tg3_flag_set(tp, ENABLE_TSS);
11320
11321         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11322
11323         return true;
11324 }
11325
11326 static void tg3_ints_init(struct tg3 *tp)
11327 {
11328         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11329             !tg3_flag(tp, TAGGED_STATUS)) {
11330                 /* All MSI-supporting chips should support tagged
11331                  * status.  Assert that this is the case.
11332                  */
11333                 netdev_warn(tp->dev,
11334                             "MSI without TAGGED_STATUS? Not using MSI\n");
11335                 goto defcfg;
11336         }
11337
11338         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11339                 tg3_flag_set(tp, USING_MSIX);
11340         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11341                 tg3_flag_set(tp, USING_MSI);
11342
11343         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11344                 u32 msi_mode = tr32(MSGINT_MODE);
11345                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11346                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11347                 if (!tg3_flag(tp, 1SHOT_MSI))
11348                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11349                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11350         }
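        /* Everything below the label also runs for MSI and INTx; any
         * configuration that is not MSI-X collapses to a single vector.
         */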
11351 defcfg:
11352         if (!tg3_flag(tp, USING_MSIX)) {
11353                 tp->irq_cnt = 1;
11354                 tp->napi[0].irq_vec = tp->pdev->irq;
11355         }
11356
11357         if (tp->irq_cnt == 1) {
11358                 tp->txq_cnt = 1;
11359                 tp->rxq_cnt = 1;
11360                 netif_set_real_num_tx_queues(tp->dev, 1);
11361                 netif_set_real_num_rx_queues(tp->dev, 1);
11362         }
11363 }
11364
11365 static void tg3_ints_fini(struct tg3 *tp)
11366 {
11367         if (tg3_flag(tp, USING_MSIX))
11368                 pci_disable_msix(tp->pdev);
11369         else if (tg3_flag(tp, USING_MSI))
11370                 pci_disable_msi(tp->pdev);
11371         tg3_flag_clear(tp, USING_MSI);
11372         tg3_flag_clear(tp, USING_MSIX);
11373         tg3_flag_clear(tp, ENABLE_RSS);
11374         tg3_flag_clear(tp, ENABLE_TSS);
11375 }
11376
11377 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11378                      bool init)
11379 {
11380         struct net_device *dev = tp->dev;
11381         int i, err;
11382
11383         /*
11384          * Set up interrupts first so we know how
11385          * many NAPI resources to allocate.
11386          */
11387         tg3_ints_init(tp);
11388
11389         tg3_rss_check_indir_tbl(tp);
11390
11391         /* The placement of this call is tied
11392          * to the setup and use of Host TX descriptors.
11393          */
11394         err = tg3_alloc_consistent(tp);
11395         if (err)
11396                 goto out_ints_fini;
11397
11398         tg3_napi_init(tp);
11399
11400         tg3_napi_enable(tp);
11401
11402         for (i = 0; i < tp->irq_cnt; i++) {
11403                 struct tg3_napi *tnapi = &tp->napi[i];
11404                 err = tg3_request_irq(tp, i);
11405                 if (err) {
11406                         for (i--; i >= 0; i--) {
11407                                 tnapi = &tp->napi[i];
11408                                 free_irq(tnapi->irq_vec, tnapi);
11409                         }
11410                         goto out_napi_fini;
11411                 }
11412         }
11413
11414         tg3_full_lock(tp, 0);
11415
11416         if (init)
11417                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11418
11419         err = tg3_init_hw(tp, reset_phy);
11420         if (err) {
11421                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11422                 tg3_free_rings(tp);
11423         }
11424
11425         tg3_full_unlock(tp);
11426
11427         if (err)
11428                 goto out_free_irq;
11429
11430         if (test_irq && tg3_flag(tp, USING_MSI)) {
11431                 err = tg3_test_msi(tp);
11432
11433                 if (err) {
11434                         tg3_full_lock(tp, 0);
11435                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11436                         tg3_free_rings(tp);
11437                         tg3_full_unlock(tp);
11438
11439                         goto out_napi_fini;
11440                 }
11441
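                /* Once the MSI test has passed, pre-57765 chips switch the
                 * PCIe block into one-shot MSI mode.
                 */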
11442                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11443                         u32 val = tr32(PCIE_TRANSACTION_CFG);
11444
11445                         tw32(PCIE_TRANSACTION_CFG,
11446                              val | PCIE_TRANS_CFG_1SHOT_MSI);
11447                 }
11448         }
11449
11450         tg3_phy_start(tp);
11451
11452         tg3_hwmon_open(tp);
11453
11454         tg3_full_lock(tp, 0);
11455
11456         tg3_timer_start(tp);
11457         tg3_flag_set(tp, INIT_COMPLETE);
11458         tg3_enable_ints(tp);
11459
11460         if (init)
11461                 tg3_ptp_init(tp);
11462         else
11463                 tg3_ptp_resume(tp);
11464
11465
11466         tg3_full_unlock(tp);
11467
11468         netif_tx_start_all_queues(dev);
11469
11470         /*
11471          * Reset the loopback feature if it was turned on while the device
11472          * was down; make sure it is installed properly now.
11473          */
11474         if (dev->features & NETIF_F_LOOPBACK)
11475                 tg3_set_loopback(dev, dev->features);
11476
11477         return 0;
11478
11479 out_free_irq:
11480         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11481                 struct tg3_napi *tnapi = &tp->napi[i];
11482                 free_irq(tnapi->irq_vec, tnapi);
11483         }
11484
11485 out_napi_fini:
11486         tg3_napi_disable(tp);
11487         tg3_napi_fini(tp);
11488         tg3_free_consistent(tp);
11489
11490 out_ints_fini:
11491         tg3_ints_fini(tp);
11492
11493         return err;
11494 }
11495
11496 static void tg3_stop(struct tg3 *tp)
11497 {
11498         int i;
11499
11500         tg3_reset_task_cancel(tp);
11501         tg3_netif_stop(tp);
11502
11503         tg3_timer_stop(tp);
11504
11505         tg3_hwmon_close(tp);
11506
11507         tg3_phy_stop(tp);
11508
11509         tg3_full_lock(tp, 1);
11510
11511         tg3_disable_ints(tp);
11512
11513         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11514         tg3_free_rings(tp);
11515         tg3_flag_clear(tp, INIT_COMPLETE);
11516
11517         tg3_full_unlock(tp);
11518
11519         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11520                 struct tg3_napi *tnapi = &tp->napi[i];
11521                 free_irq(tnapi->irq_vec, tnapi);
11522         }
11523
11524         tg3_ints_fini(tp);
11525
11526         tg3_napi_fini(tp);
11527
11528         tg3_free_consistent(tp);
11529 }
11530
11531 static int tg3_open(struct net_device *dev)
11532 {
11533         struct tg3 *tp = netdev_priv(dev);
11534         int err;
11535
11536         if (tp->fw_needed) {
11537                 err = tg3_request_firmware(tp);
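                /* Firmware plays a different role per chip: on the 57766 it
                 * only provides EEE support, on 5701 A0 it is mandatory, and
                 * elsewhere it merely enables TSO, so only the 5701 A0 case
                 * turns a load failure into a fatal error.
                 */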
11538                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11539                         if (err) {
11540                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11541                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11542                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11543                                 netdev_warn(tp->dev, "EEE capability restored\n");
11544                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11545                         }
11546                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11547                         if (err)
11548                                 return err;
11549                 } else if (err) {
11550                         netdev_warn(tp->dev, "TSO capability disabled\n");
11551                         tg3_flag_clear(tp, TSO_CAPABLE);
11552                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11553                         netdev_notice(tp->dev, "TSO capability restored\n");
11554                         tg3_flag_set(tp, TSO_CAPABLE);
11555                 }
11556         }
11557
11558         tg3_carrier_off(tp);
11559
11560         err = tg3_power_up(tp);
11561         if (err)
11562                 return err;
11563
11564         tg3_full_lock(tp, 0);
11565
11566         tg3_disable_ints(tp);
11567         tg3_flag_clear(tp, INIT_COMPLETE);
11568
11569         tg3_full_unlock(tp);
11570
11571         err = tg3_start(tp,
11572                         !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11573                         true, true);
11574         if (err) {
11575                 tg3_frob_aux_power(tp, false);
11576                 pci_set_power_state(tp->pdev, PCI_D3hot);
11577         }
11578
11579         if (tg3_flag(tp, PTP_CAPABLE)) {
11580                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11581                                                    &tp->pdev->dev);
11582                 if (IS_ERR(tp->ptp_clock))
11583                         tp->ptp_clock = NULL;
11584         }
11585
11586         return err;
11587 }
11588
11589 static int tg3_close(struct net_device *dev)
11590 {
11591         struct tg3 *tp = netdev_priv(dev);
11592
11593         tg3_ptp_fini(tp);
11594
11595         tg3_stop(tp);
11596
11597         /* Clear stats across close / open calls */
11598         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11599         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11600
11601         if (pci_device_is_present(tp->pdev)) {
11602                 tg3_power_down_prepare(tp);
11603
11604                 tg3_carrier_off(tp);
11605         }
11606         return 0;
11607 }
11608
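/* Hardware counters are exported as {high,low} 32-bit register pairs;
 * merge the halves into a single u64.
 */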
11609 static inline u64 get_stat64(tg3_stat64_t *val)
11610 {
11611         return ((u64)val->high << 32) | ((u64)val->low);
11612 }
11613
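/* On 5700/5701 copper devices the CRC error count is read out of the
 * PHY's TEST1/RXR counter registers and accumulated in software rather
 * than taken from the MAC's rx_fcs_errors statistic.
 */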
11614 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11615 {
11616         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11617
11618         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11619             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11620              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11621                 u32 val;
11622
11623                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11624                         tg3_writephy(tp, MII_TG3_TEST1,
11625                                      val | MII_TG3_TEST1_CRC_EN);
11626                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11627                 } else
11628                         val = 0;
11629
11630                 tp->phy_crc_errors += val;
11631
11632                 return tp->phy_crc_errors;
11633         }
11634
11635         return get_stat64(&hw_stats->rx_fcs_errors);
11636 }
11637
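/* Each ethtool statistic is the total saved at the last chip reset plus
 * the live hardware counter, so the values keep accumulating across
 * resets (tg3_close() clears the saved totals).
 */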
11638 #define ESTAT_ADD(member) \
11639         estats->member =        old_estats->member + \
11640                                 get_stat64(&hw_stats->member)
11641
11642 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11643 {
11644         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11645         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11646
11647         ESTAT_ADD(rx_octets);
11648         ESTAT_ADD(rx_fragments);
11649         ESTAT_ADD(rx_ucast_packets);
11650         ESTAT_ADD(rx_mcast_packets);
11651         ESTAT_ADD(rx_bcast_packets);
11652         ESTAT_ADD(rx_fcs_errors);
11653         ESTAT_ADD(rx_align_errors);
11654         ESTAT_ADD(rx_xon_pause_rcvd);
11655         ESTAT_ADD(rx_xoff_pause_rcvd);
11656         ESTAT_ADD(rx_mac_ctrl_rcvd);
11657         ESTAT_ADD(rx_xoff_entered);
11658         ESTAT_ADD(rx_frame_too_long_errors);
11659         ESTAT_ADD(rx_jabbers);
11660         ESTAT_ADD(rx_undersize_packets);
11661         ESTAT_ADD(rx_in_length_errors);
11662         ESTAT_ADD(rx_out_length_errors);
11663         ESTAT_ADD(rx_64_or_less_octet_packets);
11664         ESTAT_ADD(rx_65_to_127_octet_packets);
11665         ESTAT_ADD(rx_128_to_255_octet_packets);
11666         ESTAT_ADD(rx_256_to_511_octet_packets);
11667         ESTAT_ADD(rx_512_to_1023_octet_packets);
11668         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11669         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11670         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11671         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11672         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11673
11674         ESTAT_ADD(tx_octets);
11675         ESTAT_ADD(tx_collisions);
11676         ESTAT_ADD(tx_xon_sent);
11677         ESTAT_ADD(tx_xoff_sent);
11678         ESTAT_ADD(tx_flow_control);
11679         ESTAT_ADD(tx_mac_errors);
11680         ESTAT_ADD(tx_single_collisions);
11681         ESTAT_ADD(tx_mult_collisions);
11682         ESTAT_ADD(tx_deferred);
11683         ESTAT_ADD(tx_excessive_collisions);
11684         ESTAT_ADD(tx_late_collisions);
11685         ESTAT_ADD(tx_collide_2times);
11686         ESTAT_ADD(tx_collide_3times);
11687         ESTAT_ADD(tx_collide_4times);
11688         ESTAT_ADD(tx_collide_5times);
11689         ESTAT_ADD(tx_collide_6times);
11690         ESTAT_ADD(tx_collide_7times);
11691         ESTAT_ADD(tx_collide_8times);
11692         ESTAT_ADD(tx_collide_9times);
11693         ESTAT_ADD(tx_collide_10times);
11694         ESTAT_ADD(tx_collide_11times);
11695         ESTAT_ADD(tx_collide_12times);
11696         ESTAT_ADD(tx_collide_13times);
11697         ESTAT_ADD(tx_collide_14times);
11698         ESTAT_ADD(tx_collide_15times);
11699         ESTAT_ADD(tx_ucast_packets);
11700         ESTAT_ADD(tx_mcast_packets);
11701         ESTAT_ADD(tx_bcast_packets);
11702         ESTAT_ADD(tx_carrier_sense_errors);
11703         ESTAT_ADD(tx_discards);
11704         ESTAT_ADD(tx_errors);
11705
11706         ESTAT_ADD(dma_writeq_full);
11707         ESTAT_ADD(dma_write_prioq_full);
11708         ESTAT_ADD(rxbds_empty);
11709         ESTAT_ADD(rx_discards);
11710         ESTAT_ADD(rx_errors);
11711         ESTAT_ADD(rx_threshold_hit);
11712
11713         ESTAT_ADD(dma_readq_full);
11714         ESTAT_ADD(dma_read_prioq_full);
11715         ESTAT_ADD(tx_comp_queue_full);
11716
11717         ESTAT_ADD(ring_set_send_prod_index);
11718         ESTAT_ADD(ring_status_update);
11719         ESTAT_ADD(nic_irqs);
11720         ESTAT_ADD(nic_avoided_irqs);
11721         ESTAT_ADD(nic_tx_threshold_hit);
11722
11723         ESTAT_ADD(mbuf_lwm_thresh_hit);
11724 }
11725
11726 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11727 {
11728         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11729         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11730
11731         stats->rx_packets = old_stats->rx_packets +
11732                 get_stat64(&hw_stats->rx_ucast_packets) +
11733                 get_stat64(&hw_stats->rx_mcast_packets) +
11734                 get_stat64(&hw_stats->rx_bcast_packets);
11735
11736         stats->tx_packets = old_stats->tx_packets +
11737                 get_stat64(&hw_stats->tx_ucast_packets) +
11738                 get_stat64(&hw_stats->tx_mcast_packets) +
11739                 get_stat64(&hw_stats->tx_bcast_packets);
11740
11741         stats->rx_bytes = old_stats->rx_bytes +
11742                 get_stat64(&hw_stats->rx_octets);
11743         stats->tx_bytes = old_stats->tx_bytes +
11744                 get_stat64(&hw_stats->tx_octets);
11745
11746         stats->rx_errors = old_stats->rx_errors +
11747                 get_stat64(&hw_stats->rx_errors);
11748         stats->tx_errors = old_stats->tx_errors +
11749                 get_stat64(&hw_stats->tx_errors) +
11750                 get_stat64(&hw_stats->tx_mac_errors) +
11751                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11752                 get_stat64(&hw_stats->tx_discards);
11753
11754         stats->multicast = old_stats->multicast +
11755                 get_stat64(&hw_stats->rx_mcast_packets);
11756         stats->collisions = old_stats->collisions +
11757                 get_stat64(&hw_stats->tx_collisions);
11758
11759         stats->rx_length_errors = old_stats->rx_length_errors +
11760                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11761                 get_stat64(&hw_stats->rx_undersize_packets);
11762
11763         stats->rx_frame_errors = old_stats->rx_frame_errors +
11764                 get_stat64(&hw_stats->rx_align_errors);
11765         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11766                 get_stat64(&hw_stats->tx_discards);
11767         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11768                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11769
11770         stats->rx_crc_errors = old_stats->rx_crc_errors +
11771                 tg3_calc_crc_errors(tp);
11772
11773         stats->rx_missed_errors = old_stats->rx_missed_errors +
11774                 get_stat64(&hw_stats->rx_discards);
11775
11776         stats->rx_dropped = tp->rx_dropped;
11777         stats->tx_dropped = tp->tx_dropped;
11778 }
11779
11780 static int tg3_get_regs_len(struct net_device *dev)
11781 {
11782         return TG3_REG_BLK_SIZE;
11783 }
11784
11785 static void tg3_get_regs(struct net_device *dev,
11786                 struct ethtool_regs *regs, void *_p)
11787 {
11788         struct tg3 *tp = netdev_priv(dev);
11789
11790         regs->version = 0;
11791
11792         memset(_p, 0, TG3_REG_BLK_SIZE);
11793
11794         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11795                 return;
11796
11797         tg3_full_lock(tp, 0);
11798
11799         tg3_dump_legacy_regs(tp, (u32 *)_p);
11800
11801         tg3_full_unlock(tp);
11802 }
11803
11804 static int tg3_get_eeprom_len(struct net_device *dev)
11805 {
11806         struct tg3 *tp = netdev_priv(dev);
11807
11808         return tp->nvram_size;
11809 }
11810
11811 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11812 {
11813         struct tg3 *tp = netdev_priv(dev);
11814         int ret;
11815         u8  *pd;
11816         u32 i, offset, len, b_offset, b_count;
11817         __be32 val;
11818
11819         if (tg3_flag(tp, NO_NVRAM))
11820                 return -EINVAL;
11821
11822         offset = eeprom->offset;
11823         len = eeprom->len;
11824         eeprom->len = 0;
11825
11826         eeprom->magic = TG3_EEPROM_MAGIC;
11827
11828         if (offset & 3) {
11829                 /* adjustments to start on required 4 byte boundary */
11830                 b_offset = offset & 3;
11831                 b_count = 4 - b_offset;
11832                 if (b_count > len) {
11833                         /* i.e. offset=1 len=2 */
11834                         b_count = len;
11835                 }
11836                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11837                 if (ret)
11838                         return ret;
11839                 memcpy(data, ((char *)&val) + b_offset, b_count);
11840                 len -= b_count;
11841                 offset += b_count;
11842                 eeprom->len += b_count;
11843         }
11844
11845         /* read bytes up to the last 4 byte boundary */
11846         pd = &data[eeprom->len];
11847         for (i = 0; i < (len - (len & 3)); i += 4) {
11848                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11849                 if (ret) {
11850                         eeprom->len += i;
11851                         return ret;
11852                 }
11853                 memcpy(pd + i, &val, 4);
11854         }
11855         eeprom->len += i;
11856
11857         if (len & 3) {
11858                 /* read last bytes not ending on 4 byte boundary */
11859                 pd = &data[eeprom->len];
11860                 b_count = len & 3;
11861                 b_offset = offset + len - b_count;
11862                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11863                 if (ret)
11864                         return ret;
11865                 memcpy(pd, &val, b_count);
11866                 eeprom->len += b_count;
11867         }
11868         return 0;
11869 }
11870
11871 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11872 {
11873         struct tg3 *tp = netdev_priv(dev);
11874         int ret;
11875         u32 offset, len, b_offset, odd_len;
11876         u8 *buf;
11877         __be32 start, end;
11878
11879         if (tg3_flag(tp, NO_NVRAM) ||
11880             eeprom->magic != TG3_EEPROM_MAGIC)
11881                 return -EINVAL;
11882
11883         offset = eeprom->offset;
11884         len = eeprom->len;
11885
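        /* NVRAM writes are done in whole 32-bit words, so unaligned head
         * and tail bytes are handled read-modify-write: the surrounding
         * words are read back, merged with the caller's data in a bounce
         * buffer, and written out as aligned words.
         */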
11886         if ((b_offset = (offset & 3))) {
11887                 /* adjustments to start on required 4 byte boundary */
11888                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11889                 if (ret)
11890                         return ret;
11891                 len += b_offset;
11892                 offset &= ~3;
11893                 if (len < 4)
11894                         len = 4;
11895         }
11896
11897         odd_len = 0;
11898         if (len & 3) {
11899                 /* adjustments to end on required 4 byte boundary */
11900                 odd_len = 1;
11901                 len = (len + 3) & ~3;
11902                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11903                 if (ret)
11904                         return ret;
11905         }
11906
11907         buf = data;
11908         if (b_offset || odd_len) {
11909                 buf = kmalloc(len, GFP_KERNEL);
11910                 if (!buf)
11911                         return -ENOMEM;
11912                 if (b_offset)
11913                         memcpy(buf, &start, 4);
11914                 if (odd_len)
11915                         memcpy(buf+len-4, &end, 4);
11916                 memcpy(buf + b_offset, data, eeprom->len);
11917         }
11918
11919         ret = tg3_nvram_write_block(tp, offset, len, buf);
11920
11921         if (buf != data)
11922                 kfree(buf);
11923
11924         return ret;
11925 }
11926
11927 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11928 {
11929         struct tg3 *tp = netdev_priv(dev);
11930
11931         if (tg3_flag(tp, USE_PHYLIB)) {
11932                 struct phy_device *phydev;
11933                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11934                         return -EAGAIN;
11935                 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
11936                 return phy_ethtool_gset(phydev, cmd);
11937         }
11938
11939         cmd->supported = (SUPPORTED_Autoneg);
11940
11941         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11942                 cmd->supported |= (SUPPORTED_1000baseT_Half |
11943                                    SUPPORTED_1000baseT_Full);
11944
11945         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11946                 cmd->supported |= (SUPPORTED_100baseT_Half |
11947                                   SUPPORTED_100baseT_Full |
11948                                   SUPPORTED_10baseT_Half |
11949                                   SUPPORTED_10baseT_Full |
11950                                   SUPPORTED_TP);
11951                 cmd->port = PORT_TP;
11952         } else {
11953                 cmd->supported |= SUPPORTED_FIBRE;
11954                 cmd->port = PORT_FIBRE;
11955         }
11956
11957         cmd->advertising = tp->link_config.advertising;
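        /* Fold the flow-control setting into the 802.3 pause bits:
         * RX+TX -> Pause, RX-only -> Pause|Asym_Pause,
         * TX-only -> Asym_Pause.
         */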
11958         if (tg3_flag(tp, PAUSE_AUTONEG)) {
11959                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11960                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11961                                 cmd->advertising |= ADVERTISED_Pause;
11962                         } else {
11963                                 cmd->advertising |= ADVERTISED_Pause |
11964                                                     ADVERTISED_Asym_Pause;
11965                         }
11966                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11967                         cmd->advertising |= ADVERTISED_Asym_Pause;
11968                 }
11969         }
11970         if (netif_running(dev) && tp->link_up) {
11971                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11972                 cmd->duplex = tp->link_config.active_duplex;
11973                 cmd->lp_advertising = tp->link_config.rmt_adv;
11974                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11975                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11976                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11977                         else
11978                                 cmd->eth_tp_mdix = ETH_TP_MDI;
11979                 }
11980         } else {
11981                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11982                 cmd->duplex = DUPLEX_UNKNOWN;
11983                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11984         }
11985         cmd->phy_address = tp->phy_addr;
11986         cmd->transceiver = XCVR_INTERNAL;
11987         cmd->autoneg = tp->link_config.autoneg;
11988         cmd->maxtxpkt = 0;
11989         cmd->maxrxpkt = 0;
11990         return 0;
11991 }
11992
11993 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11994 {
11995         struct tg3 *tp = netdev_priv(dev);
11996         u32 speed = ethtool_cmd_speed(cmd);
11997
11998         if (tg3_flag(tp, USE_PHYLIB)) {
11999                 struct phy_device *phydev;
12000                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12001                         return -EAGAIN;
12002                 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12003                 return phy_ethtool_sset(phydev, cmd);
12004         }
12005
12006         if (cmd->autoneg != AUTONEG_ENABLE &&
12007             cmd->autoneg != AUTONEG_DISABLE)
12008                 return -EINVAL;
12009
12010         if (cmd->autoneg == AUTONEG_DISABLE &&
12011             cmd->duplex != DUPLEX_FULL &&
12012             cmd->duplex != DUPLEX_HALF)
12013                 return -EINVAL;
12014
12015         if (cmd->autoneg == AUTONEG_ENABLE) {
12016                 u32 mask = ADVERTISED_Autoneg |
12017                            ADVERTISED_Pause |
12018                            ADVERTISED_Asym_Pause;
12019
12020                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12021                         mask |= ADVERTISED_1000baseT_Half |
12022                                 ADVERTISED_1000baseT_Full;
12023
12024                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12025                         mask |= ADVERTISED_100baseT_Half |
12026                                 ADVERTISED_100baseT_Full |
12027                                 ADVERTISED_10baseT_Half |
12028                                 ADVERTISED_10baseT_Full |
12029                                 ADVERTISED_TP;
12030                 else
12031                         mask |= ADVERTISED_FIBRE;
12032
12033                 if (cmd->advertising & ~mask)
12034                         return -EINVAL;
12035
12036                 mask &= (ADVERTISED_1000baseT_Half |
12037                          ADVERTISED_1000baseT_Full |
12038                          ADVERTISED_100baseT_Half |
12039                          ADVERTISED_100baseT_Full |
12040                          ADVERTISED_10baseT_Half |
12041                          ADVERTISED_10baseT_Full);
12042
12043                 cmd->advertising &= mask;
12044         } else {
12045                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12046                         if (speed != SPEED_1000)
12047                                 return -EINVAL;
12048
12049                         if (cmd->duplex != DUPLEX_FULL)
12050                                 return -EINVAL;
12051                 } else {
12052                         if (speed != SPEED_100 &&
12053                             speed != SPEED_10)
12054                                 return -EINVAL;
12055                 }
12056         }
12057
12058         tg3_full_lock(tp, 0);
12059
12060         tp->link_config.autoneg = cmd->autoneg;
12061         if (cmd->autoneg == AUTONEG_ENABLE) {
12062                 tp->link_config.advertising = (cmd->advertising |
12063                                               ADVERTISED_Autoneg);
12064                 tp->link_config.speed = SPEED_UNKNOWN;
12065                 tp->link_config.duplex = DUPLEX_UNKNOWN;
12066         } else {
12067                 tp->link_config.advertising = 0;
12068                 tp->link_config.speed = speed;
12069                 tp->link_config.duplex = cmd->duplex;
12070         }
12071
12072         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12073
12074         tg3_warn_mgmt_link_flap(tp);
12075
12076         if (netif_running(dev))
12077                 tg3_setup_phy(tp, true);
12078
12079         tg3_full_unlock(tp);
12080
12081         return 0;
12082 }
12083
12084 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12085 {
12086         struct tg3 *tp = netdev_priv(dev);
12087
12088         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12089         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12090         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12091         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12092 }
12093
12094 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12095 {
12096         struct tg3 *tp = netdev_priv(dev);
12097
12098         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12099                 wol->supported = WAKE_MAGIC;
12100         else
12101                 wol->supported = 0;
12102         wol->wolopts = 0;
12103         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12104                 wol->wolopts = WAKE_MAGIC;
12105         memset(&wol->sopass, 0, sizeof(wol->sopass));
12106 }
12107
12108 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12109 {
12110         struct tg3 *tp = netdev_priv(dev);
12111         struct device *dp = &tp->pdev->dev;
12112
12113         if (wol->wolopts & ~WAKE_MAGIC)
12114                 return -EINVAL;
12115         if ((wol->wolopts & WAKE_MAGIC) &&
12116             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12117                 return -EINVAL;
12118
12119         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12120
12121         if (device_may_wakeup(dp))
12122                 tg3_flag_set(tp, WOL_ENABLE);
12123         else
12124                 tg3_flag_clear(tp, WOL_ENABLE);
12125
12126         return 0;
12127 }
12128
12129 static u32 tg3_get_msglevel(struct net_device *dev)
12130 {
12131         struct tg3 *tp = netdev_priv(dev);
12132         return tp->msg_enable;
12133 }
12134
12135 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12136 {
12137         struct tg3 *tp = netdev_priv(dev);
12138         tp->msg_enable = value;
12139 }
12140
12141 static int tg3_nway_reset(struct net_device *dev)
12142 {
12143         struct tg3 *tp = netdev_priv(dev);
12144         int r;
12145
12146         if (!netif_running(dev))
12147                 return -EAGAIN;
12148
12149         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12150                 return -EINVAL;
12151
12152         tg3_warn_mgmt_link_flap(tp);
12153
12154         if (tg3_flag(tp, USE_PHYLIB)) {
12155                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12156                         return -EAGAIN;
12157                 r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
12158         } else {
12159                 u32 bmcr;
12160
12161                 spin_lock_bh(&tp->lock);
12162                 r = -EINVAL;
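                /* BMCR is read twice and only the second value is used;
                 * the first read is presumably a dummy that flushes a
                 * stale value out of the PHY.
                 */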
12163                 tg3_readphy(tp, MII_BMCR, &bmcr);
12164                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12165                     ((bmcr & BMCR_ANENABLE) ||
12166                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12167                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12168                                                    BMCR_ANENABLE);
12169                         r = 0;
12170                 }
12171                 spin_unlock_bh(&tp->lock);
12172         }
12173
12174         return r;
12175 }
12176
12177 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12178 {
12179         struct tg3 *tp = netdev_priv(dev);
12180
12181         ering->rx_max_pending = tp->rx_std_ring_mask;
12182         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12183                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12184         else
12185                 ering->rx_jumbo_max_pending = 0;
12186
12187         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12188
12189         ering->rx_pending = tp->rx_pending;
12190         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12191                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12192         else
12193                 ering->rx_jumbo_pending = 0;
12194
12195         ering->tx_pending = tp->napi[0].tx_pending;
12196 }
12197
12198 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12199 {
12200         struct tg3 *tp = netdev_priv(dev);
12201         int i, irq_sync = 0, err = 0;
12202
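        /* The TX ring must be able to hold at least one maximally
         * fragmented skb; chips with the TSO bug need about three times
         * that headroom because large TSO frames may be segmented in the
         * driver before they are queued.
         */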
12203         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12204             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12205             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12206             (ering->tx_pending <= MAX_SKB_FRAGS) ||
12207             (tg3_flag(tp, TSO_BUG) &&
12208              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12209                 return -EINVAL;
12210
12211         if (netif_running(dev)) {
12212                 tg3_phy_stop(tp);
12213                 tg3_netif_stop(tp);
12214                 irq_sync = 1;
12215         }
12216
12217         tg3_full_lock(tp, irq_sync);
12218
12219         tp->rx_pending = ering->rx_pending;
12220
12221         if (tg3_flag(tp, MAX_RXPEND_64) &&
12222             tp->rx_pending > 63)
12223                 tp->rx_pending = 63;
12224         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12225
12226         for (i = 0; i < tp->irq_max; i++)
12227                 tp->napi[i].tx_pending = ering->tx_pending;
12228
12229         if (netif_running(dev)) {
12230                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12231                 err = tg3_restart_hw(tp, false);
12232                 if (!err)
12233                         tg3_netif_start(tp);
12234         }
12235
12236         tg3_full_unlock(tp);
12237
12238         if (irq_sync && !err)
12239                 tg3_phy_start(tp);
12240
12241         return err;
12242 }
12243
12244 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12245 {
12246         struct tg3 *tp = netdev_priv(dev);
12247
12248         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12249
12250         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12251                 epause->rx_pause = 1;
12252         else
12253                 epause->rx_pause = 0;
12254
12255         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12256                 epause->tx_pause = 1;
12257         else
12258                 epause->tx_pause = 0;
12259 }
12260
12261 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12262 {
12263         struct tg3 *tp = netdev_priv(dev);
12264         int err = 0;
12265
12266         if (tp->link_config.autoneg == AUTONEG_ENABLE)
12267                 tg3_warn_mgmt_link_flap(tp);
12268
12269         if (tg3_flag(tp, USE_PHYLIB)) {
12270                 u32 newadv;
12271                 struct phy_device *phydev;
12272
12273                 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12274
12275                 if (!(phydev->supported & SUPPORTED_Pause) ||
12276                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12277                      (epause->rx_pause != epause->tx_pause)))
12278                         return -EINVAL;
12279
12280                 tp->link_config.flowctrl = 0;
12281                 if (epause->rx_pause) {
12282                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12283
12284                         if (epause->tx_pause) {
12285                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12286                                 newadv = ADVERTISED_Pause;
12287                         } else
12288                                 newadv = ADVERTISED_Pause |
12289                                          ADVERTISED_Asym_Pause;
12290                 } else if (epause->tx_pause) {
12291                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12292                         newadv = ADVERTISED_Asym_Pause;
12293                 } else
12294                         newadv = 0;
12295
12296                 if (epause->autoneg)
12297                         tg3_flag_set(tp, PAUSE_AUTONEG);
12298                 else
12299                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12300
12301                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12302                         u32 oldadv = phydev->advertising &
12303                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12304                         if (oldadv != newadv) {
12305                                 phydev->advertising &=
12306                                         ~(ADVERTISED_Pause |
12307                                           ADVERTISED_Asym_Pause);
12308                                 phydev->advertising |= newadv;
12309                                 if (phydev->autoneg) {
12310                                         /*
12311                                          * Always renegotiate the link to
12312                                          * inform our link partner of our
12313                                          * flow control settings, even if the
12314                                          * flow control is forced.  Let
12315                                          * tg3_adjust_link() do the final
12316                                          * flow control setup.
12317                                          */
12318                                         return phy_start_aneg(phydev);
12319                                 }
12320                         }
12321
12322                         if (!epause->autoneg)
12323                                 tg3_setup_flow_control(tp, 0, 0);
12324                 } else {
12325                         tp->link_config.advertising &=
12326                                         ~(ADVERTISED_Pause |
12327                                           ADVERTISED_Asym_Pause);
12328                         tp->link_config.advertising |= newadv;
12329                 }
12330         } else {
12331                 int irq_sync = 0;
12332
12333                 if (netif_running(dev)) {
12334                         tg3_netif_stop(tp);
12335                         irq_sync = 1;
12336                 }
12337
12338                 tg3_full_lock(tp, irq_sync);
12339
12340                 if (epause->autoneg)
12341                         tg3_flag_set(tp, PAUSE_AUTONEG);
12342                 else
12343                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12344                 if (epause->rx_pause)
12345                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12346                 else
12347                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12348                 if (epause->tx_pause)
12349                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12350                 else
12351                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12352
12353                 if (netif_running(dev)) {
12354                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12355                         err = tg3_restart_hw(tp, false);
12356                         if (!err)
12357                                 tg3_netif_start(tp);
12358                 }
12359
12360                 tg3_full_unlock(tp);
12361         }
12362
12363         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12364
12365         return err;
12366 }
12367
12368 static int tg3_get_sset_count(struct net_device *dev, int sset)
12369 {
12370         switch (sset) {
12371         case ETH_SS_TEST:
12372                 return TG3_NUM_TEST;
12373         case ETH_SS_STATS:
12374                 return TG3_NUM_STATS;
12375         default:
12376                 return -EOPNOTSUPP;
12377         }
12378 }
12379
12380 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12381                          u32 *rules __always_unused)
12382 {
12383         struct tg3 *tp = netdev_priv(dev);
12384
12385         if (!tg3_flag(tp, SUPPORT_MSIX))
12386                 return -EOPNOTSUPP;
12387
12388         switch (info->cmd) {
12389         case ETHTOOL_GRXRINGS:
12390                 if (netif_running(tp->dev))
12391                         info->data = tp->rxq_cnt;
12392                 else {
12393                         info->data = num_online_cpus();
12394                         if (info->data > TG3_RSS_MAX_NUM_QS)
12395                                 info->data = TG3_RSS_MAX_NUM_QS;
12396                 }
12397
12398                 /* The first interrupt vector only
12399                  * handles link interrupts.
12400                  */
12401                 info->data -= 1;
12402                 return 0;
12403
12404         default:
12405                 return -EOPNOTSUPP;
12406         }
12407 }
12408
12409 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12410 {
12411         u32 size = 0;
12412         struct tg3 *tp = netdev_priv(dev);
12413
12414         if (tg3_flag(tp, SUPPORT_MSIX))
12415                 size = TG3_RSS_INDIR_TBL_SIZE;
12416
12417         return size;
12418 }
12419
12420 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12421 {
12422         struct tg3 *tp = netdev_priv(dev);
12423         int i;
12424
12425         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12426                 indir[i] = tp->rss_ind_tbl[i];
12427
12428         return 0;
12429 }
12430
12431 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12432 {
12433         struct tg3 *tp = netdev_priv(dev);
12434         size_t i;
12435
12436         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12437                 tp->rss_ind_tbl[i] = indir[i];
12438
12439         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12440                 return 0;
12441
12442         /* It is legal to write the indirection
12443          * table while the device is running.
12444          */
12445         tg3_full_lock(tp, 0);
12446         tg3_rss_write_indir_tbl(tp);
12447         tg3_full_unlock(tp);
12448
12449         return 0;
12450 }
12451
12452 static void tg3_get_channels(struct net_device *dev,
12453                              struct ethtool_channels *channel)
12454 {
12455         struct tg3 *tp = netdev_priv(dev);
12456         u32 deflt_qs = netif_get_num_default_rss_queues();
12457
12458         channel->max_rx = tp->rxq_max;
12459         channel->max_tx = tp->txq_max;
12460
12461         if (netif_running(dev)) {
12462                 channel->rx_count = tp->rxq_cnt;
12463                 channel->tx_count = tp->txq_cnt;
12464         } else {
12465                 if (tp->rxq_req)
12466                         channel->rx_count = tp->rxq_req;
12467                 else
12468                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12469
12470                 if (tp->txq_req)
12471                         channel->tx_count = tp->txq_req;
12472                 else
12473                         channel->tx_count = min(deflt_qs, tp->txq_max);
12474         }
12475 }
12476
12477 static int tg3_set_channels(struct net_device *dev,
12478                             struct ethtool_channels *channel)
12479 {
12480         struct tg3 *tp = netdev_priv(dev);
12481
12482         if (!tg3_flag(tp, SUPPORT_MSIX))
12483                 return -EOPNOTSUPP;
12484
12485         if (channel->rx_count > tp->rxq_max ||
12486             channel->tx_count > tp->txq_max)
12487                 return -EINVAL;
12488
12489         tp->rxq_req = channel->rx_count;
12490         tp->txq_req = channel->tx_count;
12491
12492         if (!netif_running(dev))
12493                 return 0;
12494
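        /* Apply the new channel counts with a full stop/start cycle so
         * that interrupt vectors, NAPI contexts, and rings are
         * reallocated to match.
         */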
12495         tg3_stop(tp);
12496
12497         tg3_carrier_off(tp);
12498
12499         tg3_start(tp, true, false, false);
12500
12501         return 0;
12502 }
12503
12504 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12505 {
12506         switch (stringset) {
12507         case ETH_SS_STATS:
12508                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12509                 break;
12510         case ETH_SS_TEST:
12511                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12512                 break;
12513         default:
12514                 WARN_ON(1);     /* we need a WARN() */
12515                 break;
12516         }
12517 }
12518
12519 static int tg3_set_phys_id(struct net_device *dev,
12520                             enum ethtool_phys_id_state state)
12521 {
12522         struct tg3 *tp = netdev_priv(dev);
12523
12524         if (!netif_running(tp->dev))
12525                 return -EAGAIN;
12526
12527         switch (state) {
12528         case ETHTOOL_ID_ACTIVE:
12529                 return 1;       /* cycle on/off once per second */
12530
12531         case ETHTOOL_ID_ON:
12532                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12533                      LED_CTRL_1000MBPS_ON |
12534                      LED_CTRL_100MBPS_ON |
12535                      LED_CTRL_10MBPS_ON |
12536                      LED_CTRL_TRAFFIC_OVERRIDE |
12537                      LED_CTRL_TRAFFIC_BLINK |
12538                      LED_CTRL_TRAFFIC_LED);
12539                 break;
12540
12541         case ETHTOOL_ID_OFF:
12542                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12543                      LED_CTRL_TRAFFIC_OVERRIDE);
12544                 break;
12545
12546         case ETHTOOL_ID_INACTIVE:
12547                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12548                 break;
12549         }
12550
12551         return 0;
12552 }
12553
12554 static void tg3_get_ethtool_stats(struct net_device *dev,
12555                                    struct ethtool_stats *estats, u64 *tmp_stats)
12556 {
12557         struct tg3 *tp = netdev_priv(dev);
12558
12559         if (tp->hw_stats)
12560                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12561         else
12562                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12563 }
12564
12565 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12566 {
12567         int i;
12568         __be32 *buf;
12569         u32 offset = 0, len = 0;
12570         u32 magic, val;
12571
12572         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12573                 return NULL;
12574
12575         if (magic == TG3_EEPROM_MAGIC) {
12576                 for (offset = TG3_NVM_DIR_START;
12577                      offset < TG3_NVM_DIR_END;
12578                      offset += TG3_NVM_DIRENT_SIZE) {
12579                         if (tg3_nvram_read(tp, offset, &val))
12580                                 return NULL;
12581
12582                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12583                             TG3_NVM_DIRTYPE_EXTVPD)
12584                                 break;
12585                 }
12586
12587                 if (offset != TG3_NVM_DIR_END) {
12588                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12589                         if (tg3_nvram_read(tp, offset + 4, &offset))
12590                                 return NULL;
12591
12592                         offset = tg3_nvram_logical_addr(tp, offset);
12593                 }
12594         }
12595
12596         if (!offset || !len) {
12597                 offset = TG3_NVM_VPD_OFF;
12598                 len = TG3_NVM_VPD_LEN;
12599         }
12600
12601         buf = kmalloc(len, GFP_KERNEL);
12602         if (buf == NULL)
12603                 return NULL;
12604
12605         if (magic == TG3_EEPROM_MAGIC) {
12606                 for (i = 0; i < len; i += 4) {
12607                         /* The data is in little-endian format in NVRAM.
12608                          * Use the big-endian read routines to preserve
12609                          * the byte order as it exists in NVRAM.
12610                          */
12611                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12612                                 goto error;
12613                 }
12614         } else {
12615                 u8 *ptr;
12616                 ssize_t cnt;
12617                 unsigned int pos = 0;
12618
12619                 ptr = (u8 *)&buf[0];
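                /* Pull the VPD in at most three passes; -ETIMEDOUT and
                 * -EINTR are treated as zero-length reads and retried.
                 */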
12620                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12621                         cnt = pci_read_vpd(tp->pdev, pos,
12622                                            len - pos, ptr);
12623                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
12624                                 cnt = 0;
12625                         else if (cnt < 0)
12626                                 goto error;
12627                 }
12628                 if (pos != len)
12629                         goto error;
12630         }
12631
12632         *vpdlen = len;
12633
12634         return buf;
12635
12636 error:
12637         kfree(buf);
12638         return NULL;
12639 }
12640
12641 #define NVRAM_TEST_SIZE 0x100
12642 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12643 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12644 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12645 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12646 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12647 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12648 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12649 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12650
12651 static int tg3_test_nvram(struct tg3 *tp)
12652 {
12653         u32 csum, magic, len;
12654         __be32 *buf;
12655         int i, j, k, err = 0, size;
12656
12657         if (tg3_flag(tp, NO_NVRAM))
12658                 return 0;
12659
12660         if (tg3_nvram_read(tp, 0, &magic) != 0)
12661                 return -EIO;
12662
12663         if (magic == TG3_EEPROM_MAGIC)
12664                 size = NVRAM_TEST_SIZE;
12665         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12666                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12667                     TG3_EEPROM_SB_FORMAT_1) {
12668                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12669                         case TG3_EEPROM_SB_REVISION_0:
12670                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12671                                 break;
12672                         case TG3_EEPROM_SB_REVISION_2:
12673                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12674                                 break;
12675                         case TG3_EEPROM_SB_REVISION_3:
12676                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12677                                 break;
12678                         case TG3_EEPROM_SB_REVISION_4:
12679                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12680                                 break;
12681                         case TG3_EEPROM_SB_REVISION_5:
12682                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12683                                 break;
12684                         case TG3_EEPROM_SB_REVISION_6:
12685                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12686                                 break;
12687                         default:
12688                                 return -EIO;
12689                         }
12690                 } else
12691                         return 0;
12692         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12693                 size = NVRAM_SELFBOOT_HW_SIZE;
12694         else
12695                 return -EIO;
12696
12697         buf = kmalloc(size, GFP_KERNEL);
12698         if (buf == NULL)
12699                 return -ENOMEM;
12700
12701         err = -EIO;
12702         for (i = 0, j = 0; i < size; i += 4, j++) {
12703                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12704                 if (err)
12705                         break;
12706         }
12707         if (i < size)
12708                 goto out;
12709
12710         /* Selfboot format */
12711         magic = be32_to_cpu(buf[0]);
12712         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12713             TG3_EEPROM_MAGIC_FW) {
12714                 u8 *buf8 = (u8 *) buf, csum8 = 0;
12715
12716                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12717                     TG3_EEPROM_SB_REVISION_2) {
12718                         /* For rev 2, the csum doesn't include the MBA. */
12719                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12720                                 csum8 += buf8[i];
12721                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12722                                 csum8 += buf8[i];
12723                 } else {
12724                         for (i = 0; i < size; i++)
12725                                 csum8 += buf8[i];
12726                 }
12727
12728                 if (csum8 == 0) {
12729                         err = 0;
12730                         goto out;
12731                 }
12732
12733                 err = -EIO;
12734                 goto out;
12735         }
12736
12737         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12738             TG3_EEPROM_MAGIC_HW) {
12739                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12740                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12741                 u8 *buf8 = (u8 *) buf;
12742
12743                 /* Separate the parity bits and the data bytes.  */
12744                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12745                         if ((i == 0) || (i == 8)) {
12746                                 int l;
12747                                 u8 msk;
12748
12749                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12750                                         parity[k++] = buf8[i] & msk;
12751                                 i++;
12752                         } else if (i == 16) {
12753                                 int l;
12754                                 u8 msk;
12755
12756                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12757                                         parity[k++] = buf8[i] & msk;
12758                                 i++;
12759
12760                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12761                                         parity[k++] = buf8[i] & msk;
12762                                 i++;
12763                         }
12764                         data[j++] = buf8[i];
12765                 }
12766
12767                 err = -EIO;
12768                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12769                         u8 hw8 = hweight8(data[i]);
12770
12771                         if ((hw8 & 0x1) && parity[i])
12772                                 goto out;
12773                         else if (!(hw8 & 0x1) && !parity[i])
12774                                 goto out;
12775                 }
12776                 err = 0;
12777                 goto out;
12778         }
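        /* Note: the loop above enforces odd parity per byte -- a stored
         * parity bit must be set exactly when hweight8(data[i]) is even,
         * so (data weight + parity bit) is always odd.  A mismatch in
         * either direction leaves err = -EIO and bails out.
         */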
12779
12780         err = -EIO;
12781
12782         /* Bootstrap checksum at offset 0x10 */
12783         csum = calc_crc((unsigned char *) buf, 0x10);
12784         if (csum != le32_to_cpu(buf[0x10/4]))
12785                 goto out;
12786
12787         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12788         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12789         if (csum != le32_to_cpu(buf[0xfc/4]))
12790                 goto out;
12791
12792         kfree(buf);
12793
12794         buf = tg3_vpd_readblock(tp, &len);
12795         if (!buf)
12796                 return -ENOMEM;
12797
12798         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12799         if (i > 0) {
12800                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12801                 if (j < 0)
12802                         goto out;
12803
12804                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12805                         goto out;
12806
12807                 i += PCI_VPD_LRDT_TAG_SIZE;
12808                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12809                                               PCI_VPD_RO_KEYWORD_CHKSUM);
12810                 if (j > 0) {
12811                         u8 csum8 = 0;
12812
12813                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
12814
12815                         for (i = 0; i <= j; i++)
12816                                 csum8 += ((u8 *)buf)[i];
12817
12818                         if (csum8)
12819                                 goto out;
12820                 }
12821         }
12822
12823         err = 0;
12824
12825 out:
12826         kfree(buf);
12827         return err;
12828 }
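/* Note on the NVRAM test above: legacy images are validated with two
 * 32-bit CRCs (bootstrap area at 0x10, manufacturing block at 0xfc),
 * while selfboot images use the byte-sum or parity schemes handled
 * earlier.  The final VPD stage re-reads the VPD block and, if a
 * read-only CHKSUM keyword is present, requires the byte sum from the
 * start of the block through the checksum field to be 0 mod 256.
 */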
12829
12830 #define TG3_SERDES_TIMEOUT_SEC  2
12831 #define TG3_COPPER_TIMEOUT_SEC  6
12832
12833 static int tg3_test_link(struct tg3 *tp)
12834 {
12835         int i, max;
12836
12837         if (!netif_running(tp->dev))
12838                 return -ENODEV;
12839
12840         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12841                 max = TG3_SERDES_TIMEOUT_SEC;
12842         else
12843                 max = TG3_COPPER_TIMEOUT_SEC;
12844
12845         for (i = 0; i < max; i++) {
12846                 if (tp->link_up)
12847                         return 0;
12848
12849                 if (msleep_interruptible(1000))
12850                         break;
12851         }
12852
12853         return -EIO;
12854 }
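/* Note: the link test above simply polls tp->link_up once a second,
 * allowing 2 s for serdes and 6 s for copper (per the defines above)
 * before declaring failure; msleep_interruptible() lets a pending
 * signal abort the wait early.
 */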
12855
12856 /* Only test the commonly used registers */
12857 static int tg3_test_registers(struct tg3 *tp)
12858 {
12859         int i, is_5705, is_5750;
12860         u32 offset, read_mask, write_mask, val, save_val, read_val;
12861         static struct {
12862                 u16 offset;
12863                 u16 flags;
12864 #define TG3_FL_5705     0x1
12865 #define TG3_FL_NOT_5705 0x2
12866 #define TG3_FL_NOT_5788 0x4
12867 #define TG3_FL_NOT_5750 0x8
12868                 u32 read_mask;
12869                 u32 write_mask;
12870         } reg_tbl[] = {
12871                 /* MAC Control Registers */
12872                 { MAC_MODE, TG3_FL_NOT_5705,
12873                         0x00000000, 0x00ef6f8c },
12874                 { MAC_MODE, TG3_FL_5705,
12875                         0x00000000, 0x01ef6b8c },
12876                 { MAC_STATUS, TG3_FL_NOT_5705,
12877                         0x03800107, 0x00000000 },
12878                 { MAC_STATUS, TG3_FL_5705,
12879                         0x03800100, 0x00000000 },
12880                 { MAC_ADDR_0_HIGH, 0x0000,
12881                         0x00000000, 0x0000ffff },
12882                 { MAC_ADDR_0_LOW, 0x0000,
12883                         0x00000000, 0xffffffff },
12884                 { MAC_RX_MTU_SIZE, 0x0000,
12885                         0x00000000, 0x0000ffff },
12886                 { MAC_TX_MODE, 0x0000,
12887                         0x00000000, 0x00000070 },
12888                 { MAC_TX_LENGTHS, 0x0000,
12889                         0x00000000, 0x00003fff },
12890                 { MAC_RX_MODE, TG3_FL_NOT_5705,
12891                         0x00000000, 0x000007fc },
12892                 { MAC_RX_MODE, TG3_FL_5705,
12893                         0x00000000, 0x000007dc },
12894                 { MAC_HASH_REG_0, 0x0000,
12895                         0x00000000, 0xffffffff },
12896                 { MAC_HASH_REG_1, 0x0000,
12897                         0x00000000, 0xffffffff },
12898                 { MAC_HASH_REG_2, 0x0000,
12899                         0x00000000, 0xffffffff },
12900                 { MAC_HASH_REG_3, 0x0000,
12901                         0x00000000, 0xffffffff },
12902
12903                 /* Receive Data and Receive BD Initiator Control Registers. */
12904                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12905                         0x00000000, 0xffffffff },
12906                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12907                         0x00000000, 0xffffffff },
12908                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12909                         0x00000000, 0x00000003 },
12910                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12911                         0x00000000, 0xffffffff },
12912                 { RCVDBDI_STD_BD+0, 0x0000,
12913                         0x00000000, 0xffffffff },
12914                 { RCVDBDI_STD_BD+4, 0x0000,
12915                         0x00000000, 0xffffffff },
12916                 { RCVDBDI_STD_BD+8, 0x0000,
12917                         0x00000000, 0xffff0002 },
12918                 { RCVDBDI_STD_BD+0xc, 0x0000,
12919                         0x00000000, 0xffffffff },
12920
12921                 /* Receive BD Initiator Control Registers. */
12922                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12923                         0x00000000, 0xffffffff },
12924                 { RCVBDI_STD_THRESH, TG3_FL_5705,
12925                         0x00000000, 0x000003ff },
12926                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12927                         0x00000000, 0xffffffff },
12928
12929                 /* Host Coalescing Control Registers. */
12930                 { HOSTCC_MODE, TG3_FL_NOT_5705,
12931                         0x00000000, 0x00000004 },
12932                 { HOSTCC_MODE, TG3_FL_5705,
12933                         0x00000000, 0x000000f6 },
12934                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12935                         0x00000000, 0xffffffff },
12936                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12937                         0x00000000, 0x000003ff },
12938                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12939                         0x00000000, 0xffffffff },
12940                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12941                         0x00000000, 0x000003ff },
12942                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12943                         0x00000000, 0xffffffff },
12944                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12945                         0x00000000, 0x000000ff },
12946                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12947                         0x00000000, 0xffffffff },
12948                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12949                         0x00000000, 0x000000ff },
12950                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12951                         0x00000000, 0xffffffff },
12952                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12953                         0x00000000, 0xffffffff },
12954                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12955                         0x00000000, 0xffffffff },
12956                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12957                         0x00000000, 0x000000ff },
12958                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12959                         0x00000000, 0xffffffff },
12960                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12961                         0x00000000, 0x000000ff },
12962                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12963                         0x00000000, 0xffffffff },
12964                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12965                         0x00000000, 0xffffffff },
12966                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12967                         0x00000000, 0xffffffff },
12968                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12969                         0x00000000, 0xffffffff },
12970                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12971                         0x00000000, 0xffffffff },
12972                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12973                         0xffffffff, 0x00000000 },
12974                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12975                         0xffffffff, 0x00000000 },
12976
12977                 /* Buffer Manager Control Registers. */
12978                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12979                         0x00000000, 0x007fff80 },
12980                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12981                         0x00000000, 0x007fffff },
12982                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12983                         0x00000000, 0x0000003f },
12984                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12985                         0x00000000, 0x000001ff },
12986                 { BUFMGR_MB_HIGH_WATER, 0x0000,
12987                         0x00000000, 0x000001ff },
12988                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12989                         0xffffffff, 0x00000000 },
12990                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12991                         0xffffffff, 0x00000000 },
12992
12993                 /* Mailbox Registers */
12994                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12995                         0x00000000, 0x000001ff },
12996                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12997                         0x00000000, 0x000001ff },
12998                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12999                         0x00000000, 0x000007ff },
13000                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13001                         0x00000000, 0x000001ff },
13002
13003                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13004         };
13005
13006         is_5705 = is_5750 = 0;
13007         if (tg3_flag(tp, 5705_PLUS)) {
13008                 is_5705 = 1;
13009                 if (tg3_flag(tp, 5750_PLUS))
13010                         is_5750 = 1;
13011         }
13012
13013         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13014                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13015                         continue;
13016
13017                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13018                         continue;
13019
13020                 if (tg3_flag(tp, IS_5788) &&
13021                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
13022                         continue;
13023
13024                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13025                         continue;
13026
13027                 offset = (u32) reg_tbl[i].offset;
13028                 read_mask = reg_tbl[i].read_mask;
13029                 write_mask = reg_tbl[i].write_mask;
13030
13031                 /* Save the original register content */
13032                 save_val = tr32(offset);
13033
13034                 /* Determine the read-only value. */
13035                 read_val = save_val & read_mask;
13036
13037                 /* Write zero to the register, then make sure the read-only bits
13038                  * are not changed and the read/write bits are all zeros.
13039                  */
13040                 tw32(offset, 0);
13041
13042                 val = tr32(offset);
13043
13044                 /* Test the read-only and read/write bits. */
13045                 if (((val & read_mask) != read_val) || (val & write_mask))
13046                         goto out;
13047
13048                 /* Write ones to all the bits defined by RdMask and WrMask, then
13049                  * make sure the read-only bits are not changed and the
13050                  * read/write bits are all ones.
13051                  */
13052                 tw32(offset, read_mask | write_mask);
13053
13054                 val = tr32(offset);
13055
13056                 /* Test the read-only bits. */
13057                 if ((val & read_mask) != read_val)
13058                         goto out;
13059
13060                 /* Test the read/write bits. */
13061                 if ((val & write_mask) != write_mask)
13062                         goto out;
13063
13064                 tw32(offset, save_val);
13065         }
13066
13067         return 0;
13068
13069 out:
13070         if (netif_msg_hw(tp))
13071                 netdev_err(tp->dev,
13072                            "Register test failed at offset %x\n", offset);
13073         tw32(offset, save_val);
13074         return -EIO;
13075 }
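/* Note on the register test technique: for each table entry, read_mask
 * marks read-only bits (their value must survive any write) and
 * write_mask marks read/write bits (they must latch whatever is
 * written).  E.g. for MAC_STATUS with read_mask 0x03800107 and
 * write_mask 0, writing 0 and then 0x03800107 must both read back the
 * original read-only bit values with every other bit zero.  The saved
 * value is restored afterward either way.
 */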
13076
13077 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13078 {
13079         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13080         int i;
13081         u32 j;
13082
13083         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13084                 for (j = 0; j < len; j += 4) {
13085                         u32 val;
13086
13087                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13088                         tg3_read_mem(tp, offset + j, &val);
13089                         if (val != test_pattern[i])
13090                                 return -EIO;
13091                 }
13092         }
13093         return 0;
13094 }
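/* Note: the classic all-zeros, all-ones and alternating 0xaa55a55a
 * patterns exercise every data bit in both states, catching stuck-at
 * bits and adjacent bits shorted together in the on-chip memory.
 */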
13095
13096 static int tg3_test_memory(struct tg3 *tp)
13097 {
13098         static struct mem_entry {
13099                 u32 offset;
13100                 u32 len;
13101         } mem_tbl_570x[] = {
13102                 { 0x00000000, 0x00b50},
13103                 { 0x00002000, 0x1c000},
13104                 { 0xffffffff, 0x00000}
13105         }, mem_tbl_5705[] = {
13106                 { 0x00000100, 0x0000c},
13107                 { 0x00000200, 0x00008},
13108                 { 0x00004000, 0x00800},
13109                 { 0x00006000, 0x01000},
13110                 { 0x00008000, 0x02000},
13111                 { 0x00010000, 0x0e000},
13112                 { 0xffffffff, 0x00000}
13113         }, mem_tbl_5755[] = {
13114                 { 0x00000200, 0x00008},
13115                 { 0x00004000, 0x00800},
13116                 { 0x00006000, 0x00800},
13117                 { 0x00008000, 0x02000},
13118                 { 0x00010000, 0x0c000},
13119                 { 0xffffffff, 0x00000}
13120         }, mem_tbl_5906[] = {
13121                 { 0x00000200, 0x00008},
13122                 { 0x00004000, 0x00400},
13123                 { 0x00006000, 0x00400},
13124                 { 0x00008000, 0x01000},
13125                 { 0x00010000, 0x01000},
13126                 { 0xffffffff, 0x00000}
13127         }, mem_tbl_5717[] = {
13128                 { 0x00000200, 0x00008},
13129                 { 0x00010000, 0x0a000},
13130                 { 0x00020000, 0x13c00},
13131                 { 0xffffffff, 0x00000}
13132         }, mem_tbl_57765[] = {
13133                 { 0x00000200, 0x00008},
13134                 { 0x00004000, 0x00800},
13135                 { 0x00006000, 0x09800},
13136                 { 0x00010000, 0x0a000},
13137                 { 0xffffffff, 0x00000}
13138         };
13139         struct mem_entry *mem_tbl;
13140         int err = 0;
13141         int i;
13142
13143         if (tg3_flag(tp, 5717_PLUS))
13144                 mem_tbl = mem_tbl_5717;
13145         else if (tg3_flag(tp, 57765_CLASS) ||
13146                  tg3_asic_rev(tp) == ASIC_REV_5762)
13147                 mem_tbl = mem_tbl_57765;
13148         else if (tg3_flag(tp, 5755_PLUS))
13149                 mem_tbl = mem_tbl_5755;
13150         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13151                 mem_tbl = mem_tbl_5906;
13152         else if (tg3_flag(tp, 5705_PLUS))
13153                 mem_tbl = mem_tbl_5705;
13154         else
13155                 mem_tbl = mem_tbl_570x;
13156
13157         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13158                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13159                 if (err)
13160                         break;
13161         }
13162
13163         return err;
13164 }
13165
13166 #define TG3_TSO_MSS             500
13167
13168 #define TG3_TSO_IP_HDR_LEN      20
13169 #define TG3_TSO_TCP_HDR_LEN     20
13170 #define TG3_TSO_TCP_OPT_LEN     12
13171
13172 static const u8 tg3_tso_header[] = {
13173 0x08, 0x00,
13174 0x45, 0x00, 0x00, 0x00,
13175 0x00, 0x00, 0x40, 0x00,
13176 0x40, 0x06, 0x00, 0x00,
13177 0x0a, 0x00, 0x00, 0x01,
13178 0x0a, 0x00, 0x00, 0x02,
13179 0x0d, 0x00, 0xe0, 0x00,
13180 0x00, 0x00, 0x01, 0x00,
13181 0x00, 0x00, 0x02, 0x00,
13182 0x80, 0x10, 0x10, 0x00,
13183 0x14, 0x09, 0x00, 0x00,
13184 0x01, 0x01, 0x08, 0x0a,
13185 0x11, 0x11, 0x11, 0x11,
13186 0x11, 0x11, 0x11, 0x11,
13187 };
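/* Layout of tg3_tso_header (54 bytes, annotated for reference):
 *   0x08 0x00            - Ethertype IPv4 (follows the two MAC addresses)
 *   20-byte IPv4 header  - ver 4/IHL 5, DF set, TTL 64, proto TCP,
 *                          10.0.0.1 -> 10.0.0.2; tot_len is patched at
 *                          runtime in tg3_run_loopback()
 *   32-byte TCP header   - data offset 8, i.e. 20 bytes plus 12 bytes
 *                          of options (NOP, NOP, timestamp), ACK set,
 *                          matching the TG3_TSO_* lengths above
 */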
13188
13189 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13190 {
13191         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13192         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13193         u32 budget;
13194         struct sk_buff *skb;
13195         u8 *tx_data, *rx_data;
13196         dma_addr_t map;
13197         int num_pkts, tx_len, rx_len, i, err;
13198         struct tg3_rx_buffer_desc *desc;
13199         struct tg3_napi *tnapi, *rnapi;
13200         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13201
13202         tnapi = &tp->napi[0];
13203         rnapi = &tp->napi[0];
13204         if (tp->irq_cnt > 1) {
13205                 if (tg3_flag(tp, ENABLE_RSS))
13206                         rnapi = &tp->napi[1];
13207                 if (tg3_flag(tp, ENABLE_TSS))
13208                         tnapi = &tp->napi[1];
13209         }
13210         coal_now = tnapi->coal_now | rnapi->coal_now;
13211
13212         err = -EIO;
13213
13214         tx_len = pktsz;
13215         skb = netdev_alloc_skb(tp->dev, tx_len);
13216         if (!skb)
13217                 return -ENOMEM;
13218
13219         tx_data = skb_put(skb, tx_len);
13220         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13221         memset(tx_data + ETH_ALEN, 0x0, 8);
13222
13223         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13224
13225         if (tso_loopback) {
13226                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13227
13228                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13229                               TG3_TSO_TCP_OPT_LEN;
13230
13231                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13232                        sizeof(tg3_tso_header));
13233                 mss = TG3_TSO_MSS;
13234
13235                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13236                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13237
13238                 /* Set the total length field in the IP header */
13239                 iph->tot_len = htons((u16)(mss + hdr_len));
13240
13241                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13242                               TXD_FLAG_CPU_POST_DMA);
13243
13244                 if (tg3_flag(tp, HW_TSO_1) ||
13245                     tg3_flag(tp, HW_TSO_2) ||
13246                     tg3_flag(tp, HW_TSO_3)) {
13247                         struct tcphdr *th;
13248                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13249                         th = (struct tcphdr *)&tx_data[val];
13250                         th->check = 0;
13251                 } else
13252                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13253
13254                 if (tg3_flag(tp, HW_TSO_3)) {
13255                         mss |= (hdr_len & 0xc) << 12;
13256                         if (hdr_len & 0x10)
13257                                 base_flags |= 0x00000010;
13258                         base_flags |= (hdr_len & 0x3e0) << 5;
13259                 } else if (tg3_flag(tp, HW_TSO_2))
13260                         mss |= hdr_len << 9;
13261                 else if (tg3_flag(tp, HW_TSO_1) ||
13262                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13263                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13264                 } else {
13265                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13266                 }
13267
13268                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13269         } else {
13270                 num_pkts = 1;
13271                 data_off = ETH_HLEN;
13272
13273                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13274                     tx_len > VLAN_ETH_FRAME_LEN)
13275                         base_flags |= TXD_FLAG_JMB_PKT;
13276         }
13277
13278         for (i = data_off; i < tx_len; i++)
13279                 tx_data[i] = (u8) (i & 0xff);
13280
13281         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13282         if (pci_dma_mapping_error(tp->pdev, map)) {
13283                 dev_kfree_skb(skb);
13284                 return -EIO;
13285         }
13286
13287         val = tnapi->tx_prod;
13288         tnapi->tx_buffers[val].skb = skb;
13289         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13290
13291         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13292                rnapi->coal_now);
13293
13294         udelay(10);
13295
13296         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13297
13298         budget = tg3_tx_avail(tnapi);
13299         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13300                             base_flags | TXD_FLAG_END, mss, 0)) {
13301                 tnapi->tx_buffers[val].skb = NULL;
13302                 dev_kfree_skb(skb);
13303                 return -EIO;
13304         }
13305
13306         tnapi->tx_prod++;
13307
13308         /* Sync BD data before updating mailbox */
13309         wmb();
13310
13311         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13312         tr32_mailbox(tnapi->prodmbox);
13313
13314         udelay(10);
13315
        /* Poll up to 350 usec (35 x 10 usec); some 10/100 Mbps devices need that long.  */
13317         for (i = 0; i < 35; i++) {
13318                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13319                        coal_now);
13320
13321                 udelay(10);
13322
13323                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13324                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13325                 if ((tx_idx == tnapi->tx_prod) &&
13326                     (rx_idx == (rx_start_idx + num_pkts)))
13327                         break;
13328         }
13329
13330         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13331         dev_kfree_skb(skb);
13332
13333         if (tx_idx != tnapi->tx_prod)
13334                 goto out;
13335
13336         if (rx_idx != rx_start_idx + num_pkts)
13337                 goto out;
13338
13339         val = data_off;
13340         while (rx_idx != rx_start_idx) {
13341                 desc = &rnapi->rx_rcb[rx_start_idx++];
13342                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13343                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13344
13345                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13346                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13347                         goto out;
13348
13349                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13350                          - ETH_FCS_LEN;
13351
13352                 if (!tso_loopback) {
13353                         if (rx_len != tx_len)
13354                                 goto out;
13355
13356                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13357                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13358                                         goto out;
13359                         } else {
13360                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13361                                         goto out;
13362                         }
13363                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13364                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13365                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13366                         goto out;
13367                 }
13368
13369                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13370                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13371                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13372                                              mapping);
13373                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13374                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13375                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13376                                              mapping);
13377                 } else
13378                         goto out;
13379
13380                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13381                                             PCI_DMA_FROMDEVICE);
13382
13383                 rx_data += TG3_RX_OFFSET(tp);
13384                 for (i = data_off; i < rx_len; i++, val++) {
13385                         if (*(rx_data + i) != (u8) (val & 0xff))
13386                                 goto out;
13387                 }
13388         }
13389
13390         err = 0;
13391
13392         /* tg3_free_rings will unmap and free the rx_data */
13393 out:
13394         return err;
13395 }
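/* Summary of the loopback run above: build one frame (optionally a TSO
 * super-frame), DMA-map it, post a single TX descriptor and kick the
 * producer mailbox, then force coalescing to flush status for up to
 * 350 usec.  The run passes only if the TX consumer catches up, the
 * expected number of RX completions arrive, and every payload byte
 * still matches the (i & 0xff) fill pattern written before transmit.
 */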
13396
13397 #define TG3_STD_LOOPBACK_FAILED         1
13398 #define TG3_JMB_LOOPBACK_FAILED         2
13399 #define TG3_TSO_LOOPBACK_FAILED         4
13400 #define TG3_LOOPBACK_FAILED \
13401         (TG3_STD_LOOPBACK_FAILED | \
13402          TG3_JMB_LOOPBACK_FAILED | \
13403          TG3_TSO_LOOPBACK_FAILED)
13404
13405 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13406 {
13407         int err = -EIO;
13408         u32 eee_cap;
13409         u32 jmb_pkt_sz = 9000;
13410
13411         if (tp->dma_limit)
13412                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13413
13414         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13415         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13416
13417         if (!netif_running(tp->dev)) {
13418                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13419                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13420                 if (do_extlpbk)
13421                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13422                 goto done;
13423         }
13424
13425         err = tg3_reset_hw(tp, true);
13426         if (err) {
13427                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13428                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13429                 if (do_extlpbk)
13430                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13431                 goto done;
13432         }
13433
13434         if (tg3_flag(tp, ENABLE_RSS)) {
13435                 int i;
13436
13437                 /* Reroute all rx packets to the 1st queue */
13438                 for (i = MAC_RSS_INDIR_TBL_0;
13439                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13440                         tw32(i, 0x0);
13441         }
13442
13443         /* HW errata - mac loopback fails in some cases on 5780.
13444          * Normal traffic and PHY loopback are not affected by
13445          * errata.  Also, the MAC loopback test is deprecated for
13446          * all newer ASIC revisions.
13447          */
13448         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13449             !tg3_flag(tp, CPMU_PRESENT)) {
13450                 tg3_mac_loopback(tp, true);
13451
13452                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13453                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13454
13455                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13456                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13457                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13458
13459                 tg3_mac_loopback(tp, false);
13460         }
13461
13462         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13463             !tg3_flag(tp, USE_PHYLIB)) {
13464                 int i;
13465
13466                 tg3_phy_lpbk_set(tp, 0, false);
13467
13468                 /* Wait for link */
13469                 for (i = 0; i < 100; i++) {
13470                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13471                                 break;
13472                         mdelay(1);
13473                 }
13474
13475                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13476                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13477                 if (tg3_flag(tp, TSO_CAPABLE) &&
13478                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13479                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13480                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13481                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13482                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13483
13484                 if (do_extlpbk) {
13485                         tg3_phy_lpbk_set(tp, 0, true);
13486
13487                         /* All link indications report up, but the hardware
13488                          * isn't really ready for about 20 msec.  Double it
13489                          * to be sure.
13490                          */
13491                         mdelay(40);
13492
13493                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13494                                 data[TG3_EXT_LOOPB_TEST] |=
13495                                                         TG3_STD_LOOPBACK_FAILED;
13496                         if (tg3_flag(tp, TSO_CAPABLE) &&
13497                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13498                                 data[TG3_EXT_LOOPB_TEST] |=
13499                                                         TG3_TSO_LOOPBACK_FAILED;
13500                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13501                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13502                                 data[TG3_EXT_LOOPB_TEST] |=
13503                                                         TG3_JMB_LOOPBACK_FAILED;
13504                 }
13505
13506                 /* Re-enable gphy autopowerdown. */
13507                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13508                         tg3_phy_toggle_apd(tp, true);
13509         }
13510
13511         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13512                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13513
13514 done:
13515         tp->phy_flags |= eee_cap;
13516
13517         return err;
13518 }
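/* Note: MAC loopback wraps frames inside the MAC and is skipped on
 * 5780 (errata) and on CPMU-bearing parts where it is deprecated; PHY
 * loopback exercises the full internal path, with an optional external
 * (cable) variant.  The RSS indirection table is zeroed first so every
 * looped frame lands on the first RX queue (tp->napi[1] here), which
 * is the queue the test polls.
 */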
13519
13520 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13521                           u64 *data)
13522 {
13523         struct tg3 *tp = netdev_priv(dev);
13524         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13525
13526         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13527                 if (tg3_power_up(tp)) {
13528                         etest->flags |= ETH_TEST_FL_FAILED;
13529                         memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13530                         return;
13531                 }
13532                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13533         }
13534
13535         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13536
13537         if (tg3_test_nvram(tp) != 0) {
13538                 etest->flags |= ETH_TEST_FL_FAILED;
13539                 data[TG3_NVRAM_TEST] = 1;
13540         }
13541         if (!doextlpbk && tg3_test_link(tp)) {
13542                 etest->flags |= ETH_TEST_FL_FAILED;
13543                 data[TG3_LINK_TEST] = 1;
13544         }
13545         if (etest->flags & ETH_TEST_FL_OFFLINE) {
13546                 int err, err2 = 0, irq_sync = 0;
13547
13548                 if (netif_running(dev)) {
13549                         tg3_phy_stop(tp);
13550                         tg3_netif_stop(tp);
13551                         irq_sync = 1;
13552                 }
13553
13554                 tg3_full_lock(tp, irq_sync);
13555                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13556                 err = tg3_nvram_lock(tp);
13557                 tg3_halt_cpu(tp, RX_CPU_BASE);
13558                 if (!tg3_flag(tp, 5705_PLUS))
13559                         tg3_halt_cpu(tp, TX_CPU_BASE);
13560                 if (!err)
13561                         tg3_nvram_unlock(tp);
13562
13563                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13564                         tg3_phy_reset(tp);
13565
13566                 if (tg3_test_registers(tp) != 0) {
13567                         etest->flags |= ETH_TEST_FL_FAILED;
13568                         data[TG3_REGISTER_TEST] = 1;
13569                 }
13570
13571                 if (tg3_test_memory(tp) != 0) {
13572                         etest->flags |= ETH_TEST_FL_FAILED;
13573                         data[TG3_MEMORY_TEST] = 1;
13574                 }
13575
13576                 if (doextlpbk)
13577                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13578
13579                 if (tg3_test_loopback(tp, data, doextlpbk))
13580                         etest->flags |= ETH_TEST_FL_FAILED;
13581
13582                 tg3_full_unlock(tp);
13583
13584                 if (tg3_test_interrupt(tp) != 0) {
13585                         etest->flags |= ETH_TEST_FL_FAILED;
13586                         data[TG3_INTERRUPT_TEST] = 1;
13587                 }
13588
13589                 tg3_full_lock(tp, 0);
13590
13591                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13592                 if (netif_running(dev)) {
13593                         tg3_flag_set(tp, INIT_COMPLETE);
13594                         err2 = tg3_restart_hw(tp, true);
13595                         if (!err2)
13596                                 tg3_netif_start(tp);
13597                 }
13598
13599                 tg3_full_unlock(tp);
13600
13601                 if (irq_sync && !err2)
13602                         tg3_phy_start(tp);
13603         }
13604         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13605                 tg3_power_down_prepare(tp);
13607 }
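/* Note: this is the ethtool self-test entry point ("ethtool -t").  The
 * online tests (NVRAM, link) run against the live device; the offline
 * battery additionally halts the chip, runs the register, memory,
 * loopback and interrupt tests, then restarts the hardware, restoring
 * the PHY only if it was stopped and the restart succeeded.
 */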
13608
13609 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13610 {
13611         struct tg3 *tp = netdev_priv(dev);
13612         struct hwtstamp_config stmpconf;
13613
13614         if (!tg3_flag(tp, PTP_CAPABLE))
13615                 return -EOPNOTSUPP;
13616
13617         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13618                 return -EFAULT;
13619
13620         if (stmpconf.flags)
13621                 return -EINVAL;
13622
13623         if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13624             stmpconf.tx_type != HWTSTAMP_TX_OFF)
13625                 return -ERANGE;
13626
13627         switch (stmpconf.rx_filter) {
13628         case HWTSTAMP_FILTER_NONE:
13629                 tp->rxptpctl = 0;
13630                 break;
13631         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13632                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13633                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13634                 break;
13635         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13636                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13637                                TG3_RX_PTP_CTL_SYNC_EVNT;
13638                 break;
13639         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13640                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13641                                TG3_RX_PTP_CTL_DELAY_REQ;
13642                 break;
13643         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13644                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13645                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13646                 break;
13647         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13648                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13649                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13650                 break;
13651         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13652                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13653                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13654                 break;
13655         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13656                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13657                                TG3_RX_PTP_CTL_SYNC_EVNT;
13658                 break;
13659         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13660                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13661                                TG3_RX_PTP_CTL_SYNC_EVNT;
13662                 break;
13663         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13664                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13665                                TG3_RX_PTP_CTL_SYNC_EVNT;
13666                 break;
13667         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13668                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13669                                TG3_RX_PTP_CTL_DELAY_REQ;
13670                 break;
13671         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13672                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13673                                TG3_RX_PTP_CTL_DELAY_REQ;
13674                 break;
13675         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13676                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13677                                TG3_RX_PTP_CTL_DELAY_REQ;
13678                 break;
13679         default:
13680                 return -ERANGE;
13681         }
13682
13683         if (netif_running(dev) && tp->rxptpctl)
13684                 tw32(TG3_RX_PTP_CTL,
13685                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13686
13687         if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13688                 tg3_flag_set(tp, TX_TSTAMP_EN);
13689         else
13690                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13691
13692         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13693                 -EFAULT : 0;
13694 }
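/* Illustrative userspace sketch (hypothetical socket fd and "eth0"
 * name, not part of this file) of how the SIOCSHWTSTAMP path above is
 * reached:
 *
 *      struct hwtstamp_config cfg = {
 *              .tx_type   = HWTSTAMP_TX_ON,
 *              .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *      };
 *      struct ifreq ifr = {0};
 *
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *      ifr.ifr_data = (char *)&cfg;
 *      ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return the driver has copied the (possibly adjusted) config back
 * through ifr_data, so callers should re-read cfg to learn what was
 * actually enabled.
 */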
13695
13696 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13697 {
13698         struct tg3 *tp = netdev_priv(dev);
13699         struct hwtstamp_config stmpconf;
13700
13701         if (!tg3_flag(tp, PTP_CAPABLE))
13702                 return -EOPNOTSUPP;
13703
13704         stmpconf.flags = 0;
13705         stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13706                             HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13707
13708         switch (tp->rxptpctl) {
13709         case 0:
13710                 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13711                 break;
13712         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13713                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13714                 break;
13715         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13716                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13717                 break;
13718         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13719                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13720                 break;
13721         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13722                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13723                 break;
13724         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13725                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13726                 break;
13727         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13728                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13729                 break;
13730         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13731                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13732                 break;
13733         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13734                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13735                 break;
13736         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13737                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13738                 break;
13739         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13740                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13741                 break;
13742         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13743                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13744                 break;
13745         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13746                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13747                 break;
13748         default:
13749                 WARN_ON_ONCE(1);
13750                 return -ERANGE;
13751         }
13752
13753         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13754                 -EFAULT : 0;
13755 }
13756
13757 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13758 {
13759         struct mii_ioctl_data *data = if_mii(ifr);
13760         struct tg3 *tp = netdev_priv(dev);
13761         int err;
13762
13763         if (tg3_flag(tp, USE_PHYLIB)) {
13764                 struct phy_device *phydev;
13765                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13766                         return -EAGAIN;
13767                 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
13768                 return phy_mii_ioctl(phydev, ifr, cmd);
13769         }
13770
13771         switch (cmd) {
13772         case SIOCGMIIPHY:
13773                 data->phy_id = tp->phy_addr;
13774
                /* fall through */
13776         case SIOCGMIIREG: {
13777                 u32 mii_regval;
13778
13779                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13780                         break;                  /* We have no PHY */
13781
13782                 if (!netif_running(dev))
13783                         return -EAGAIN;
13784
13785                 spin_lock_bh(&tp->lock);
13786                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13787                                     data->reg_num & 0x1f, &mii_regval);
13788                 spin_unlock_bh(&tp->lock);
13789
13790                 data->val_out = mii_regval;
13791
13792                 return err;
13793         }
13794
13795         case SIOCSMIIREG:
13796                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13797                         break;                  /* We have no PHY */
13798
13799                 if (!netif_running(dev))
13800                         return -EAGAIN;
13801
13802                 spin_lock_bh(&tp->lock);
13803                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13804                                      data->reg_num & 0x1f, data->val_in);
13805                 spin_unlock_bh(&tp->lock);
13806
13807                 return err;
13808
13809         case SIOCSHWTSTAMP:
13810                 return tg3_hwtstamp_set(dev, ifr);
13811
13812         case SIOCGHWTSTAMP:
13813                 return tg3_hwtstamp_get(dev, ifr);
13814
13815         default:
13816                 /* do nothing */
13817                 break;
13818         }
13819         return -EOPNOTSUPP;
13820 }
13821
13822 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13823 {
13824         struct tg3 *tp = netdev_priv(dev);
13825
13826         memcpy(ec, &tp->coal, sizeof(*ec));
13827         return 0;
13828 }
13829
13830 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13831 {
13832         struct tg3 *tp = netdev_priv(dev);
13833         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13834         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13835
13836         if (!tg3_flag(tp, 5705_PLUS)) {
13837                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13838                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13839                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13840                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13841         }
13842
13843         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13844             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13845             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13846             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13847             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13848             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13849             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13850             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13851             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13852             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13853                 return -EINVAL;
13854
13855         /* No rx interrupts will be generated if both are zero */
13856         if ((ec->rx_coalesce_usecs == 0) &&
13857             (ec->rx_max_coalesced_frames == 0))
13858                 return -EINVAL;
13859
13860         /* No tx interrupts will be generated if both are zero */
13861         if ((ec->tx_coalesce_usecs == 0) &&
13862             (ec->tx_max_coalesced_frames == 0))
13863                 return -EINVAL;
13864
13865         /* Only copy relevant parameters, ignore all others. */
13866         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13867         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13868         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13869         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13870         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13871         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13872         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13873         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13874         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13875
13876         if (netif_running(dev)) {
13877                 tg3_full_lock(tp, 0);
13878                 __tg3_set_coalesce(tp, &tp->coal);
13879                 tg3_full_unlock(tp);
13880         }
13881         return 0;
13882 }
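/* Note: these fields map directly onto "ethtool -C"; e.g. (illustrative
 * values) "ethtool -C eth0 rx-usecs 20 rx-frames 5" sets the
 * rx_coalesce_usecs/rx_max_coalesced_frames pair above.  Both members
 * of an rx or tx pair may not be zero at once, since that would
 * suppress the corresponding interrupts entirely.
 */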
13883
13884 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
13885 {
13886         struct tg3 *tp = netdev_priv(dev);
13887
13888         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13889                 netdev_warn(tp->dev, "Board does not support EEE!\n");
13890                 return -EOPNOTSUPP;
13891         }
13892
13893         if (edata->advertised != tp->eee.advertised) {
13894                 netdev_warn(tp->dev,
13895                             "Direct manipulation of EEE advertisement is not supported\n");
13896                 return -EINVAL;
13897         }
13898
13899         if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
13900                 netdev_warn(tp->dev,
                            "Maximum supported Tx LPI timer is %#x usec\n",
13902                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
13903                 return -EINVAL;
13904         }
13905
13906         tp->eee = *edata;
13907
13908         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
13909         tg3_warn_mgmt_link_flap(tp);
13910
13911         if (netif_running(tp->dev)) {
13912                 tg3_full_lock(tp, 0);
13913                 tg3_setup_eee(tp);
13914                 tg3_phy_reset(tp);
13915                 tg3_full_unlock(tp);
13916         }
13917
13918         return 0;
13919 }
13920
13921 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
13922 {
13923         struct tg3 *tp = netdev_priv(dev);
13924
13925         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13926                 netdev_warn(tp->dev,
13927                             "Board does not support EEE!\n");
13928                 return -EOPNOTSUPP;
13929         }
13930
13931         *edata = tp->eee;
13932         return 0;
13933 }
13934
13935 static const struct ethtool_ops tg3_ethtool_ops = {
13936         .get_settings           = tg3_get_settings,
13937         .set_settings           = tg3_set_settings,
13938         .get_drvinfo            = tg3_get_drvinfo,
13939         .get_regs_len           = tg3_get_regs_len,
13940         .get_regs               = tg3_get_regs,
13941         .get_wol                = tg3_get_wol,
13942         .set_wol                = tg3_set_wol,
13943         .get_msglevel           = tg3_get_msglevel,
13944         .set_msglevel           = tg3_set_msglevel,
13945         .nway_reset             = tg3_nway_reset,
13946         .get_link               = ethtool_op_get_link,
13947         .get_eeprom_len         = tg3_get_eeprom_len,
13948         .get_eeprom             = tg3_get_eeprom,
13949         .set_eeprom             = tg3_set_eeprom,
13950         .get_ringparam          = tg3_get_ringparam,
13951         .set_ringparam          = tg3_set_ringparam,
13952         .get_pauseparam         = tg3_get_pauseparam,
13953         .set_pauseparam         = tg3_set_pauseparam,
13954         .self_test              = tg3_self_test,
13955         .get_strings            = tg3_get_strings,
13956         .set_phys_id            = tg3_set_phys_id,
13957         .get_ethtool_stats      = tg3_get_ethtool_stats,
13958         .get_coalesce           = tg3_get_coalesce,
13959         .set_coalesce           = tg3_set_coalesce,
13960         .get_sset_count         = tg3_get_sset_count,
13961         .get_rxnfc              = tg3_get_rxnfc,
13962         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
13963         .get_rxfh_indir         = tg3_get_rxfh_indir,
13964         .set_rxfh_indir         = tg3_set_rxfh_indir,
13965         .get_channels           = tg3_get_channels,
13966         .set_channels           = tg3_set_channels,
13967         .get_ts_info            = tg3_get_ts_info,
13968         .get_eee                = tg3_get_eee,
13969         .set_eee                = tg3_set_eee,
13970 };
13971
13972 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13973                                                 struct rtnl_link_stats64 *stats)
13974 {
13975         struct tg3 *tp = netdev_priv(dev);
13976
13977         spin_lock_bh(&tp->lock);
13978         if (!tp->hw_stats) {
13979                 spin_unlock_bh(&tp->lock);
13980                 return &tp->net_stats_prev;
13981         }
13982
13983         tg3_get_nstats(tp, stats);
13984         spin_unlock_bh(&tp->lock);
13985
13986         return stats;
13987 }
13988
13989 static void tg3_set_rx_mode(struct net_device *dev)
13990 {
13991         struct tg3 *tp = netdev_priv(dev);
13992
13993         if (!netif_running(dev))
13994                 return;
13995
13996         tg3_full_lock(tp, 0);
13997         __tg3_set_rx_mode(dev);
13998         tg3_full_unlock(tp);
13999 }
14000
14001 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14002                                int new_mtu)
14003 {
14004         dev->mtu = new_mtu;
14005
14006         if (new_mtu > ETH_DATA_LEN) {
14007                 if (tg3_flag(tp, 5780_CLASS)) {
14008                         netdev_update_features(dev);
14009                         tg3_flag_clear(tp, TSO_CAPABLE);
14010                 } else {
14011                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14012                 }
14013         } else {
14014                 if (tg3_flag(tp, 5780_CLASS)) {
14015                         tg3_flag_set(tp, TSO_CAPABLE);
14016                         netdev_update_features(dev);
14017                 }
14018                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14019         }
14020 }
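/* Note: 5780-class parts cannot do TSO and jumbo frames at once, so
 * TSO_CAPABLE is toggled here (with netdev_update_features() to keep
 * the advertised feature flags in sync); other jumbo-capable chips
 * simply enable the dedicated jumbo RX ring instead.
 */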
14021
14022 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14023 {
14024         struct tg3 *tp = netdev_priv(dev);
14025         int err;
14026         bool reset_phy = false;
14027
14028         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
14029                 return -EINVAL;
14030
14031         if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is brought up.
                 */
14035                 tg3_set_mtu(dev, tp, new_mtu);
14036                 return 0;
14037         }
14038
14039         tg3_phy_stop(tp);
14040
14041         tg3_netif_stop(tp);
14042
14043         tg3_full_lock(tp, 1);
14044
14045         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14046
14047         tg3_set_mtu(dev, tp, new_mtu);
14048
14049         /* Reset PHY, otherwise the read DMA engine will be in a mode that
14050          * breaks all requests to 256 bytes.
14051          */
14052         if (tg3_asic_rev(tp) == ASIC_REV_57766)
14053                 reset_phy = true;
14054
14055         err = tg3_restart_hw(tp, reset_phy);
14056
14057         if (!err)
14058                 tg3_netif_start(tp);
14059
14060         tg3_full_unlock(tp);
14061
14062         if (!err)
14063                 tg3_phy_start(tp);
14064
14065         return err;
14066 }
14067
14068 static const struct net_device_ops tg3_netdev_ops = {
14069         .ndo_open               = tg3_open,
14070         .ndo_stop               = tg3_close,
14071         .ndo_start_xmit         = tg3_start_xmit,
14072         .ndo_get_stats64        = tg3_get_stats64,
14073         .ndo_validate_addr      = eth_validate_addr,
14074         .ndo_set_rx_mode        = tg3_set_rx_mode,
14075         .ndo_set_mac_address    = tg3_set_mac_addr,
14076         .ndo_do_ioctl           = tg3_ioctl,
14077         .ndo_tx_timeout         = tg3_tx_timeout,
14078         .ndo_change_mtu         = tg3_change_mtu,
14079         .ndo_fix_features       = tg3_fix_features,
14080         .ndo_set_features       = tg3_set_features,
14081 #ifdef CONFIG_NET_POLL_CONTROLLER
14082         .ndo_poll_controller    = tg3_poll_controller,
14083 #endif
14084 };
14085
14086 static void tg3_get_eeprom_size(struct tg3 *tp)
14087 {
14088         u32 cursize, val, magic;
14089
14090         tp->nvram_size = EEPROM_CHIP_SIZE;
14091
14092         if (tg3_nvram_read(tp, 0, &magic) != 0)
14093                 return;
14094
14095         if ((magic != TG3_EEPROM_MAGIC) &&
14096             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14097             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14098                 return;
14099
14100         /*
14101          * Size the chip by reading offsets at increasing powers of two.
14102          * When we encounter our validation signature, we know the addressing
14103          * has wrapped around, and thus have our chip size.
14104          */
14105         cursize = 0x10;
14106
14107         while (cursize < tp->nvram_size) {
14108                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14109                         return;
14110
14111                 if (val == magic)
14112                         break;
14113
14114                 cursize <<= 1;
14115         }
14116
14117         tp->nvram_size = cursize;
14118 }
14119
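/* Editor's sketch (not driver code): the wrap-around sizing loop from
 * tg3_get_eeprom_size() in isolation.  read_word() is a hypothetical
 * stand-in for tg3_nvram_read(); everything here is illustrative.
 */
#if 0
static int probe_eeprom_size(u32 magic, u32 limit, u32 *size)
{
	u32 off = 0x10, val;

	while (off < limit) {
		if (read_word(off, &val))	/* hypothetical accessor */
			return -EIO;
		if (val == magic)	/* addressing wrapped back to 0 */
			break;
		off <<= 1;
	}
	*size = off;	/* first wrapping offset == device size */
	return 0;
}
#endif
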
14120 static void tg3_get_nvram_size(struct tg3 *tp)
14121 {
14122         u32 val;
14123
14124         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14125                 return;
14126
14127         /* Selfboot format */
14128         if (val != TG3_EEPROM_MAGIC) {
14129                 tg3_get_eeprom_size(tp);
14130                 return;
14131         }
14132
14133         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14134                 if (val != 0) {
14135                         /* This is confusing.  We want to operate on the
14136                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14137                          * call will read from NVRAM and byteswap the data
14138                          * according to the byteswapping settings for all
14139                          * other register accesses.  This ensures the data we
14140                          * want will always reside in the lower 16-bits.
14141                          * However, the data in NVRAM is in LE format, which
14142                          * means the data from the NVRAM read will always be
14143                          * opposite the endianness of the CPU.  The 16-bit
14144                          * byteswap then brings the data to CPU endianness.
14145                          */
14146                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14147                         return;
14148                 }
14149         }
14150         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14151 }
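
/* Editor's note: a worked example of the swab16() above, under the
 * stated assumption that the masked low half arrives byte-swapped
 * relative to the CPU.  If the NVRAM stores a size of 512 KB, i.e.
 * 0x0200 at offset 0xf2, the read yields 0x0002 in the low 16 bits
 * and swab16(0x0002) == 0x0200, so tp->nvram_size becomes
 * 0x0200 * 1024 = 512 KB.
 */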
14152
14153 static void tg3_get_nvram_info(struct tg3 *tp)
14154 {
14155         u32 nvcfg1;
14156
14157         nvcfg1 = tr32(NVRAM_CFG1);
14158         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14159                 tg3_flag_set(tp, FLASH);
14160         } else {
14161                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14162                 tw32(NVRAM_CFG1, nvcfg1);
14163         }
14164
14165         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14166             tg3_flag(tp, 5780_CLASS)) {
14167                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14168                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14169                         tp->nvram_jedecnum = JEDEC_ATMEL;
14170                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14171                         tg3_flag_set(tp, NVRAM_BUFFERED);
14172                         break;
14173                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14174                         tp->nvram_jedecnum = JEDEC_ATMEL;
14175                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14176                         break;
14177                 case FLASH_VENDOR_ATMEL_EEPROM:
14178                         tp->nvram_jedecnum = JEDEC_ATMEL;
14179                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14180                         tg3_flag_set(tp, NVRAM_BUFFERED);
14181                         break;
14182                 case FLASH_VENDOR_ST:
14183                         tp->nvram_jedecnum = JEDEC_ST;
14184                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14185                         tg3_flag_set(tp, NVRAM_BUFFERED);
14186                         break;
14187                 case FLASH_VENDOR_SAIFUN:
14188                         tp->nvram_jedecnum = JEDEC_SAIFUN;
14189                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14190                         break;
14191                 case FLASH_VENDOR_SST_SMALL:
14192                 case FLASH_VENDOR_SST_LARGE:
14193                         tp->nvram_jedecnum = JEDEC_SST;
14194                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14195                         break;
14196                 }
14197         } else {
14198                 tp->nvram_jedecnum = JEDEC_ATMEL;
14199                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14200                 tg3_flag_set(tp, NVRAM_BUFFERED);
14201         }
14202 }
14203
14204 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14205 {
14206         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14207         case FLASH_5752PAGE_SIZE_256:
14208                 tp->nvram_pagesize = 256;
14209                 break;
14210         case FLASH_5752PAGE_SIZE_512:
14211                 tp->nvram_pagesize = 512;
14212                 break;
14213         case FLASH_5752PAGE_SIZE_1K:
14214                 tp->nvram_pagesize = 1024;
14215                 break;
14216         case FLASH_5752PAGE_SIZE_2K:
14217                 tp->nvram_pagesize = 2048;
14218                 break;
14219         case FLASH_5752PAGE_SIZE_4K:
14220                 tp->nvram_pagesize = 4096;
14221                 break;
14222         case FLASH_5752PAGE_SIZE_264:
14223                 tp->nvram_pagesize = 264;
14224                 break;
14225         case FLASH_5752PAGE_SIZE_528:
14226                 tp->nvram_pagesize = 528;
14227                 break;
14228         }
14229 }
14230
14231 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14232 {
14233         u32 nvcfg1;
14234
14235         nvcfg1 = tr32(NVRAM_CFG1);
14236
14237         /* NVRAM protection for TPM */
14238         if (nvcfg1 & (1 << 27))
14239                 tg3_flag_set(tp, PROTECTED_NVRAM);
14240
14241         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14242         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14243         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14244                 tp->nvram_jedecnum = JEDEC_ATMEL;
14245                 tg3_flag_set(tp, NVRAM_BUFFERED);
14246                 break;
14247         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14248                 tp->nvram_jedecnum = JEDEC_ATMEL;
14249                 tg3_flag_set(tp, NVRAM_BUFFERED);
14250                 tg3_flag_set(tp, FLASH);
14251                 break;
14252         case FLASH_5752VENDOR_ST_M45PE10:
14253         case FLASH_5752VENDOR_ST_M45PE20:
14254         case FLASH_5752VENDOR_ST_M45PE40:
14255                 tp->nvram_jedecnum = JEDEC_ST;
14256                 tg3_flag_set(tp, NVRAM_BUFFERED);
14257                 tg3_flag_set(tp, FLASH);
14258                 break;
14259         }
14260
14261         if (tg3_flag(tp, FLASH)) {
14262                 tg3_nvram_get_pagesize(tp, nvcfg1);
14263         } else {
14264                 /* For eeprom, set pagesize to maximum eeprom size */
14265                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14266
14267                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14268                 tw32(NVRAM_CFG1, nvcfg1);
14269         }
14270 }
14271
14272 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14273 {
14274         u32 nvcfg1, protect = 0;
14275
14276         nvcfg1 = tr32(NVRAM_CFG1);
14277
14278         /* NVRAM protection for TPM */
14279         if (nvcfg1 & (1 << 27)) {
14280                 tg3_flag_set(tp, PROTECTED_NVRAM);
14281                 protect = 1;
14282         }
14283
14284         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14285         switch (nvcfg1) {
14286         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14287         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14288         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14289         case FLASH_5755VENDOR_ATMEL_FLASH_5:
14290                 tp->nvram_jedecnum = JEDEC_ATMEL;
14291                 tg3_flag_set(tp, NVRAM_BUFFERED);
14292                 tg3_flag_set(tp, FLASH);
14293                 tp->nvram_pagesize = 264;
14294                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14295                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14296                         tp->nvram_size = (protect ? 0x3e200 :
14297                                           TG3_NVRAM_SIZE_512KB);
14298                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14299                         tp->nvram_size = (protect ? 0x1f200 :
14300                                           TG3_NVRAM_SIZE_256KB);
14301                 else
14302                         tp->nvram_size = (protect ? 0x1f200 :
14303                                           TG3_NVRAM_SIZE_128KB);
14304                 break;
14305         case FLASH_5752VENDOR_ST_M45PE10:
14306         case FLASH_5752VENDOR_ST_M45PE20:
14307         case FLASH_5752VENDOR_ST_M45PE40:
14308                 tp->nvram_jedecnum = JEDEC_ST;
14309                 tg3_flag_set(tp, NVRAM_BUFFERED);
14310                 tg3_flag_set(tp, FLASH);
14311                 tp->nvram_pagesize = 256;
14312                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14313                         tp->nvram_size = (protect ?
14314                                           TG3_NVRAM_SIZE_64KB :
14315                                           TG3_NVRAM_SIZE_128KB);
14316                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14317                         tp->nvram_size = (protect ?
14318                                           TG3_NVRAM_SIZE_64KB :
14319                                           TG3_NVRAM_SIZE_256KB);
14320                 else
14321                         tp->nvram_size = (protect ?
14322                                           TG3_NVRAM_SIZE_128KB :
14323                                           TG3_NVRAM_SIZE_512KB);
14324                 break;
14325         }
14326 }
14327
14328 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14329 {
14330         u32 nvcfg1;
14331
14332         nvcfg1 = tr32(NVRAM_CFG1);
14333
14334         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14335         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14336         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14337         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14338         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14339                 tp->nvram_jedecnum = JEDEC_ATMEL;
14340                 tg3_flag_set(tp, NVRAM_BUFFERED);
14341                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14342
14343                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14344                 tw32(NVRAM_CFG1, nvcfg1);
14345                 break;
14346         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14347         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14348         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14349         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14350                 tp->nvram_jedecnum = JEDEC_ATMEL;
14351                 tg3_flag_set(tp, NVRAM_BUFFERED);
14352                 tg3_flag_set(tp, FLASH);
14353                 tp->nvram_pagesize = 264;
14354                 break;
14355         case FLASH_5752VENDOR_ST_M45PE10:
14356         case FLASH_5752VENDOR_ST_M45PE20:
14357         case FLASH_5752VENDOR_ST_M45PE40:
14358                 tp->nvram_jedecnum = JEDEC_ST;
14359                 tg3_flag_set(tp, NVRAM_BUFFERED);
14360                 tg3_flag_set(tp, FLASH);
14361                 tp->nvram_pagesize = 256;
14362                 break;
14363         }
14364 }
14365
14366 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14367 {
14368         u32 nvcfg1, protect = 0;
14369
14370         nvcfg1 = tr32(NVRAM_CFG1);
14371
14372         /* NVRAM protection for TPM */
14373         if (nvcfg1 & (1 << 27)) {
14374                 tg3_flag_set(tp, PROTECTED_NVRAM);
14375                 protect = 1;
14376         }
14377
14378         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14379         switch (nvcfg1) {
14380         case FLASH_5761VENDOR_ATMEL_ADB021D:
14381         case FLASH_5761VENDOR_ATMEL_ADB041D:
14382         case FLASH_5761VENDOR_ATMEL_ADB081D:
14383         case FLASH_5761VENDOR_ATMEL_ADB161D:
14384         case FLASH_5761VENDOR_ATMEL_MDB021D:
14385         case FLASH_5761VENDOR_ATMEL_MDB041D:
14386         case FLASH_5761VENDOR_ATMEL_MDB081D:
14387         case FLASH_5761VENDOR_ATMEL_MDB161D:
14388                 tp->nvram_jedecnum = JEDEC_ATMEL;
14389                 tg3_flag_set(tp, NVRAM_BUFFERED);
14390                 tg3_flag_set(tp, FLASH);
14391                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14392                 tp->nvram_pagesize = 256;
14393                 break;
14394         case FLASH_5761VENDOR_ST_A_M45PE20:
14395         case FLASH_5761VENDOR_ST_A_M45PE40:
14396         case FLASH_5761VENDOR_ST_A_M45PE80:
14397         case FLASH_5761VENDOR_ST_A_M45PE16:
14398         case FLASH_5761VENDOR_ST_M_M45PE20:
14399         case FLASH_5761VENDOR_ST_M_M45PE40:
14400         case FLASH_5761VENDOR_ST_M_M45PE80:
14401         case FLASH_5761VENDOR_ST_M_M45PE16:
14402                 tp->nvram_jedecnum = JEDEC_ST;
14403                 tg3_flag_set(tp, NVRAM_BUFFERED);
14404                 tg3_flag_set(tp, FLASH);
14405                 tp->nvram_pagesize = 256;
14406                 break;
14407         }
14408
14409         if (protect) {
14410                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14411         } else {
14412                 switch (nvcfg1) {
14413                 case FLASH_5761VENDOR_ATMEL_ADB161D:
14414                 case FLASH_5761VENDOR_ATMEL_MDB161D:
14415                 case FLASH_5761VENDOR_ST_A_M45PE16:
14416                 case FLASH_5761VENDOR_ST_M_M45PE16:
14417                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14418                         break;
14419                 case FLASH_5761VENDOR_ATMEL_ADB081D:
14420                 case FLASH_5761VENDOR_ATMEL_MDB081D:
14421                 case FLASH_5761VENDOR_ST_A_M45PE80:
14422                 case FLASH_5761VENDOR_ST_M_M45PE80:
14423                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14424                         break;
14425                 case FLASH_5761VENDOR_ATMEL_ADB041D:
14426                 case FLASH_5761VENDOR_ATMEL_MDB041D:
14427                 case FLASH_5761VENDOR_ST_A_M45PE40:
14428                 case FLASH_5761VENDOR_ST_M_M45PE40:
14429                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14430                         break;
14431                 case FLASH_5761VENDOR_ATMEL_ADB021D:
14432                 case FLASH_5761VENDOR_ATMEL_MDB021D:
14433                 case FLASH_5761VENDOR_ST_A_M45PE20:
14434                 case FLASH_5761VENDOR_ST_M_M45PE20:
14435                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14436                         break;
14437                 }
14438         }
14439 }
14440
14441 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14442 {
14443         tp->nvram_jedecnum = JEDEC_ATMEL;
14444         tg3_flag_set(tp, NVRAM_BUFFERED);
14445         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14446 }
14447
14448 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14449 {
14450         u32 nvcfg1;
14451
14452         nvcfg1 = tr32(NVRAM_CFG1);
14453
14454         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14455         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14456         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14457                 tp->nvram_jedecnum = JEDEC_ATMEL;
14458                 tg3_flag_set(tp, NVRAM_BUFFERED);
14459                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14460
14461                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14462                 tw32(NVRAM_CFG1, nvcfg1);
14463                 return;
14464         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14465         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14466         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14467         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14468         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14469         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14470         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14471                 tp->nvram_jedecnum = JEDEC_ATMEL;
14472                 tg3_flag_set(tp, NVRAM_BUFFERED);
14473                 tg3_flag_set(tp, FLASH);
14474
14475                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14476                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14477                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14478                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14479                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14480                         break;
14481                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14482                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14483                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14484                         break;
14485                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14486                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14487                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14488                         break;
14489                 }
14490                 break;
14491         case FLASH_5752VENDOR_ST_M45PE10:
14492         case FLASH_5752VENDOR_ST_M45PE20:
14493         case FLASH_5752VENDOR_ST_M45PE40:
14494                 tp->nvram_jedecnum = JEDEC_ST;
14495                 tg3_flag_set(tp, NVRAM_BUFFERED);
14496                 tg3_flag_set(tp, FLASH);
14497
14498                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14499                 case FLASH_5752VENDOR_ST_M45PE10:
14500                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14501                         break;
14502                 case FLASH_5752VENDOR_ST_M45PE20:
14503                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14504                         break;
14505                 case FLASH_5752VENDOR_ST_M45PE40:
14506                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14507                         break;
14508                 }
14509                 break;
14510         default:
14511                 tg3_flag_set(tp, NO_NVRAM);
14512                 return;
14513         }
14514
14515         tg3_nvram_get_pagesize(tp, nvcfg1);
14516         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14517                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14518 }
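
/* Editor's note: the 264/528 page-size check above (repeated in the
 * 5717 and 5720 routines below) appears to separate Atmel
 * AT45DB-style DataFlash, whose native pages are 264 or 528 bytes
 * and need the controller's page/byte address translation, from
 * power-of-two parts that can be addressed linearly (hence
 * NO_NVRAM_ADDR_TRANS).  That rationale is an assumption; the driver
 * itself only encodes the check.
 */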
14519
14520
14521 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14522 {
14523         u32 nvcfg1;
14524
14525         nvcfg1 = tr32(NVRAM_CFG1);
14526
14527         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14528         case FLASH_5717VENDOR_ATMEL_EEPROM:
14529         case FLASH_5717VENDOR_MICRO_EEPROM:
14530                 tp->nvram_jedecnum = JEDEC_ATMEL;
14531                 tg3_flag_set(tp, NVRAM_BUFFERED);
14532                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14533
14534                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14535                 tw32(NVRAM_CFG1, nvcfg1);
14536                 return;
14537         case FLASH_5717VENDOR_ATMEL_MDB011D:
14538         case FLASH_5717VENDOR_ATMEL_ADB011B:
14539         case FLASH_5717VENDOR_ATMEL_ADB011D:
14540         case FLASH_5717VENDOR_ATMEL_MDB021D:
14541         case FLASH_5717VENDOR_ATMEL_ADB021B:
14542         case FLASH_5717VENDOR_ATMEL_ADB021D:
14543         case FLASH_5717VENDOR_ATMEL_45USPT:
14544                 tp->nvram_jedecnum = JEDEC_ATMEL;
14545                 tg3_flag_set(tp, NVRAM_BUFFERED);
14546                 tg3_flag_set(tp, FLASH);
14547
14548                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14549                 case FLASH_5717VENDOR_ATMEL_MDB021D:
14550                         /* Detect size with tg3_nvram_get_size() */
14551                         break;
14552                 case FLASH_5717VENDOR_ATMEL_ADB021B:
14553                 case FLASH_5717VENDOR_ATMEL_ADB021D:
14554                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14555                         break;
14556                 default:
14557                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14558                         break;
14559                 }
14560                 break;
14561         case FLASH_5717VENDOR_ST_M_M25PE10:
14562         case FLASH_5717VENDOR_ST_A_M25PE10:
14563         case FLASH_5717VENDOR_ST_M_M45PE10:
14564         case FLASH_5717VENDOR_ST_A_M45PE10:
14565         case FLASH_5717VENDOR_ST_M_M25PE20:
14566         case FLASH_5717VENDOR_ST_A_M25PE20:
14567         case FLASH_5717VENDOR_ST_M_M45PE20:
14568         case FLASH_5717VENDOR_ST_A_M45PE20:
14569         case FLASH_5717VENDOR_ST_25USPT:
14570         case FLASH_5717VENDOR_ST_45USPT:
14571                 tp->nvram_jedecnum = JEDEC_ST;
14572                 tg3_flag_set(tp, NVRAM_BUFFERED);
14573                 tg3_flag_set(tp, FLASH);
14574
14575                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14576                 case FLASH_5717VENDOR_ST_M_M25PE20:
14577                 case FLASH_5717VENDOR_ST_M_M45PE20:
14578                         /* Detect size with tg3_nvram_get_size() */
14579                         break;
14580                 case FLASH_5717VENDOR_ST_A_M25PE20:
14581                 case FLASH_5717VENDOR_ST_A_M45PE20:
14582                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14583                         break;
14584                 default:
14585                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14586                         break;
14587                 }
14588                 break;
14589         default:
14590                 tg3_flag_set(tp, NO_NVRAM);
14591                 return;
14592         }
14593
14594         tg3_nvram_get_pagesize(tp, nvcfg1);
14595         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14596                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14597 }
14598
14599 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14600 {
14601         u32 nvcfg1, nvmpinstrp;
14602
14603         nvcfg1 = tr32(NVRAM_CFG1);
14604         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14605
14606         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14607                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14608                         tg3_flag_set(tp, NO_NVRAM);
14609                         return;
14610                 }
14611
14612                 switch (nvmpinstrp) {
14613                 case FLASH_5762_EEPROM_HD:
14614                         nvmpinstrp = FLASH_5720_EEPROM_HD;
14615                         break;
14616                 case FLASH_5762_EEPROM_LD:
14617                         nvmpinstrp = FLASH_5720_EEPROM_LD;
14618                         break;
14619                 case FLASH_5720VENDOR_M_ST_M45PE20:
14620                         /* This pinstrap supports multiple sizes, so force it
14621                          * to read the actual size from location 0xf0.
14622                          */
14623                         nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14624                         break;
14625                 }
14626         }
14627
14628         switch (nvmpinstrp) {
14629         case FLASH_5720_EEPROM_HD:
14630         case FLASH_5720_EEPROM_LD:
14631                 tp->nvram_jedecnum = JEDEC_ATMEL;
14632                 tg3_flag_set(tp, NVRAM_BUFFERED);
14633
14634                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14635                 tw32(NVRAM_CFG1, nvcfg1);
14636                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14637                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14638                 else
14639                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14640                 return;
14641         case FLASH_5720VENDOR_M_ATMEL_DB011D:
14642         case FLASH_5720VENDOR_A_ATMEL_DB011B:
14643         case FLASH_5720VENDOR_A_ATMEL_DB011D:
14644         case FLASH_5720VENDOR_M_ATMEL_DB021D:
14645         case FLASH_5720VENDOR_A_ATMEL_DB021B:
14646         case FLASH_5720VENDOR_A_ATMEL_DB021D:
14647         case FLASH_5720VENDOR_M_ATMEL_DB041D:
14648         case FLASH_5720VENDOR_A_ATMEL_DB041B:
14649         case FLASH_5720VENDOR_A_ATMEL_DB041D:
14650         case FLASH_5720VENDOR_M_ATMEL_DB081D:
14651         case FLASH_5720VENDOR_A_ATMEL_DB081D:
14652         case FLASH_5720VENDOR_ATMEL_45USPT:
14653                 tp->nvram_jedecnum = JEDEC_ATMEL;
14654                 tg3_flag_set(tp, NVRAM_BUFFERED);
14655                 tg3_flag_set(tp, FLASH);
14656
14657                 switch (nvmpinstrp) {
14658                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14659                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14660                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14661                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14662                         break;
14663                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14664                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14665                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14666                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14667                         break;
14668                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14669                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14670                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14671                         break;
14672                 default:
14673                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14674                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14675                         break;
14676                 }
14677                 break;
14678         case FLASH_5720VENDOR_M_ST_M25PE10:
14679         case FLASH_5720VENDOR_M_ST_M45PE10:
14680         case FLASH_5720VENDOR_A_ST_M25PE10:
14681         case FLASH_5720VENDOR_A_ST_M45PE10:
14682         case FLASH_5720VENDOR_M_ST_M25PE20:
14683         case FLASH_5720VENDOR_M_ST_M45PE20:
14684         case FLASH_5720VENDOR_A_ST_M25PE20:
14685         case FLASH_5720VENDOR_A_ST_M45PE20:
14686         case FLASH_5720VENDOR_M_ST_M25PE40:
14687         case FLASH_5720VENDOR_M_ST_M45PE40:
14688         case FLASH_5720VENDOR_A_ST_M25PE40:
14689         case FLASH_5720VENDOR_A_ST_M45PE40:
14690         case FLASH_5720VENDOR_M_ST_M25PE80:
14691         case FLASH_5720VENDOR_M_ST_M45PE80:
14692         case FLASH_5720VENDOR_A_ST_M25PE80:
14693         case FLASH_5720VENDOR_A_ST_M45PE80:
14694         case FLASH_5720VENDOR_ST_25USPT:
14695         case FLASH_5720VENDOR_ST_45USPT:
14696                 tp->nvram_jedecnum = JEDEC_ST;
14697                 tg3_flag_set(tp, NVRAM_BUFFERED);
14698                 tg3_flag_set(tp, FLASH);
14699
14700                 switch (nvmpinstrp) {
14701                 case FLASH_5720VENDOR_M_ST_M25PE20:
14702                 case FLASH_5720VENDOR_M_ST_M45PE20:
14703                 case FLASH_5720VENDOR_A_ST_M25PE20:
14704                 case FLASH_5720VENDOR_A_ST_M45PE20:
14705                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14706                         break;
14707                 case FLASH_5720VENDOR_M_ST_M25PE40:
14708                 case FLASH_5720VENDOR_M_ST_M45PE40:
14709                 case FLASH_5720VENDOR_A_ST_M25PE40:
14710                 case FLASH_5720VENDOR_A_ST_M45PE40:
14711                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14712                         break;
14713                 case FLASH_5720VENDOR_M_ST_M25PE80:
14714                 case FLASH_5720VENDOR_M_ST_M45PE80:
14715                 case FLASH_5720VENDOR_A_ST_M25PE80:
14716                 case FLASH_5720VENDOR_A_ST_M45PE80:
14717                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14718                         break;
14719                 default:
14720                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14721                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14722                         break;
14723                 }
14724                 break;
14725         default:
14726                 tg3_flag_set(tp, NO_NVRAM);
14727                 return;
14728         }
14729
14730         tg3_nvram_get_pagesize(tp, nvcfg1);
14731         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14732                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14733
14734         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14735                 u32 val;
14736
14737                 if (tg3_nvram_read(tp, 0, &val))
14738                         return;
14739
14740                 if (val != TG3_EEPROM_MAGIC &&
14741                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14742                         tg3_flag_set(tp, NO_NVRAM);
14743         }
14744 }
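
/* Editor's note: for the 5762, a recognized pinstrap alone is not
 * trusted; the tail of the routine above re-reads offset 0 and flags
 * NO_NVRAM unless one of the known EEPROM magic signatures is
 * present.
 */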
14745
14746 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14747 static void tg3_nvram_init(struct tg3 *tp)
14748 {
14749         if (tg3_flag(tp, IS_SSB_CORE)) {
14750                 /* Neither NVRAM nor EEPROM on the SSB Broadcom GigE core. */
14751                 tg3_flag_clear(tp, NVRAM);
14752                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14753                 tg3_flag_set(tp, NO_NVRAM);
14754                 return;
14755         }
14756
14757         tw32_f(GRC_EEPROM_ADDR,
14758              (EEPROM_ADDR_FSM_RESET |
14759               (EEPROM_DEFAULT_CLOCK_PERIOD <<
14760                EEPROM_ADDR_CLKPERD_SHIFT)));
14761
14762         msleep(1);
14763
14764         /* Enable seeprom accesses. */
14765         tw32_f(GRC_LOCAL_CTRL,
14766              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14767         udelay(100);
14768
14769         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14770             tg3_asic_rev(tp) != ASIC_REV_5701) {
14771                 tg3_flag_set(tp, NVRAM);
14772
14773                 if (tg3_nvram_lock(tp)) {
14774                         netdev_warn(tp->dev,
14775                                     "Cannot get nvram lock, %s failed\n",
14776                                     __func__);
14777                         return;
14778                 }
14779                 tg3_enable_nvram_access(tp);
14780
14781                 tp->nvram_size = 0;
14782
14783                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14784                         tg3_get_5752_nvram_info(tp);
14785                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14786                         tg3_get_5755_nvram_info(tp);
14787                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14788                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
14789                          tg3_asic_rev(tp) == ASIC_REV_5785)
14790                         tg3_get_5787_nvram_info(tp);
14791                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14792                         tg3_get_5761_nvram_info(tp);
14793                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14794                         tg3_get_5906_nvram_info(tp);
14795                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14796                          tg3_flag(tp, 57765_CLASS))
14797                         tg3_get_57780_nvram_info(tp);
14798                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14799                          tg3_asic_rev(tp) == ASIC_REV_5719)
14800                         tg3_get_5717_nvram_info(tp);
14801                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14802                          tg3_asic_rev(tp) == ASIC_REV_5762)
14803                         tg3_get_5720_nvram_info(tp);
14804                 else
14805                         tg3_get_nvram_info(tp);
14806
14807                 if (tp->nvram_size == 0)
14808                         tg3_get_nvram_size(tp);
14809
14810                 tg3_disable_nvram_access(tp);
14811                 tg3_nvram_unlock(tp);
14812
14813         } else {
14814                 tg3_flag_clear(tp, NVRAM);
14815                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14816
14817                 tg3_get_eeprom_size(tp);
14818         }
14819 }
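
/* Editor's summary of the probe flow above: SSB GigE cores are
 * flagged NO_NVRAM outright; 5700/5701 fall back to the legacy
 * EEPROM sizing path; all other chips take the NVRAM lock, enable
 * access, dispatch to their per-ASIC info routine, size the part via
 * tg3_get_nvram_size() if the routine did not, then disable access
 * and drop the lock.
 */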
14820
14821 struct subsys_tbl_ent {
14822         u16 subsys_vendor, subsys_devid;
14823         u32 phy_id;
14824 };
14825
14826 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14827         /* Broadcom boards. */
14828         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14829           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14830         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14831           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14832         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14833           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14834         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14835           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14836         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14837           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14838         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14839           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14840         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14841           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14842         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14843           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14844         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14845           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14846         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14847           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14848         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14849           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14850
14851         /* 3com boards. */
14852         { TG3PCI_SUBVENDOR_ID_3COM,
14853           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14854         { TG3PCI_SUBVENDOR_ID_3COM,
14855           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14856         { TG3PCI_SUBVENDOR_ID_3COM,
14857           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14858         { TG3PCI_SUBVENDOR_ID_3COM,
14859           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14860         { TG3PCI_SUBVENDOR_ID_3COM,
14861           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14862
14863         /* DELL boards. */
14864         { TG3PCI_SUBVENDOR_ID_DELL,
14865           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14866         { TG3PCI_SUBVENDOR_ID_DELL,
14867           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14868         { TG3PCI_SUBVENDOR_ID_DELL,
14869           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14870         { TG3PCI_SUBVENDOR_ID_DELL,
14871           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14872
14873         /* Compaq boards. */
14874         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14875           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14876         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14877           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14878         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14879           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14880         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14881           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14882         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14883           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14884
14885         /* IBM boards. */
14886         { TG3PCI_SUBVENDOR_ID_IBM,
14887           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14888 };
14889
14890 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14891 {
14892         int i;
14893
14894         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14895                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14896                      tp->pdev->subsystem_vendor) &&
14897                     (subsys_id_to_phy_id[i].subsys_devid ==
14898                      tp->pdev->subsystem_device))
14899                         return &subsys_id_to_phy_id[i];
14900         }
14901         return NULL;
14902 }
14903
14904 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14905 {
14906         u32 val;
14907
14908         tp->phy_id = TG3_PHY_ID_INVALID;
14909         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14910
14911         /* Assume an onboard, WOL-capable device by default.  */
14912         tg3_flag_set(tp, EEPROM_WRITE_PROT);
14913         tg3_flag_set(tp, WOL_CAP);
14914
14915         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14916                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14917                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14918                         tg3_flag_set(tp, IS_NIC);
14919                 }
14920                 val = tr32(VCPU_CFGSHDW);
14921                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14922                         tg3_flag_set(tp, ASPM_WORKAROUND);
14923                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14924                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14925                         tg3_flag_set(tp, WOL_ENABLE);
14926                         device_set_wakeup_enable(&tp->pdev->dev, true);
14927                 }
14928                 goto done;
14929         }
14930
14931         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14932         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14933                 u32 nic_cfg, led_cfg;
14934                 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
14935                 u32 nic_phy_id, ver, eeprom_phy_id;
14936                 int eeprom_phy_serdes = 0;
14937
14938                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14939                 tp->nic_sram_data_cfg = nic_cfg;
14940
14941                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14942                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14943                 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14944                     tg3_asic_rev(tp) != ASIC_REV_5701 &&
14945                     tg3_asic_rev(tp) != ASIC_REV_5703 &&
14946                     (ver > 0) && (ver < 0x100))
14947                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14948
14949                 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14950                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14951
14952                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14953                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
14954                     tg3_asic_rev(tp) == ASIC_REV_5720)
14955                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
14956
14957                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14958                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14959                         eeprom_phy_serdes = 1;
14960
14961                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14962                 if (nic_phy_id != 0) {
14963                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14964                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14965
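			/* Editor's note: this repacks the two halves of the
			 * SRAM phy ID word into the driver's private phy_id
			 * layout, mirroring how tg3_phy_probe() later builds
			 * the same layout from MII_PHYSID1/MII_PHYSID2, so
			 * both sources compare under TG3_PHY_ID_MASK.
			 */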
14966                         eeprom_phy_id  = (id1 >> 16) << 10;
14967                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
14968                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
14969                 } else
14970                         eeprom_phy_id = 0;
14971
14972                 tp->phy_id = eeprom_phy_id;
14973                 if (eeprom_phy_serdes) {
14974                         if (!tg3_flag(tp, 5705_PLUS))
14975                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14976                         else
14977                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14978                 }
14979
14980                 if (tg3_flag(tp, 5750_PLUS))
14981                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14982                                     SHASTA_EXT_LED_MODE_MASK);
14983                 else
14984                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14985
14986                 switch (led_cfg) {
14987                 default:
14988                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14989                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14990                         break;
14991
14992                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14993                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14994                         break;
14995
14996                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14997                         tp->led_ctrl = LED_CTRL_MODE_MAC;
14998
14999                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15000                          * read from some older 5700/5701 bootcode.
15001                          */
15002                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15003                             tg3_asic_rev(tp) == ASIC_REV_5701)
15004                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15005
15006                         break;
15007
15008                 case SHASTA_EXT_LED_SHARED:
15009                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
15010                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15011                             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15012                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15013                                                  LED_CTRL_MODE_PHY_2);
15014
15015                         if (tg3_flag(tp, 5717_PLUS) ||
15016                             tg3_asic_rev(tp) == ASIC_REV_5762)
15017                                 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15018                                                 LED_CTRL_BLINK_RATE_MASK;
15019
15020                         break;
15021
15022                 case SHASTA_EXT_LED_MAC:
15023                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15024                         break;
15025
15026                 case SHASTA_EXT_LED_COMBO:
15027                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
15028                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15029                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15030                                                  LED_CTRL_MODE_PHY_2);
15031                         break;
15032
15033                 }
15034
15035                 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15036                      tg3_asic_rev(tp) == ASIC_REV_5701) &&
15037                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15038                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15039
15040                 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15041                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15042
15043                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15044                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15045                         if ((tp->pdev->subsystem_vendor ==
15046                              PCI_VENDOR_ID_ARIMA) &&
15047                             (tp->pdev->subsystem_device == 0x205a ||
15048                              tp->pdev->subsystem_device == 0x2063))
15049                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15050                 } else {
15051                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15052                         tg3_flag_set(tp, IS_NIC);
15053                 }
15054
15055                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15056                         tg3_flag_set(tp, ENABLE_ASF);
15057                         if (tg3_flag(tp, 5750_PLUS))
15058                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15059                 }
15060
15061                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15062                     tg3_flag(tp, 5750_PLUS))
15063                         tg3_flag_set(tp, ENABLE_APE);
15064
15065                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15066                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15067                         tg3_flag_clear(tp, WOL_CAP);
15068
15069                 if (tg3_flag(tp, WOL_CAP) &&
15070                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15071                         tg3_flag_set(tp, WOL_ENABLE);
15072                         device_set_wakeup_enable(&tp->pdev->dev, true);
15073                 }
15074
15075                 if (cfg2 & (1 << 17))
15076                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15077
15078                 /* SerDes signal pre-emphasis in register 0x590 is set by
15079                  * the bootcode if bit 18 is set. */
15080                 if (cfg2 & (1 << 18))
15081                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15082
15083                 if ((tg3_flag(tp, 57765_PLUS) ||
15084                      (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15085                       tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15086                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15087                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15088
15089                 if (tg3_flag(tp, PCI_EXPRESS)) {
15090                         u32 cfg3;
15091
15092                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15093                         if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15094                             !tg3_flag(tp, 57765_PLUS) &&
15095                             (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15096                                 tg3_flag_set(tp, ASPM_WORKAROUND);
15097                         if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15098                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15099                         if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15100                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15101                 }
15102
15103                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15104                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15105                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15106                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15107                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15108                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15109
15110                 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15111                         tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15112         }
15113 done:
15114         if (tg3_flag(tp, WOL_CAP))
15115                 device_set_wakeup_enable(&tp->pdev->dev,
15116                                          tg3_flag(tp, WOL_ENABLE));
15117         else
15118                 device_set_wakeup_capable(&tp->pdev->dev, false);
15119 }
15120
15121 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15122 {
15123         int i, err;
15124         u32 val2, off = offset * 8;
15125
15126         err = tg3_nvram_lock(tp);
15127         if (err)
15128                 return err;
15129
15130         tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15131         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15132                         APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15133         tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15134         udelay(10);
15135
15136         for (i = 0; i < 100; i++) {
15137                 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15138                 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15139                         *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15140                         break;
15141                 }
15142                 udelay(10);
15143         }
15144
15145         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15146
15147         tg3_nvram_unlock(tp);
15148         if (val2 & APE_OTP_STATUS_CMD_DONE)
15149                 return 0;
15150
15151         return -EBUSY;
15152 }
15153
15154 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15155 {
15156         int i;
15157         u32 val;
15158
15159         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15160         tw32(OTP_CTRL, cmd);
15161
15162         /* Wait for up to 1 ms for command to execute. */
15163         for (i = 0; i < 100; i++) {
15164                 val = tr32(OTP_STATUS);
15165                 if (val & OTP_STATUS_CMD_DONE)
15166                         break;
15167                 udelay(10);
15168         }
15169
15170         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15171 }
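
/* Editor's note: the "up to 1 ms" bound above is simply 100 polling
 * iterations times udelay(10), i.e. 100 * 10 us = 1000 us.
 * tg3_ape_otp_read() above uses the same kick-then-poll pattern
 * against TG3_APE_OTP_STATUS.
 */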
15172
15173 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15174  * configuration is a 32-bit value that straddles the alignment boundary.
15175  * We do two 32-bit reads and then shift and merge the results.
15176  */
15177 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15178 {
15179         u32 bhalf_otp, thalf_otp;
15180
15181         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15182
15183         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15184                 return 0;
15185
15186         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15187
15188         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15189                 return 0;
15190
15191         thalf_otp = tr32(OTP_READ_DATA);
15192
15193         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15194
15195         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15196                 return 0;
15197
15198         bhalf_otp = tr32(OTP_READ_DATA);
15199
15200         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15201 }
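
/* Editor's note: a worked example of the final merge.  With
 * thalf_otp == 0xAAAA1234 and bhalf_otp == 0x5678BBBB,
 * ((0xAAAA1234 & 0x0000ffff) << 16) == 0x12340000 and
 * (0x5678BBBB >> 16) == 0x00005678, so the returned gphy config is
 * 0x12345678: the low half of the first read supplies the high half
 * of the result, and the high half of the second read supplies the
 * low half.
 */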
15202
15203 static void tg3_phy_init_link_config(struct tg3 *tp)
15204 {
15205         u32 adv = ADVERTISED_Autoneg;
15206
15207         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15208                 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15209                         adv |= ADVERTISED_1000baseT_Half;
15210                 adv |= ADVERTISED_1000baseT_Full;
15211         }
15212
15213         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15214                 adv |= ADVERTISED_100baseT_Half |
15215                        ADVERTISED_100baseT_Full |
15216                        ADVERTISED_10baseT_Half |
15217                        ADVERTISED_10baseT_Full |
15218                        ADVERTISED_TP;
15219         else
15220                 adv |= ADVERTISED_FIBRE;
15221
15222         tp->link_config.advertising = adv;
15223         tp->link_config.speed = SPEED_UNKNOWN;
15224         tp->link_config.duplex = DUPLEX_UNKNOWN;
15225         tp->link_config.autoneg = AUTONEG_ENABLE;
15226         tp->link_config.active_speed = SPEED_UNKNOWN;
15227         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15228
15229         tp->old_link = -1;
15230 }
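
/* Editor's note: for a typical copper PHY (neither 10_100_ONLY nor
 * DISABLE_1G_HD_ADV nor ANY_SERDES set), the mask built above is
 * Autoneg plus 10/100 half and full, 1000baseT half and full, and
 * TP; a serdes PHY instead gets Autoneg, 1000baseT half and full,
 * and FIBRE.
 */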
15231
15232 static int tg3_phy_probe(struct tg3 *tp)
15233 {
15234         u32 hw_phy_id_1, hw_phy_id_2;
15235         u32 hw_phy_id, hw_phy_id_masked;
15236         int err;
15237
15238         /* flow control autonegotiation is default behavior */
15239         tg3_flag_set(tp, PAUSE_AUTONEG);
15240         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15241
15242         if (tg3_flag(tp, ENABLE_APE)) {
15243                 switch (tp->pci_fn) {
15244                 case 0:
15245                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15246                         break;
15247                 case 1:
15248                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15249                         break;
15250                 case 2:
15251                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15252                         break;
15253                 case 3:
15254                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15255                         break;
15256                 }
15257         }
15258
15259         if (!tg3_flag(tp, ENABLE_ASF) &&
15260             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15261             !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15262                 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15263                                    TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15264
15265         if (tg3_flag(tp, USE_PHYLIB))
15266                 return tg3_phy_init(tp);
15267
15268         /* Reading the PHY ID register can conflict with ASF
15269          * firmware access to the PHY hardware.
15270          */
15271         err = 0;
15272         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15273                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15274         } else {
15275                 /* Now read the physical PHY_ID from the chip and verify
15276                  * that it is sane.  If it doesn't look good, we fall back
15277                  * to the PHY_ID found in the eeprom area or, failing
15278                  * that, to the hard-coded subsystem-table entry.
15279                  */
15280                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15281                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15282
15283                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15284                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15285                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15286
15287                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15288         }
15289
15290         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15291                 tp->phy_id = hw_phy_id;
15292                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15293                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15294                 else
15295                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15296         } else {
15297                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15298                         /* Do nothing, phy ID already set up in
15299                          * tg3_get_eeprom_hw_cfg().
15300                          */
15301                 } else {
15302                         struct subsys_tbl_ent *p;
15303
15304                         /* No eeprom signature?  Try the hardcoded
15305                          * subsys device table.
15306                          */
15307                         p = tg3_lookup_by_subsys(tp);
15308                         if (p) {
15309                                 tp->phy_id = p->phy_id;
15310                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15311                                 /* So far we have seen the IDs 0xbc050cd0,
15312                                  * 0xbc050f80 and 0xbc050c30 on devices
15313                                  * connected to a BCM4785, and there are
15314                                  * probably more.  For now, just assume that
15315                                  * the phy is supported when it is connected
15316                                  * to an SSB core.
15317                                  */
15318                                 return -ENODEV;
15319                         }
15320
15321                         if (!tp->phy_id ||
15322                             tp->phy_id == TG3_PHY_ID_BCM8002)
15323                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15324                 }
15325         }
15326
15327         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15328             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15329              tg3_asic_rev(tp) == ASIC_REV_5720 ||
15330              tg3_asic_rev(tp) == ASIC_REV_57766 ||
15331              tg3_asic_rev(tp) == ASIC_REV_5762 ||
15332              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15333               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15334              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15335               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15336                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15337
15338                 tp->eee.supported = SUPPORTED_100baseT_Full |
15339                                     SUPPORTED_1000baseT_Full;
15340                 tp->eee.advertised = ADVERTISED_100baseT_Full |
15341                                      ADVERTISED_1000baseT_Full;
15342                 tp->eee.eee_enabled = 1;
15343                 tp->eee.tx_lpi_enabled = 1;
15344                 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15345         }
15346
15347         tg3_phy_init_link_config(tp);
15348
15349         if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15350             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15351             !tg3_flag(tp, ENABLE_APE) &&
15352             !tg3_flag(tp, ENABLE_ASF)) {
15353                 u32 bmsr, dummy;
15354
15355                 tg3_readphy(tp, MII_BMSR, &bmsr);
15356                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15357                     (bmsr & BMSR_LSTATUS))
15358                         goto skip_phy_reset;
15359
15360                 err = tg3_phy_reset(tp);
15361                 if (err)
15362                         return err;
15363
15364                 tg3_phy_set_wirespeed(tp);
15365
15366                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15367                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15368                                             tp->link_config.flowctrl);
15369
15370                         tg3_writephy(tp, MII_BMCR,
15371                                      BMCR_ANENABLE | BMCR_ANRESTART);
15372                 }
15373         }
15374
15375 skip_phy_reset:
15376         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15377                 err = tg3_init_5401phy_dsp(tp);
15378                 if (err)
15379                         return err;
15380
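                /* Note: the 5401 DSP init is run a second time below; the
                 * err from this second pass is what gets returned.
                 */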
15381                 err = tg3_init_5401phy_dsp(tp);
15382         }
15383
15384         return err;
15385 }
15386
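/* A short orientation for the VPD walk below (standard PCI VPD layout):
 * the read-only section is a large-resource tag (PCI_VPD_LRDT_RO_DATA)
 * whose payload is a sequence of keyword fields, each laid out as a
 * 2-byte keyword, a 1-byte length, then the data bytes.  The
 * pci_vpd_find_tag()/pci_vpd_find_info_keyword() helpers return offsets
 * into that buffer, which is why every field length is re-checked
 * against block_end/vpdlen before copying.
 */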
15387 static void tg3_read_vpd(struct tg3 *tp)
15388 {
15389         u8 *vpd_data;
15390         unsigned int block_end, rosize, len;
15391         u32 vpdlen;
15392         int j, i = 0;
15393
15394         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15395         if (!vpd_data)
15396                 goto out_no_vpd;
15397
15398         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15399         if (i < 0)
15400                 goto out_not_found;
15401
15402         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15403         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15404         i += PCI_VPD_LRDT_TAG_SIZE;
15405
15406         if (block_end > vpdlen)
15407                 goto out_not_found;
15408
15409         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15410                                       PCI_VPD_RO_KEYWORD_MFR_ID);
15411         if (j > 0) {
15412                 len = pci_vpd_info_field_size(&vpd_data[j]);
15413
15414                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15415                 if (j + len > block_end || len != 4 ||
15416                     memcmp(&vpd_data[j], "1028", 4))
15417                         goto partno;
15418
15419                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15420                                               PCI_VPD_RO_KEYWORD_VENDOR0);
15421                 if (j < 0)
15422                         goto partno;
15423
15424                 len = pci_vpd_info_field_size(&vpd_data[j]);
15425
15426                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15427                 if (j + len > block_end)
15428                         goto partno;
15429
15430                 if (len >= sizeof(tp->fw_ver))
15431                         len = sizeof(tp->fw_ver) - 1;
15432                 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15433                 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15434                          &vpd_data[j]);
15435         }
15436
15437 partno:
15438         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15439                                       PCI_VPD_RO_KEYWORD_PARTNO);
15440         if (i < 0)
15441                 goto out_not_found;
15442
15443         len = pci_vpd_info_field_size(&vpd_data[i]);
15444
15445         i += PCI_VPD_INFO_FLD_HDR_SIZE;
15446         if (len > TG3_BPN_SIZE ||
15447             (len + i) > vpdlen)
15448                 goto out_not_found;
15449
15450         memcpy(tp->board_part_number, &vpd_data[i], len);
15451
15452 out_not_found:
15453         kfree(vpd_data);
15454         if (tp->board_part_number[0])
15455                 return;
15456
15457 out_no_vpd:
15458         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15459                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15460                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15461                         strcpy(tp->board_part_number, "BCM5717");
15462                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15463                         strcpy(tp->board_part_number, "BCM5718");
15464                 else
15465                         goto nomatch;
15466         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15467                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15468                         strcpy(tp->board_part_number, "BCM57780");
15469                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15470                         strcpy(tp->board_part_number, "BCM57760");
15471                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15472                         strcpy(tp->board_part_number, "BCM57790");
15473                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15474                         strcpy(tp->board_part_number, "BCM57788");
15475                 else
15476                         goto nomatch;
15477         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15478                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15479                         strcpy(tp->board_part_number, "BCM57761");
15480                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15481                         strcpy(tp->board_part_number, "BCM57765");
15482                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15483                         strcpy(tp->board_part_number, "BCM57781");
15484                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15485                         strcpy(tp->board_part_number, "BCM57785");
15486                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15487                         strcpy(tp->board_part_number, "BCM57791");
15488                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15489                         strcpy(tp->board_part_number, "BCM57795");
15490                 else
15491                         goto nomatch;
15492         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15493                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15494                         strcpy(tp->board_part_number, "BCM57762");
15495                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15496                         strcpy(tp->board_part_number, "BCM57766");
15497                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15498                         strcpy(tp->board_part_number, "BCM57782");
15499                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15500                         strcpy(tp->board_part_number, "BCM57786");
15501                 else
15502                         goto nomatch;
15503         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15504                 strcpy(tp->board_part_number, "BCM95906");
15505         } else {
15506 nomatch:
15507                 strcpy(tp->board_part_number, "none");
15508         }
15509 }
15510
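/* Sanity-check an NVRAM firmware image header: the first word must carry
 * the 0x0c000000 signature in its top bits and the second word must be
 * zero for the image to be treated as valid (this mirrors the newver
 * test in tg3_read_bc_ver() below).
 */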
15511 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15512 {
15513         u32 val;
15514
15515         if (tg3_nvram_read(tp, offset, &val) ||
15516             (val & 0xfc000000) != 0x0c000000 ||
15517             tg3_nvram_read(tp, offset + 4, &val) ||
15518             val != 0)
15519                 return 0;
15520
15521         return 1;
15522 }
15523
15524 static void tg3_read_bc_ver(struct tg3 *tp)
15525 {
15526         u32 val, offset, start, ver_offset;
15527         int i, dst_off;
15528         bool newver = false;
15529
15530         if (tg3_nvram_read(tp, 0xc, &offset) ||
15531             tg3_nvram_read(tp, 0x4, &start))
15532                 return;
15533
15534         offset = tg3_nvram_logical_addr(tp, offset);
15535
15536         if (tg3_nvram_read(tp, offset, &val))
15537                 return;
15538
15539         if ((val & 0xfc000000) == 0x0c000000) {
15540                 if (tg3_nvram_read(tp, offset + 4, &val))
15541                         return;
15542
15543                 if (val == 0)
15544                         newver = true;
15545         }
15546
15547         dst_off = strlen(tp->fw_ver);
15548
15549         if (newver) {
15550                 if (TG3_VER_SIZE - dst_off < 16 ||
15551                     tg3_nvram_read(tp, offset + 8, &ver_offset))
15552                         return;
15553
15554                 offset = offset + ver_offset - start;
15555                 for (i = 0; i < 16; i += 4) {
15556                         __be32 v;
15557                         if (tg3_nvram_read_be32(tp, offset + i, &v))
15558                                 return;
15559
15560                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15561                 }
15562         } else {
15563                 u32 major, minor;
15564
15565                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15566                         return;
15567
15568                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15569                         TG3_NVM_BCVER_MAJSFT;
15570                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15571                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15572                          "v%d.%02d", major, minor);
15573         }
15574 }
15575
15576 static void tg3_read_hwsb_ver(struct tg3 *tp)
15577 {
15578         u32 val, major, minor;
15579
15580         /* Use native endian representation */
15581         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15582                 return;
15583
15584         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15585                 TG3_NVM_HWSB_CFG1_MAJSFT;
15586         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15587                 TG3_NVM_HWSB_CFG1_MINSFT;
15588
15589         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15590 }
15591
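/* Append a selfboot ("sb") version string.  Worked example of the build
 * suffix logic below: major/minor are printed as " v%d.%02d" and a
 * non-zero build (1..26) is appended as a letter, 'a' + build - 1, so
 * major 1, minor 2, build 2 renders as "sb v1.02b".
 */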
15592 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15593 {
15594         u32 offset, major, minor, build;
15595
15596         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15597
15598         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15599                 return;
15600
15601         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15602         case TG3_EEPROM_SB_REVISION_0:
15603                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15604                 break;
15605         case TG3_EEPROM_SB_REVISION_2:
15606                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15607                 break;
15608         case TG3_EEPROM_SB_REVISION_3:
15609                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15610                 break;
15611         case TG3_EEPROM_SB_REVISION_4:
15612                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15613                 break;
15614         case TG3_EEPROM_SB_REVISION_5:
15615                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15616                 break;
15617         case TG3_EEPROM_SB_REVISION_6:
15618                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15619                 break;
15620         default:
15621                 return;
15622         }
15623
15624         if (tg3_nvram_read(tp, offset, &val))
15625                 return;
15626
15627         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15628                 TG3_EEPROM_SB_EDH_BLD_SHFT;
15629         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15630                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15631         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15632
15633         if (minor > 99 || build > 26)
15634                 return;
15635
15636         offset = strlen(tp->fw_ver);
15637         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15638                  " v%d.%02d", major, minor);
15639
15640         if (build > 0) {
15641                 offset = strlen(tp->fw_ver);
15642                 if (offset < TG3_VER_SIZE - 1)
15643                         tp->fw_ver[offset] = 'a' + build - 1;
15644         }
15645 }
15646
15647 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15648 {
15649         u32 val, offset, start;
15650         int i, vlen;
15651
15652         for (offset = TG3_NVM_DIR_START;
15653              offset < TG3_NVM_DIR_END;
15654              offset += TG3_NVM_DIRENT_SIZE) {
15655                 if (tg3_nvram_read(tp, offset, &val))
15656                         return;
15657
15658                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15659                         break;
15660         }
15661
15662         if (offset == TG3_NVM_DIR_END)
15663                 return;
15664
15665         if (!tg3_flag(tp, 5705_PLUS))
15666                 start = 0x08000000;
15667         else if (tg3_nvram_read(tp, offset - 4, &start))
15668                 return;
15669
15670         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15671             !tg3_fw_img_is_valid(tp, offset) ||
15672             tg3_nvram_read(tp, offset + 8, &val))
15673                 return;
15674
15675         offset += val - start;
15676
15677         vlen = strlen(tp->fw_ver);
15678
15679         tp->fw_ver[vlen++] = ',';
15680         tp->fw_ver[vlen++] = ' ';
15681
15682         for (i = 0; i < 4; i++) {
15683                 __be32 v;
15684                 if (tg3_nvram_read_be32(tp, offset, &v))
15685                         return;
15686
15687                 offset += sizeof(v);
15688
15689                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15690                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15691                         break;
15692                 }
15693
15694                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15695                 vlen += sizeof(v);
15696         }
15697 }
15698
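/* Probe the management processor (APE) for NCSI support.  The APE shared
 * memory is only trusted once its signature word matches and its firmware
 * reports ready; only then is the NCSI feature bit honored.
 */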
15699 static void tg3_probe_ncsi(struct tg3 *tp)
15700 {
15701         u32 apedata;
15702
15703         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15704         if (apedata != APE_SEG_SIG_MAGIC)
15705                 return;
15706
15707         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15708         if (!(apedata & APE_FW_STATUS_READY))
15709                 return;
15710
15711         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15712                 tg3_flag_set(tp, APE_HAS_NCSI);
15713 }
15714
15715 static void tg3_read_dash_ver(struct tg3 *tp)
15716 {
15717         int vlen;
15718         u32 apedata;
15719         char *fwtype;
15720
15721         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15722
15723         if (tg3_flag(tp, APE_HAS_NCSI))
15724                 fwtype = "NCSI";
15725         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15726                 fwtype = "SMASH";
15727         else
15728                 fwtype = "DASH";
15729
15730         vlen = strlen(tp->fw_ver);
15731
15732         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15733                  fwtype,
15734                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15735                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15736                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15737                  (apedata & APE_FW_VERSION_BLDMSK));
15738 }
15739
15740 static void tg3_read_otp_ver(struct tg3 *tp)
15741 {
15742         u32 val, val2;
15743
15744         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15745                 return;
15746
15747         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15748             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15749             TG3_OTP_MAGIC0_VALID(val)) {
15750                 u64 val64 = (u64) val << 32 | val2;
15751                 u32 ver = 0;
15752                 int i, vlen;
15753
15754                 for (i = 0; i < 7; i++) {
15755                         if ((val64 & 0xff) == 0)
15756                                 break;
15757                         ver = val64 & 0xff;
15758                         val64 >>= 8;
15759                 }
15760                 vlen = strlen(tp->fw_ver);
15761                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15762         }
15763 }
15764
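/* Assemble tp->fw_ver from up to three sources, in order: any VPD-derived
 * prefix (set in tg3_read_vpd()), the bootcode/selfboot version from
 * NVRAM, and finally a management-firmware suffix (NCSI/DASH/SMASH or
 * legacy ASF) when applicable.
 */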
15765 static void tg3_read_fw_ver(struct tg3 *tp)
15766 {
15767         u32 val;
15768         bool vpd_vers = false;
15769
15770         if (tp->fw_ver[0] != 0)
15771                 vpd_vers = true;
15772
15773         if (tg3_flag(tp, NO_NVRAM)) {
15774                 strcat(tp->fw_ver, "sb");
15775                 tg3_read_otp_ver(tp);
15776                 return;
15777         }
15778
15779         if (tg3_nvram_read(tp, 0, &val))
15780                 return;
15781
15782         if (val == TG3_EEPROM_MAGIC)
15783                 tg3_read_bc_ver(tp);
15784         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15785                 tg3_read_sb_ver(tp, val);
15786         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15787                 tg3_read_hwsb_ver(tp);
15788
15789         if (tg3_flag(tp, ENABLE_ASF)) {
15790                 if (tg3_flag(tp, ENABLE_APE)) {
15791                         tg3_probe_ncsi(tp);
15792                         if (!vpd_vers)
15793                                 tg3_read_dash_ver(tp);
15794                 } else if (!vpd_vers) {
15795                         tg3_read_mgmtfw_ver(tp);
15796                 }
15797         }
15798
15799         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15800 }
15801
15802 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15803 {
15804         if (tg3_flag(tp, LRG_PROD_RING_CAP))
15805                 return TG3_RX_RET_MAX_SIZE_5717;
15806         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15807                 return TG3_RX_RET_MAX_SIZE_5700;
15808         else
15809                 return TG3_RX_RET_MAX_SIZE_5705;
15810 }
15811
15812 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15813         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15814         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15815         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15816         { },
15817 };
15818
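/* PCI devfn packs the device number in bits 7:3 and the function number
 * in bits 2:0, so (devfn & ~7) | func below enumerates all eight
 * functions of the same physical device when looking for the mate of a
 * dual-port chip.
 */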
15819 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15820 {
15821         struct pci_dev *peer;
15822         unsigned int func, devnr = tp->pdev->devfn & ~7;
15823
15824         for (func = 0; func < 8; func++) {
15825                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15826                 if (peer && peer != tp->pdev)
15827                         break;
15828                 pci_dev_put(peer);
15829         }
15830         /* The 5704 can be configured in single-port mode; set peer to
15831          * tp->pdev in that case.
15832          */
15833         if (!peer) {
15834                 peer = tp->pdev;
15835                 return peer;
15836         }
15837
15838         /*
15839          * We don't need to keep the refcount elevated; there's no way
15840          * to remove one half of this device without removing the other.
15841          */
15842         pci_dev_put(peer);
15843
15844         return peer;
15845 }
15846
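/* A sketch of the revision encoding assumed below: pci_chip_rev_id packs
 * the ASIC revision in its upper bits and the chip/metal revision below
 * that (see the tg3_asic_rev()/tg3_chip_rev()/tg3_chip_rev_id() helpers
 * in tg3.h), so e.g. every CHIPREV_ID_5750_xx value maps to the single
 * ASIC_REV_5750.
 */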
15847 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15848 {
15849         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15850         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15851                 u32 reg;
15852
15853                 /* All devices that use the alternate
15854                  * ASIC REV location have a CPMU.
15855                  */
15856                 tg3_flag_set(tp, CPMU_PRESENT);
15857
15858                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15859                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15860                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15861                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15862                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15863                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
15864                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
15865                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15866                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15867                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
15868                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
15869                         reg = TG3PCI_GEN2_PRODID_ASICREV;
15870                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15871                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15872                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15873                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15874                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15875                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15876                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15877                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15878                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15879                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15880                         reg = TG3PCI_GEN15_PRODID_ASICREV;
15881                 else
15882                         reg = TG3PCI_PRODID_ASICREV;
15883
15884                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15885         }
15886
15887         /* Wrong chip ID in 5752 A0. This code can be removed later
15888          * as A0 is not in production.
15889          */
15890         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15891                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15892
15893         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15894                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15895
15896         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15897             tg3_asic_rev(tp) == ASIC_REV_5719 ||
15898             tg3_asic_rev(tp) == ASIC_REV_5720)
15899                 tg3_flag_set(tp, 5717_PLUS);
15900
15901         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15902             tg3_asic_rev(tp) == ASIC_REV_57766)
15903                 tg3_flag_set(tp, 57765_CLASS);
15904
15905         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15906              tg3_asic_rev(tp) == ASIC_REV_5762)
15907                 tg3_flag_set(tp, 57765_PLUS);
15908
15909         /* Intentionally exclude ASIC_REV_5906 */
15910         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15911             tg3_asic_rev(tp) == ASIC_REV_5787 ||
15912             tg3_asic_rev(tp) == ASIC_REV_5784 ||
15913             tg3_asic_rev(tp) == ASIC_REV_5761 ||
15914             tg3_asic_rev(tp) == ASIC_REV_5785 ||
15915             tg3_asic_rev(tp) == ASIC_REV_57780 ||
15916             tg3_flag(tp, 57765_PLUS))
15917                 tg3_flag_set(tp, 5755_PLUS);
15918
15919         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15920             tg3_asic_rev(tp) == ASIC_REV_5714)
15921                 tg3_flag_set(tp, 5780_CLASS);
15922
15923         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15924             tg3_asic_rev(tp) == ASIC_REV_5752 ||
15925             tg3_asic_rev(tp) == ASIC_REV_5906 ||
15926             tg3_flag(tp, 5755_PLUS) ||
15927             tg3_flag(tp, 5780_CLASS))
15928                 tg3_flag_set(tp, 5750_PLUS);
15929
15930         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15931             tg3_flag(tp, 5750_PLUS))
15932                 tg3_flag_set(tp, 5705_PLUS);
15933 }
15934
15935 static bool tg3_10_100_only_device(struct tg3 *tp,
15936                                    const struct pci_device_id *ent)
15937 {
15938         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15939
15940         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15941              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15942             (tp->phy_flags & TG3_PHYFLG_IS_FET))
15943                 return true;
15944
15945         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15946                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15947                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15948                                 return true;
15949                 } else {
15950                         return true;
15951                 }
15952         }
15953
15954         return false;
15955 }
15956
15957 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15958 {
15959         u32 misc_ctrl_reg;
15960         u32 pci_state_reg, grc_misc_cfg;
15961         u32 val;
15962         u16 pci_cmd;
15963         int err;
15964
15965         /* Force memory write invalidate off.  If we leave it on,
15966          * then on 5700_BX chips we have to enable a workaround.
15967          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15968          * to match the cacheline size.  The Broadcom driver has this
15969          * workaround but turns MWI off all the time, so it never uses
15970          * it.  This seems to suggest that the workaround is insufficient.
15971          */
15972         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15973         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15974         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15975
15976         /* Important! -- Make sure register accesses are byteswapped
15977          * correctly.  Also, for those chips that require it, make
15978          * sure that indirect register accesses are enabled before
15979          * the first operation.
15980          */
15981         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15982                               &misc_ctrl_reg);
15983         tp->misc_host_ctrl |= (misc_ctrl_reg &
15984                                MISC_HOST_CTRL_CHIPREV);
15985         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15986                                tp->misc_host_ctrl);
15987
15988         tg3_detect_asic_rev(tp, misc_ctrl_reg);
15989
15990         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15991          * we need to disable memory and use configuration cycles
15992          * only to access all registers. The 5702/03 chips
15993          * can mistakenly decode the special cycles from the
15994          * ICH chipsets as memory write cycles, causing corruption
15995          * of register and memory space. Only certain ICH bridges
15996          * will drive special cycles with non-zero data during the
15997          * address phase, which can fall within the 5703's address
15998          * range. This is not an ICH bug, as the PCI spec allows
15999          * a non-zero address during special cycles. However, only
16000          * these ICH bridges are known to drive non-zero addresses
16001          * during special cycles.
16002          *
16003          * Since special cycles do not cross PCI bridges, we only
16004          * enable this workaround if the 5703 is on the secondary
16005          * bus of these ICH bridges.
16006          */
16007         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16008             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16009                 static struct tg3_dev_id {
16010                         u32     vendor;
16011                         u32     device;
16012                         u32     rev;
16013                 } ich_chipsets[] = {
16014                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16015                           PCI_ANY_ID },
16016                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16017                           PCI_ANY_ID },
16018                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16019                           0xa },
16020                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16021                           PCI_ANY_ID },
16022                         { },
16023                 };
16024                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16025                 struct pci_dev *bridge = NULL;
16026
16027                 while (pci_id->vendor != 0) {
16028                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16029                                                 bridge);
16030                         if (!bridge) {
16031                                 pci_id++;
16032                                 continue;
16033                         }
16034                         if (pci_id->rev != PCI_ANY_ID) {
16035                                 if (bridge->revision > pci_id->rev)
16036                                         continue;
16037                         }
16038                         if (bridge->subordinate &&
16039                             (bridge->subordinate->number ==
16040                              tp->pdev->bus->number)) {
16041                                 tg3_flag_set(tp, ICH_WORKAROUND);
16042                                 pci_dev_put(bridge);
16043                                 break;
16044                         }
16045                 }
16046         }
16047
16048         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16049                 static struct tg3_dev_id {
16050                         u32     vendor;
16051                         u32     device;
16052                 } bridge_chipsets[] = {
16053                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16054                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16055                         { },
16056                 };
16057                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16058                 struct pci_dev *bridge = NULL;
16059
16060                 while (pci_id->vendor != 0) {
16061                         bridge = pci_get_device(pci_id->vendor,
16062                                                 pci_id->device,
16063                                                 bridge);
16064                         if (!bridge) {
16065                                 pci_id++;
16066                                 continue;
16067                         }
16068                         if (bridge->subordinate &&
16069                             (bridge->subordinate->number <=
16070                              tp->pdev->bus->number) &&
16071                             (bridge->subordinate->busn_res.end >=
16072                              tp->pdev->bus->number)) {
16073                                 tg3_flag_set(tp, 5701_DMA_BUG);
16074                                 pci_dev_put(bridge);
16075                                 break;
16076                         }
16077                 }
16078         }
16079
16080         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16081          * DMA addresses above 40 bits.  This bridge may have additional
16082          * 57xx devices behind it, in some 4-port NIC designs for example.
16083          * Any tg3 device found behind the bridge will also need the 40-bit
16084          * DMA workaround.
16085          */
16086         if (tg3_flag(tp, 5780_CLASS)) {
16087                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16088                 tp->msi_cap = tp->pdev->msi_cap;
16089         } else {
16090                 struct pci_dev *bridge = NULL;
16091
16092                 do {
16093                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16094                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16095                                                 bridge);
16096                         if (bridge && bridge->subordinate &&
16097                             (bridge->subordinate->number <=
16098                              tp->pdev->bus->number) &&
16099                             (bridge->subordinate->busn_res.end >=
16100                              tp->pdev->bus->number)) {
16101                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16102                                 pci_dev_put(bridge);
16103                                 break;
16104                         }
16105                 } while (bridge);
16106         }
16107
16108         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16109             tg3_asic_rev(tp) == ASIC_REV_5714)
16110                 tp->pdev_peer = tg3_find_peer(tp);
16111
16112         /* Determine TSO capabilities */
16113         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16114                 ; /* Do nothing. HW bug. */
16115         else if (tg3_flag(tp, 57765_PLUS))
16116                 tg3_flag_set(tp, HW_TSO_3);
16117         else if (tg3_flag(tp, 5755_PLUS) ||
16118                  tg3_asic_rev(tp) == ASIC_REV_5906)
16119                 tg3_flag_set(tp, HW_TSO_2);
16120         else if (tg3_flag(tp, 5750_PLUS)) {
16121                 tg3_flag_set(tp, HW_TSO_1);
16122                 tg3_flag_set(tp, TSO_BUG);
16123                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16124                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16125                         tg3_flag_clear(tp, TSO_BUG);
16126         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16127                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16128                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16129                 tg3_flag_set(tp, FW_TSO);
16130                 tg3_flag_set(tp, TSO_BUG);
16131                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16132                         tp->fw_needed = FIRMWARE_TG3TSO5;
16133                 else
16134                         tp->fw_needed = FIRMWARE_TG3TSO;
16135         }
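        /* Net effect of the chain above: 57765_PLUS parts get HW_TSO_3,
         * 5755_PLUS and 5906 get HW_TSO_2, remaining 5750_PLUS parts get
         * HW_TSO_1 (with TSO_BUG cleared again on 5750 C2 and later),
         * 5719 A0 gets none at all due to a HW bug, and older chips fall
         * back to firmware TSO loaded from FIRMWARE_TG3TSO/TG3TSO5.
         */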
16136
16137         /* Selectively allow TSO based on operating conditions */
16138         if (tg3_flag(tp, HW_TSO_1) ||
16139             tg3_flag(tp, HW_TSO_2) ||
16140             tg3_flag(tp, HW_TSO_3) ||
16141             tg3_flag(tp, FW_TSO)) {
16142                 /* For firmware TSO, assume ASF is disabled.
16143                  * We'll disable TSO later if we discover ASF
16144                  * is enabled in tg3_get_eeprom_hw_cfg().
16145                  */
16146                 tg3_flag_set(tp, TSO_CAPABLE);
16147         } else {
16148                 tg3_flag_clear(tp, TSO_CAPABLE);
16149                 tg3_flag_clear(tp, TSO_BUG);
16150                 tp->fw_needed = NULL;
16151         }
16152
16153         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16154                 tp->fw_needed = FIRMWARE_TG3;
16155
16156         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16157                 tp->fw_needed = FIRMWARE_TG357766;
16158
16159         tp->irq_max = 1;
16160
16161         if (tg3_flag(tp, 5750_PLUS)) {
16162                 tg3_flag_set(tp, SUPPORT_MSI);
16163                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16164                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16165                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16166                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16167                      tp->pdev_peer == tp->pdev))
16168                         tg3_flag_clear(tp, SUPPORT_MSI);
16169
16170                 if (tg3_flag(tp, 5755_PLUS) ||
16171                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16172                         tg3_flag_set(tp, 1SHOT_MSI);
16173                 }
16174
16175                 if (tg3_flag(tp, 57765_PLUS)) {
16176                         tg3_flag_set(tp, SUPPORT_MSIX);
16177                         tp->irq_max = TG3_IRQ_MAX_VECS;
16178                 }
16179         }
16180
16181         tp->txq_max = 1;
16182         tp->rxq_max = 1;
16183         if (tp->irq_max > 1) {
16184                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16185                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16186
16187                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16188                     tg3_asic_rev(tp) == ASIC_REV_5720)
16189                         tp->txq_max = tp->irq_max - 1;
16190         }
16191
16192         if (tg3_flag(tp, 5755_PLUS) ||
16193             tg3_asic_rev(tp) == ASIC_REV_5906)
16194                 tg3_flag_set(tp, SHORT_DMA_BUG);
16195
16196         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16197                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16198
16199         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16200             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16201             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16202             tg3_asic_rev(tp) == ASIC_REV_5762)
16203                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16204
16205         if (tg3_flag(tp, 57765_PLUS) &&
16206             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16207                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16208
16209         if (!tg3_flag(tp, 5705_PLUS) ||
16210             tg3_flag(tp, 5780_CLASS) ||
16211             tg3_flag(tp, USE_JUMBO_BDFLAG))
16212                 tg3_flag_set(tp, JUMBO_CAPABLE);
16213
16214         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16215                               &pci_state_reg);
16216
16217         if (pci_is_pcie(tp->pdev)) {
16218                 u16 lnkctl;
16219
16220                 tg3_flag_set(tp, PCI_EXPRESS);
16221
16222                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16223                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16224                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16225                                 tg3_flag_clear(tp, HW_TSO_2);
16226                                 tg3_flag_clear(tp, TSO_CAPABLE);
16227                         }
16228                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16229                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16230                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16231                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16232                                 tg3_flag_set(tp, CLKREQ_BUG);
16233                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16234                         tg3_flag_set(tp, L1PLLPD_EN);
16235                 }
16236         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16237                 /* BCM5785 devices are effectively PCIe devices, and should
16238                  * follow PCIe codepaths, but do not have a PCIe capabilities
16239                  * section.
16240                  */
16241                 tg3_flag_set(tp, PCI_EXPRESS);
16242         } else if (!tg3_flag(tp, 5705_PLUS) ||
16243                    tg3_flag(tp, 5780_CLASS)) {
16244                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16245                 if (!tp->pcix_cap) {
16246                         dev_err(&tp->pdev->dev,
16247                                 "Cannot find PCI-X capability, aborting\n");
16248                         return -EIO;
16249                 }
16250
16251                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16252                         tg3_flag_set(tp, PCIX_MODE);
16253         }
16254
16255         /* If we have an AMD 762 or VIA K8T800 chipset, write
16256          * reordering to the mailbox registers done by the host
16257          * controller can cause major problems.  We read back from
16258          * every mailbox register write to force the writes to be
16259          * posted to the chip in order.
16260          */
16261         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16262             !tg3_flag(tp, PCI_EXPRESS))
16263                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16264
16265         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16266                              &tp->pci_cacheline_sz);
16267         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16268                              &tp->pci_lat_timer);
16269         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16270             tp->pci_lat_timer < 64) {
16271                 tp->pci_lat_timer = 64;
16272                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16273                                       tp->pci_lat_timer);
16274         }
16275
16276         /* Important! -- It is critical that the PCI-X hw workaround
16277          * situation is decided before the first MMIO register access.
16278          */
16279         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16280                 /* 5700 BX chips need to have their TX producer index
16281                  * mailboxes written twice to workaround a bug.
16282                  */
16283                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16284
16285                 /* If we are in PCI-X mode, enable register write workaround.
16286                  *
16287                  * The workaround is to use indirect register accesses
16288                  * for all chip writes not to mailbox registers.
16289                  */
16290                 if (tg3_flag(tp, PCIX_MODE)) {
16291                         u32 pm_reg;
16292
16293                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16294
16295                         /* The chip can have its power management PCI config
16296                          * space registers clobbered due to this bug.
16297                          * So explicitly force the chip into D0 here.
16298                          */
16299                         pci_read_config_dword(tp->pdev,
16300                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16301                                               &pm_reg);
16302                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16303                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16304                         pci_write_config_dword(tp->pdev,
16305                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16306                                                pm_reg);
16307
16308                         /* Also, force SERR#/PERR# in PCI command. */
16309                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16310                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16311                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16312                 }
16313         }
16314
16315         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16316                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16317         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16318                 tg3_flag_set(tp, PCI_32BIT);
16319
16320         /* Chip-specific fixup from Broadcom driver */
16321         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16322             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16323                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16324                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16325         }
16326
16327         /* Default fast path register access methods */
16328         tp->read32 = tg3_read32;
16329         tp->write32 = tg3_write32;
16330         tp->read32_mbox = tg3_read32;
16331         tp->write32_mbox = tg3_write32;
16332         tp->write32_tx_mbox = tg3_write32;
16333         tp->write32_rx_mbox = tg3_write32;
16334
16335         /* Various workaround register access methods */
16336         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16337                 tp->write32 = tg3_write_indirect_reg32;
16338         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16339                  (tg3_flag(tp, PCI_EXPRESS) &&
16340                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16341                 /*
16342                  * Back-to-back register writes can cause problems on these
16343                  * chips; the workaround is to read back all register writes
16344                  * except those to mailbox registers.
16345                  *
16346                  * See tg3_write_indirect_reg32().
16347                  */
16348                 tp->write32 = tg3_write_flush_reg32;
16349         }
16350
16351         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16352                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16353                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16354                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16355         }
16356
16357         if (tg3_flag(tp, ICH_WORKAROUND)) {
16358                 tp->read32 = tg3_read_indirect_reg32;
16359                 tp->write32 = tg3_write_indirect_reg32;
16360                 tp->read32_mbox = tg3_read_indirect_mbox;
16361                 tp->write32_mbox = tg3_write_indirect_mbox;
16362                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16363                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16364
16365                 iounmap(tp->regs);
16366                 tp->regs = NULL;
16367
16368                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16369                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16370                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16371         }
16372         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16373                 tp->read32_mbox = tg3_read32_mbox_5906;
16374                 tp->write32_mbox = tg3_write32_mbox_5906;
16375                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16376                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16377         }
16378
16379         if (tp->write32 == tg3_write_indirect_reg32 ||
16380             (tg3_flag(tp, PCIX_MODE) &&
16381              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16382               tg3_asic_rev(tp) == ASIC_REV_5701)))
16383                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16384
16385         /* The memory arbiter has to be enabled in order for SRAM accesses
16386          * to succeed.  Normally on powerup the tg3 chip firmware will make
16387          * sure it is enabled, but other entities such as system netboot
16388          * code might disable it.
16389          */
16390         val = tr32(MEMARB_MODE);
16391         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16392
16393         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16394         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16395             tg3_flag(tp, 5780_CLASS)) {
16396                 if (tg3_flag(tp, PCIX_MODE)) {
16397                         pci_read_config_dword(tp->pdev,
16398                                               tp->pcix_cap + PCI_X_STATUS,
16399                                               &val);
16400                         tp->pci_fn = val & 0x7;
16401                 }
16402         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16403                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16404                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16405                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16406                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16407                         val = tr32(TG3_CPMU_STATUS);
16408
16409                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16410                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16411                 else
16412                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16413                                      TG3_CPMU_STATUS_FSHFT_5719;
16414         }
16415
16416         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16417                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16418                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16419         }
16420
16421         /* Get eeprom hw config before calling tg3_set_power_state().
16422          * In particular, the TG3_FLAG_IS_NIC flag must be
16423          * determined before calling tg3_set_power_state() so that
16424          * we know whether or not to switch out of Vaux power.
16425          * When the flag is set, it means that GPIO1 is used for eeprom
16426          * write protect and also implies that it is a LOM where GPIOs
16427          * are not used to switch power.
16428          */
16429         tg3_get_eeprom_hw_cfg(tp);
16430
16431         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16432                 tg3_flag_clear(tp, TSO_CAPABLE);
16433                 tg3_flag_clear(tp, TSO_BUG);
16434                 tp->fw_needed = NULL;
16435         }
16436
16437         if (tg3_flag(tp, ENABLE_APE)) {
16438                 /* Allow reads and writes to the
16439                  * APE register and memory space.
16440                  */
16441                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16442                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16443                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16444                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16445                                        pci_state_reg);
16446
16447                 tg3_ape_lock_init(tp);
16448         }
16449
16450         /* Set up tp->grc_local_ctrl before calling
16451          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16452          * will bring 5700's external PHY out of reset.
16453          * It is also used as eeprom write protect on LOMs.
16454          */
16455         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16456         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16457             tg3_flag(tp, EEPROM_WRITE_PROT))
16458                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16459                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16460         /* Unused GPIO3 must be driven as output on 5752 because there
16461          * are no pull-up resistors on unused GPIO pins.
16462          */
16463         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16464                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16465
16466         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16467             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16468             tg3_flag(tp, 57765_CLASS))
16469                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16470
16471         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16472             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16473                 /* Turn off the debug UART. */
16474                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16475                 if (tg3_flag(tp, IS_NIC))
16476                         /* Keep VMain power. */
16477                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16478                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16479         }
16480
16481         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16482                 tp->grc_local_ctrl |=
16483                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16484
16485         /* Switch out of Vaux if it is a NIC */
16486         tg3_pwrsrc_switch_to_vmain(tp);
16487
16488         /* Derive initial jumbo mode from MTU assigned in
16489          * ether_setup() via the alloc_etherdev() call
16490          */
16491         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16492                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16493
16494         /* Determine WakeOnLan speed to use. */
16495         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16496             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16497             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16498             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16499                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16500         } else {
16501                 tg3_flag_set(tp, WOL_SPEED_100MB);
16502         }
16503
16504         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16505                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16506
16507         /* A few boards don't want the Ethernet@WireSpeed phy feature */
16508         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16509             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16510              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16511              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16512             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16513             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16514                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16515
16516         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16517             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16518                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16519         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16520                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16521
16522         if (tg3_flag(tp, 5705_PLUS) &&
16523             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16524             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16525             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16526             !tg3_flag(tp, 57765_PLUS)) {
16527                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16528                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16529                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16530                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16531                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16532                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16533                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16534                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16535                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16536                 } else
16537                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16538         }
16539
16540         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16541             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16542                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16543                 if (tp->phy_otp == 0)
16544                         tp->phy_otp = TG3_OTP_DEFAULT;
16545         }
16546
16547         if (tg3_flag(tp, CPMU_PRESENT))
16548                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16549         else
16550                 tp->mi_mode = MAC_MI_MODE_BASE;
16551
16552         tp->coalesce_mode = 0;
16553         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16554             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16555                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16556
16557         /* Set these bits to enable statistics workaround. */
16558         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16559             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16560             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16561             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16562                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16563                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16564         }
16565
16566         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16567             tg3_asic_rev(tp) == ASIC_REV_57780)
16568                 tg3_flag_set(tp, USE_PHYLIB);
16569
16570         err = tg3_mdio_init(tp);
16571         if (err)
16572                 return err;
16573
16574         /* Initialize data/descriptor byte/word swapping. */
16575         val = tr32(GRC_MODE);
16576         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16577             tg3_asic_rev(tp) == ASIC_REV_5762)
16578                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16579                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16580                         GRC_MODE_B2HRX_ENABLE |
16581                         GRC_MODE_HTX2B_ENABLE |
16582                         GRC_MODE_HOST_STACKUP);
16583         else
16584                 val &= GRC_MODE_HOST_STACKUP;
16585
16586         tw32(GRC_MODE, val | tp->grc_mode);
16587
16588         tg3_switch_clocks(tp);
16589
16590         /* Clear this out for sanity. */
16591         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16592
16593         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16594         tw32(TG3PCI_REG_BASE_ADDR, 0);
16595
16596         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16597                               &pci_state_reg);
16598         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16599             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16600                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16601                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16602                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16603                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16604                         void __iomem *sram_base;
16605
16606                         /* Write some dummy words into the SRAM status block
16607                          * area and see if they read back correctly.  If the
16608                          * readback value is bad, force-enable the PCI-X workaround.
16609                          */
16610                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16611
16612                         writel(0x00000000, sram_base);
16613                         writel(0x00000000, sram_base + 4);
16614                         writel(0xffffffff, sram_base + 4);
16615                         if (readl(sram_base) != 0x00000000)
16616                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16617                 }
16618         }
16619
16620         udelay(50);
16621         tg3_nvram_init(tp);
16622
16623         /* If the device has an NVRAM, no need to load patch firmware */
16624         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16625             !tg3_flag(tp, NO_NVRAM))
16626                 tp->fw_needed = NULL;
16627
16628         grc_misc_cfg = tr32(GRC_MISC_CFG);
16629         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16630
16631         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16632             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16633              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16634                 tg3_flag_set(tp, IS_5788);
16635
16636         if (!tg3_flag(tp, IS_5788) &&
16637             tg3_asic_rev(tp) != ASIC_REV_5700)
16638                 tg3_flag_set(tp, TAGGED_STATUS);
16639         if (tg3_flag(tp, TAGGED_STATUS)) {
16640                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16641                                       HOSTCC_MODE_CLRTICK_TXBD);
16642
16643                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16644                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16645                                        tp->misc_host_ctrl);
16646         }
16647
16648         /* Preserve the APE MAC_MODE bits */
16649         if (tg3_flag(tp, ENABLE_APE))
16650                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16651         else
16652                 tp->mac_mode = 0;
16653
16654         if (tg3_10_100_only_device(tp, ent))
16655                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16656
16657         err = tg3_phy_probe(tp);
16658         if (err) {
16659                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16660                 /* ... but do not return immediately ... */
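                /* (err is still returned from the end of this function,
                 * after the VPD and firmware version reads below.)
                 */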
16661                 tg3_mdio_fini(tp);
16662         }
16663
16664         tg3_read_vpd(tp);
16665         tg3_read_fw_ver(tp);
16666
16667         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16668                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16669         } else {
16670                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16671                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16672                 else
16673                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16674         }
16675
16676         /* 5700 {AX,BX} chips have a broken status block link
16677          * change bit implementation, so we must use the
16678          * status register in those cases.
16679          */
16680         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16681                 tg3_flag_set(tp, USE_LINKCHG_REG);
16682         else
16683                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16684
16685         /* The led_ctrl is set during tg3_phy_probe; here we might
16686          * have to force the link status polling mechanism based
16687          * upon subsystem IDs.
16688          */
16689         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16690             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16691             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16692                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16693                 tg3_flag_set(tp, USE_LINKCHG_REG);
16694         }
16695
16696         /* For all SERDES we poll the MAC status register. */
16697         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16698                 tg3_flag_set(tp, POLL_SERDES);
16699         else
16700                 tg3_flag_clear(tp, POLL_SERDES);
16701
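        /* Note: the 5701 in PCI-X mode apparently cannot DMA into
         * 2-byte-offset (NET_IP_ALIGN) buffers, so an unpadded offset is
         * used there and, on architectures without efficient unaligned
         * access, every received packet is copied by maxing out
         * rx_copy_thresh.
         */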
16702         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16703         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16704         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16705             tg3_flag(tp, PCIX_MODE)) {
16706                 tp->rx_offset = NET_SKB_PAD;
16707 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16708                 tp->rx_copy_thresh = ~(u16)0;
16709 #endif
16710         }
16711
16712         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16713         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16714         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16715
16716         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16717
16718         /* Increment the rx prod index on the rx std ring by at most
16719          * 8 for these chips to work around hw errata.
16720          */
16721         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16722             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16723             tg3_asic_rev(tp) == ASIC_REV_5755)
16724                 tp->rx_std_max_post = 8;
16725
16726         if (tg3_flag(tp, ASPM_WORKAROUND))
16727                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16728                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16729
16730         return err;
16731 }
16732
16733 #ifdef CONFIG_SPARC
16734 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16735 {
16736         struct net_device *dev = tp->dev;
16737         struct pci_dev *pdev = tp->pdev;
16738         struct device_node *dp = pci_device_to_OF_node(pdev);
16739         const unsigned char *addr;
16740         int len;
16741
16742         addr = of_get_property(dp, "local-mac-address", &len);
16743         if (addr && len == ETH_ALEN) {
16744                 memcpy(dev->dev_addr, addr, ETH_ALEN);
16745                 return 0;
16746         }
16747         return -ENODEV;
16748 }
16749
16750 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16751 {
16752         struct net_device *dev = tp->dev;
16753
16754         memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16755         return 0;
16756 }
16757 #endif
16758
16759 static int tg3_get_device_address(struct tg3 *tp)
16760 {
16761         struct net_device *dev = tp->dev;
16762         u32 hi, lo, mac_offset;
16763         int addr_ok = 0;
16764         int err;
16765
16766 #ifdef CONFIG_SPARC
16767         if (!tg3_get_macaddr_sparc(tp))
16768                 return 0;
16769 #endif
16770
16771         if (tg3_flag(tp, IS_SSB_CORE)) {
16772                 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16773                 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16774                         return 0;
16775         }
16776
16777         mac_offset = 0x7c;
16778         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16779             tg3_flag(tp, 5780_CLASS)) {
16780                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16781                         mac_offset = 0xcc;
16782                 if (tg3_nvram_lock(tp))
16783                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16784                 else
16785                         tg3_nvram_unlock(tp);
16786         } else if (tg3_flag(tp, 5717_PLUS)) {
16787                 if (tp->pci_fn & 1)
16788                         mac_offset = 0xcc;
16789                 if (tp->pci_fn > 1)
16790                         mac_offset += 0x18c;
16791         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16792                 mac_offset = 0x10;
16793
16794         /* First try to get it from the MAC address mailbox. */
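        /* A valid mailbox entry is flagged with the ASCII signature "HK"
         * (0x48 'H', 0x4b 'K') in the upper 16 bits of the high word,
         * which is what the check below tests for.
         */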
16795         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16796         if ((hi >> 16) == 0x484b) {
16797                 dev->dev_addr[0] = (hi >>  8) & 0xff;
16798                 dev->dev_addr[1] = (hi >>  0) & 0xff;
16799
16800                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16801                 dev->dev_addr[2] = (lo >> 24) & 0xff;
16802                 dev->dev_addr[3] = (lo >> 16) & 0xff;
16803                 dev->dev_addr[4] = (lo >>  8) & 0xff;
16804                 dev->dev_addr[5] = (lo >>  0) & 0xff;
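                /* Illustrative example (hypothetical values):
                 * hi = 0x484b0102, lo = 0x03040506 yields the MAC
                 * address 01:02:03:04:05:06.
                 */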
16805
16806                 /* Some old bootcode may report a 0 MAC address in SRAM */
16807                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16808         }
16809         if (!addr_ok) {
16810                 /* Next, try NVRAM. */
16811                 if (!tg3_flag(tp, NO_NVRAM) &&
16812                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16813                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16814                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16815                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16816                 }
16817                 /* Finally just fetch it out of the MAC control regs. */
16818                 else {
16819                         hi = tr32(MAC_ADDR_0_HIGH);
16820                         lo = tr32(MAC_ADDR_0_LOW);
16821
16822                         dev->dev_addr[5] = lo & 0xff;
16823                         dev->dev_addr[4] = (lo >> 8) & 0xff;
16824                         dev->dev_addr[3] = (lo >> 16) & 0xff;
16825                         dev->dev_addr[2] = (lo >> 24) & 0xff;
16826                         dev->dev_addr[1] = hi & 0xff;
16827                         dev->dev_addr[0] = (hi >> 8) & 0xff;
16828                 }
16829         }
16830
16831         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16832 #ifdef CONFIG_SPARC
16833                 if (!tg3_get_default_macaddr_sparc(tp))
16834                         return 0;
16835 #endif
16836                 return -EINVAL;
16837         }
16838         return 0;
16839 }
16840
16841 #define BOUNDARY_SINGLE_CACHELINE       1
16842 #define BOUNDARY_MULTI_CACHELINE        2
16843
16844 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16845 {
16846         int cacheline_size;
16847         u8 byte;
16848         int goal;
16849
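        /* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, so e.g. a
         * value of 0x10 means a 64-byte cache line; 0 (never programmed)
         * falls back to a conservative 1024 bytes.
         */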
16850         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16851         if (byte == 0)
16852                 cacheline_size = 1024;
16853         else
16854                 cacheline_size = (int) byte * 4;
16855
16856         /* On 5703 and later non-PCI-Express chips, the boundary bits
16857          * have no effect.
16858          */
16859         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16860             tg3_asic_rev(tp) != ASIC_REV_5701 &&
16861             !tg3_flag(tp, PCI_EXPRESS))
16862                 goto out;
16863
16864 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16865         goal = BOUNDARY_MULTI_CACHELINE;
16866 #else
16867 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16868         goal = BOUNDARY_SINGLE_CACHELINE;
16869 #else
16870         goal = 0;
16871 #endif
16872 #endif
16873
16874         if (tg3_flag(tp, 57765_PLUS)) {
16875                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16876                 goto out;
16877         }
16878
16879         if (!goal)
16880                 goto out;
16881
16882         /* PCI controllers on most RISC systems tend to disconnect
16883          * when a device tries to burst across a cache-line boundary.
16884          * Therefore, letting tg3 do so just wastes PCI bandwidth.
16885          *
16886          * Unfortunately, for PCI-E there are only limited
16887          * write-side controls for this, and thus for reads
16888          * we will still get the disconnects.  We'll also waste
16889          * these PCI cycles for both read and write for chips
16890          * other than 5700 and 5701 which do not implement the
16891          * boundary bits.
16892          */
16893         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16894                 switch (cacheline_size) {
16895                 case 16:
16896                 case 32:
16897                 case 64:
16898                 case 128:
16899                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16900                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16901                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16902                         } else {
16903                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16904                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16905                         }
16906                         break;
16907
16908                 case 256:
16909                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16910                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16911                         break;
16912
16913                 default:
16914                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16915                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16916                         break;
16917                 }
16918         } else if (tg3_flag(tp, PCI_EXPRESS)) {
16919                 switch (cacheline_size) {
16920                 case 16:
16921                 case 32:
16922                 case 64:
16923                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16924                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16925                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16926                                 break;
16927                         }
16928                         /* fallthrough */
16929                 case 128:
16930                 default:
16931                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16932                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16933                         break;
16934                 }
16935         } else {
16936                 switch (cacheline_size) {
16937                 case 16:
16938                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16939                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16940                                         DMA_RWCTRL_WRITE_BNDRY_16);
16941                                 break;
16942                         }
16943                         /* fallthrough */
16944                 case 32:
16945                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16946                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16947                                         DMA_RWCTRL_WRITE_BNDRY_32);
16948                                 break;
16949                         }
16950                         /* fallthrough */
16951                 case 64:
16952                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16953                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16954                                         DMA_RWCTRL_WRITE_BNDRY_64);
16955                                 break;
16956                         }
16957                         /* fallthrough */
16958                 case 128:
16959                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16960                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16961                                         DMA_RWCTRL_WRITE_BNDRY_128);
16962                                 break;
16963                         }
16964                         /* fallthrough */
16965                 case 256:
16966                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
16967                                 DMA_RWCTRL_WRITE_BNDRY_256);
16968                         break;
16969                 case 512:
16970                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
16971                                 DMA_RWCTRL_WRITE_BNDRY_512);
16972                         break;
16973                 case 1024:
16974                 default:
16975                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16976                                 DMA_RWCTRL_WRITE_BNDRY_1024);
16977                         break;
16978                 }
16979         }
16980
16981 out:
16982         return val;
16983 }
16984
16985 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16986                            int size, bool to_device)
16987 {
16988         struct tg3_internal_buffer_desc test_desc;
16989         u32 sram_dma_descs;
16990         int i, ret;
16991
16992         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16993
16994         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16995         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16996         tw32(RDMAC_STATUS, 0);
16997         tw32(WDMAC_STATUS, 0);
16998
16999         tw32(BUFMGR_MODE, 0);
17000         tw32(FTQ_RESET, 0);
17001
17002         test_desc.addr_hi = ((u64) buf_dma) >> 32;
17003         test_desc.addr_lo = buf_dma & 0xffffffff;
17004         test_desc.nic_mbuf = 0x00002100;
17005         test_desc.len = size;
17006
17007         /*
17008          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17009          * the *second* time the tg3 driver was loaded after an
17010          * initial scan.
17011          *
17012          * Broadcom tells me:
17013          *   ...the DMA engine is connected to the GRC block and a DMA
17014          *   reset may affect the GRC block in some unpredictable way...
17015          *   The behavior of resets to individual blocks has not been tested.
17016          *
17017          * Broadcom noted the GRC reset will also reset all sub-components.
17018          */
17019         if (to_device) {
17020                 test_desc.cqid_sqid = (13 << 8) | 2;
17021
17022                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17023                 udelay(40);
17024         } else {
17025                 test_desc.cqid_sqid = (16 << 8) | 7;
17026
17027                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17028                 udelay(40);
17029         }
17030         test_desc.flags = 0x00000005;
17031
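        /* Copy the test descriptor into NIC SRAM one 32-bit word at a
         * time through the PCI config-space memory window registers.
         */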
17032         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17033                 u32 val;
17034
17035                 val = *(((u32 *)&test_desc) + i);
17036                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17037                                        sram_dma_descs + (i * sizeof(u32)));
17038                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17039         }
17040         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17041
17042         if (to_device)
17043                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17044         else
17045                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17046
17047         ret = -ENODEV;
17048         for (i = 0; i < 40; i++) {
17049                 u32 val;
17050
17051                 if (to_device)
17052                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17053                 else
17054                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17055                 if ((val & 0xffff) == sram_dma_descs) {
17056                         ret = 0;
17057                         break;
17058                 }
17059
17060                 udelay(100);
17061         }
17062
17063         return ret;
17064 }
17065
17066 #define TEST_BUFFER_SIZE        0x2000
17067
17068 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
17069         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17070         { },
17071 };
17072
17073 static int tg3_test_dma(struct tg3 *tp)
17074 {
17075         dma_addr_t buf_dma;
17076         u32 *buf, saved_dma_rwctrl;
17077         int ret = 0;
17078
17079         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17080                                  &buf_dma, GFP_KERNEL);
17081         if (!buf) {
17082                 ret = -ENOMEM;
17083                 goto out_nofree;
17084         }
17085
17086         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17087                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17088
17089         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17090
17091         if (tg3_flag(tp, 57765_PLUS))
17092                 goto out;
17093
17094         if (tg3_flag(tp, PCI_EXPRESS)) {
17095                 /* DMA read watermark not used on PCIE */
17096                 tp->dma_rwctrl |= 0x00180000;
17097         } else if (!tg3_flag(tp, PCIX_MODE)) {
17098                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17099                     tg3_asic_rev(tp) == ASIC_REV_5750)
17100                         tp->dma_rwctrl |= 0x003f0000;
17101                 else
17102                         tp->dma_rwctrl |= 0x003f000f;
17103         } else {
17104                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17105                     tg3_asic_rev(tp) == ASIC_REV_5704) {
17106                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17107                         u32 read_water = 0x7;
17108
17109                         /* If the 5704 is behind the EPB bridge, we can
17110                          * do the less restrictive ONE_DMA workaround for
17111                          * better performance.
17112                          */
17113                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17114                             tg3_asic_rev(tp) == ASIC_REV_5704)
17115                                 tp->dma_rwctrl |= 0x8000;
17116                         else if (ccval == 0x6 || ccval == 0x7)
17117                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17118
17119                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
17120                                 read_water = 4;
17121                         /* Set bit 23 to enable PCIX hw bug fix */
17122                         tp->dma_rwctrl |=
17123                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17124                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17125                                 (1 << 23);
17126                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17127                         /* 5780 always in PCIX mode */
17128                         tp->dma_rwctrl |= 0x00144000;
17129                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17130                         /* 5714 always in PCIX mode */
17131                         tp->dma_rwctrl |= 0x00148000;
17132                 } else {
17133                         tp->dma_rwctrl |= 0x001b000f;
17134                 }
17135         }
17136         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17137                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17138
17139         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17140             tg3_asic_rev(tp) == ASIC_REV_5704)
17141                 tp->dma_rwctrl &= 0xfffffff0;
17142
17143         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17144             tg3_asic_rev(tp) == ASIC_REV_5701) {
17145                 /* Remove this if it causes problems for some boards. */
17146                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17147
17148                 /* On 5700/5701 chips, we need to set this bit.
17149                  * Otherwise the chip will issue cacheline transactions
17150                  * to streamable DMA memory with not all the byte
17151                  * enables turned on.  This is an error on several
17152                  * RISC PCI controllers, in particular sparc64.
17153                  *
17154                  * On 5703/5704 chips, this bit has been reassigned
17155                  * a different meaning.  In particular, it is used
17156                  * on those chips to enable a PCI-X workaround.
17157                  */
17158                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17159         }
17160
17161         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17162
17163
17164         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17165             tg3_asic_rev(tp) != ASIC_REV_5701)
17166                 goto out;
17167
17168         /* It is best to perform the DMA test with maximum write burst size
17169          * to expose the 5700/5701 write DMA bug.
17170          */
17171         saved_dma_rwctrl = tp->dma_rwctrl;
17172         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17173         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17174
17175         while (1) {
17176                 u32 *p = buf, i;
17177
17178                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17179                         p[i] = i;
17180
17181                 /* Send the buffer to the chip. */
17182                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17183                 if (ret) {
17184                         dev_err(&tp->pdev->dev,
17185                                 "%s: Buffer write failed. err = %d\n",
17186                                 __func__, ret);
17187                         break;
17188                 }
17189
17190                 /* Now read it back. */
17191                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17192                 if (ret) {
17193                         dev_err(&tp->pdev->dev,
17194                                 "%s: Buffer read failed. err = %d\n", __func__, ret);
17195                         break;
17196                 }
17197
17198                 /* Verify it. */
17199                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17200                         if (p[i] == i)
17201                                 continue;
17202
17203                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17204                             DMA_RWCTRL_WRITE_BNDRY_16) {
17205                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17206                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17207                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17208                                 break;
17209                         } else {
17210                                 dev_err(&tp->pdev->dev,
17211                                         "%s: Buffer corrupted on read back! "
17212                                         "(%d != %d)\n", __func__, p[i], i);
17213                                 ret = -ENODEV;
17214                                 goto out;
17215                         }
17216                 }
17217
17218                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17219                         /* Success. */
17220                         ret = 0;
17221                         break;
17222                 }
17223         }
17224         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17225             DMA_RWCTRL_WRITE_BNDRY_16) {
17226                 /* DMA test passed without adjusting DMA boundary;
17227                  * now look for chipsets that are known to expose the
17228                  * DMA bug without failing the test.
17229                  */
17230                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17231                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17232                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17233                 } else {
17234                         /* Safe to use the calculated DMA boundary. */
17235                         tp->dma_rwctrl = saved_dma_rwctrl;
17236                 }
17237
17238                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17239         }
17240
17241 out:
17242         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17243 out_nofree:
17244         return ret;
17245 }
17246
17247 static void tg3_init_bufmgr_config(struct tg3 *tp)
17248 {
17249         if (tg3_flag(tp, 57765_PLUS)) {
17250                 tp->bufmgr_config.mbuf_read_dma_low_water =
17251                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17252                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17253                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17254                 tp->bufmgr_config.mbuf_high_water =
17255                         DEFAULT_MB_HIGH_WATER_57765;
17256
17257                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17258                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17259                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17260                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17261                 tp->bufmgr_config.mbuf_high_water_jumbo =
17262                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17263         } else if (tg3_flag(tp, 5705_PLUS)) {
17264                 tp->bufmgr_config.mbuf_read_dma_low_water =
17265                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17266                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17267                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17268                 tp->bufmgr_config.mbuf_high_water =
17269                         DEFAULT_MB_HIGH_WATER_5705;
17270                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17271                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17272                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17273                         tp->bufmgr_config.mbuf_high_water =
17274                                 DEFAULT_MB_HIGH_WATER_5906;
17275                 }
17276
17277                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17278                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17279                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17280                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17281                 tp->bufmgr_config.mbuf_high_water_jumbo =
17282                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17283         } else {
17284                 tp->bufmgr_config.mbuf_read_dma_low_water =
17285                         DEFAULT_MB_RDMA_LOW_WATER;
17286                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17287                         DEFAULT_MB_MACRX_LOW_WATER;
17288                 tp->bufmgr_config.mbuf_high_water =
17289                         DEFAULT_MB_HIGH_WATER;
17290
17291                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17292                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17293                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17294                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17295                 tp->bufmgr_config.mbuf_high_water_jumbo =
17296                         DEFAULT_MB_HIGH_WATER_JUMBO;
17297         }
17298
17299         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17300         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17301 }
17302
17303 static char *tg3_phy_string(struct tg3 *tp)
17304 {
17305         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17306         case TG3_PHY_ID_BCM5400:        return "5400";
17307         case TG3_PHY_ID_BCM5401:        return "5401";
17308         case TG3_PHY_ID_BCM5411:        return "5411";
17309         case TG3_PHY_ID_BCM5701:        return "5701";
17310         case TG3_PHY_ID_BCM5703:        return "5703";
17311         case TG3_PHY_ID_BCM5704:        return "5704";
17312         case TG3_PHY_ID_BCM5705:        return "5705";
17313         case TG3_PHY_ID_BCM5750:        return "5750";
17314         case TG3_PHY_ID_BCM5752:        return "5752";
17315         case TG3_PHY_ID_BCM5714:        return "5714";
17316         case TG3_PHY_ID_BCM5780:        return "5780";
17317         case TG3_PHY_ID_BCM5755:        return "5755";
17318         case TG3_PHY_ID_BCM5787:        return "5787";
17319         case TG3_PHY_ID_BCM5784:        return "5784";
17320         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17321         case TG3_PHY_ID_BCM5906:        return "5906";
17322         case TG3_PHY_ID_BCM5761:        return "5761";
17323         case TG3_PHY_ID_BCM5718C:       return "5718C";
17324         case TG3_PHY_ID_BCM5718S:       return "5718S";
17325         case TG3_PHY_ID_BCM57765:       return "57765";
17326         case TG3_PHY_ID_BCM5719C:       return "5719C";
17327         case TG3_PHY_ID_BCM5720C:       return "5720C";
17328         case TG3_PHY_ID_BCM5762:        return "5762C";
17329         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17330         case 0:                 return "serdes";
17331         default:                return "unknown";
17332         }
17333 }
17334
17335 static char *tg3_bus_string(struct tg3 *tp, char *str)
17336 {
17337         if (tg3_flag(tp, PCI_EXPRESS)) {
17338                 strcpy(str, "PCI Express");
17339                 return str;
17340         } else if (tg3_flag(tp, PCIX_MODE)) {
17341                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17342
17343                 strcpy(str, "PCIX:");
17344
17345                 if ((clock_ctrl == 7) ||
17346                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17347                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17348                         strcat(str, "133MHz");
17349                 else if (clock_ctrl == 0)
17350                         strcat(str, "33MHz");
17351                 else if (clock_ctrl == 2)
17352                         strcat(str, "50MHz");
17353                 else if (clock_ctrl == 4)
17354                         strcat(str, "66MHz");
17355                 else if (clock_ctrl == 6)
17356                         strcat(str, "100MHz");
17357         } else {
17358                 strcpy(str, "PCI:");
17359                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17360                         strcat(str, "66MHz");
17361                 else
17362                         strcat(str, "33MHz");
17363         }
17364         if (tg3_flag(tp, PCI_32BIT))
17365                 strcat(str, ":32-bit");
17366         else
17367                 strcat(str, ":64-bit");
17368         return str;
17369 }
17370
17371 static void tg3_init_coal(struct tg3 *tp)
17372 {
17373         struct ethtool_coalesce *ec = &tp->coal;
17374
17375         memset(ec, 0, sizeof(*ec));
17376         ec->cmd = ETHTOOL_GCOALESCE;
17377         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17378         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17379         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17380         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17381         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17382         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17383         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17384         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17385         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17386
17387         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17388                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17389                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17390                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17391                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17392                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17393         }
17394
17395         if (tg3_flag(tp, 5705_PLUS)) {
17396                 ec->rx_coalesce_usecs_irq = 0;
17397                 ec->tx_coalesce_usecs_irq = 0;
17398                 ec->stats_block_coalesce_usecs = 0;
17399         }
17400 }
17401
17402 static int tg3_init_one(struct pci_dev *pdev,
17403                                   const struct pci_device_id *ent)
17404 {
17405         struct net_device *dev;
17406         struct tg3 *tp;
17407         int i, err;
17408         u32 sndmbx, rcvmbx, intmbx;
17409         char str[40];
17410         u64 dma_mask, persist_dma_mask;
17411         netdev_features_t features = 0;
17412
17413         printk_once(KERN_INFO "%s\n", version);
17414
17415         err = pci_enable_device(pdev);
17416         if (err) {
17417                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17418                 return err;
17419         }
17420
17421         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17422         if (err) {
17423                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17424                 goto err_out_disable_pdev;
17425         }
17426
17427         pci_set_master(pdev);
17428
17429         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17430         if (!dev) {
17431                 err = -ENOMEM;
17432                 goto err_out_free_res;
17433         }
17434
17435         SET_NETDEV_DEV(dev, &pdev->dev);
17436
17437         tp = netdev_priv(dev);
17438         tp->pdev = pdev;
17439         tp->dev = dev;
17440         tp->rx_mode = TG3_DEF_RX_MODE;
17441         tp->tx_mode = TG3_DEF_TX_MODE;
17442         tp->irq_sync = 1;
17443
17444         if (tg3_debug > 0)
17445                 tp->msg_enable = tg3_debug;
17446         else
17447                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17448
17449         if (pdev_is_ssb_gige_core(pdev)) {
17450                 tg3_flag_set(tp, IS_SSB_CORE);
17451                 if (ssb_gige_must_flush_posted_writes(pdev))
17452                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17453                 if (ssb_gige_one_dma_at_once(pdev))
17454                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17455                 if (ssb_gige_have_roboswitch(pdev)) {
17456                         tg3_flag_set(tp, USE_PHYLIB);
17457                         tg3_flag_set(tp, ROBOSWITCH);
17458                 }
17459                 if (ssb_gige_is_rgmii(pdev))
17460                         tg3_flag_set(tp, RGMII_MODE);
17461         }
17462
17463         /* The word/byte swap controls here control register access byte
17464          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17465          * setting below.
17466          */
17467         tp->misc_host_ctrl =
17468                 MISC_HOST_CTRL_MASK_PCI_INT |
17469                 MISC_HOST_CTRL_WORD_SWAP |
17470                 MISC_HOST_CTRL_INDIR_ACCESS |
17471                 MISC_HOST_CTRL_PCISTATE_RW;
17472
17473         /* The NONFRM (non-frame) byte/word swap controls take effect
17474          * on descriptor entries, i.e. anything which isn't packet data.
17475          *
17476          * The StrongARM chips on the board (one for tx, one for rx)
17477          * are running in big-endian mode.
17478          */
17479         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17480                         GRC_MODE_WSWAP_NONFRM_DATA);
17481 #ifdef __BIG_ENDIAN
17482         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17483 #endif
17484         spin_lock_init(&tp->lock);
17485         spin_lock_init(&tp->indirect_lock);
17486         INIT_WORK(&tp->reset_task, tg3_reset_task);
17487
17488         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17489         if (!tp->regs) {
17490                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17491                 err = -ENOMEM;
17492                 goto err_out_free_dev;
17493         }
17494
17495         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17496             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17497             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17498             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17499             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17500             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17501             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17502             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17503             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17504             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17505             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17506             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17507             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17508             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17509             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17510                 tg3_flag_set(tp, ENABLE_APE);
17511                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17512                 if (!tp->aperegs) {
17513                         dev_err(&pdev->dev,
17514                                 "Cannot map APE registers, aborting\n");
17515                         err = -ENOMEM;
17516                         goto err_out_iounmap;
17517                 }
17518         }
17519
17520         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17521         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17522
17523         dev->ethtool_ops = &tg3_ethtool_ops;
17524         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17525         dev->netdev_ops = &tg3_netdev_ops;
17526         dev->irq = pdev->irq;
17527
17528         err = tg3_get_invariants(tp, ent);
17529         if (err) {
17530                 dev_err(&pdev->dev,
17531                         "Problem fetching invariants of chip, aborting\n");
17532                 goto err_out_apeunmap;
17533         }
17534
17535         /* The EPB bridge inside the 5714, 5715, and 5780, and any
17536          * device behind the EPB, cannot support DMA addresses > 40-bit.
17537          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17538          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17539          * do DMA address check in tg3_start_xmit().
17540          */
17541         if (tg3_flag(tp, IS_5788))
17542                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17543         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17544                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17545 #ifdef CONFIG_HIGHMEM
17546                 dma_mask = DMA_BIT_MASK(64);
17547 #endif
17548         } else
17549                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17550
17551         /* Configure DMA attributes. */
17552         if (dma_mask > DMA_BIT_MASK(32)) {
17553                 err = pci_set_dma_mask(pdev, dma_mask);
17554                 if (!err) {
17555                         features |= NETIF_F_HIGHDMA;
17556                         err = pci_set_consistent_dma_mask(pdev,
17557                                                           persist_dma_mask);
17558                         if (err < 0) {
17559                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17560                                         "DMA for consistent allocations\n");
17561                                 goto err_out_apeunmap;
17562                         }
17563                 }
17564         }
17565         if (err || dma_mask == DMA_BIT_MASK(32)) {
17566                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17567                 if (err) {
17568                         dev_err(&pdev->dev,
17569                                 "No usable DMA configuration, aborting\n");
17570                         goto err_out_apeunmap;
17571                 }
17572         }
17573
17574         tg3_init_bufmgr_config(tp);
17575
17576         features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
17577
17578         /* 5700 B0 chips do not support checksumming correctly due
17579          * to hardware bugs.
17580          */
17581         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17582                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17583
17584                 if (tg3_flag(tp, 5755_PLUS))
17585                         features |= NETIF_F_IPV6_CSUM;
17586         }
17587
17588         /* TSO is on by default on chips that support hardware TSO.
17589          * Firmware TSO on older chips gives lower performance, so it
17590          * is off by default, but can be enabled using ethtool.
17591          */
17592         if ((tg3_flag(tp, HW_TSO_1) ||
17593              tg3_flag(tp, HW_TSO_2) ||
17594              tg3_flag(tp, HW_TSO_3)) &&
17595             (features & NETIF_F_IP_CSUM))
17596                 features |= NETIF_F_TSO;
17597         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17598                 if (features & NETIF_F_IPV6_CSUM)
17599                         features |= NETIF_F_TSO6;
17600                 if (tg3_flag(tp, HW_TSO_3) ||
17601                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17602                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17603                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17604                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17605                     tg3_asic_rev(tp) == ASIC_REV_57780)
17606                         features |= NETIF_F_TSO_ECN;
17607         }
17608
17609         dev->features |= features;
17610         dev->vlan_features |= features;
17611
17612         /*
17613          * Add loopback capability only for a subset of devices that support
17614          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17615          * loopback for the remaining devices.
17616          */
17617         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17618             !tg3_flag(tp, CPMU_PRESENT))
17619                 /* Add the loopback capability */
17620                 features |= NETIF_F_LOOPBACK;
17621
17622         dev->hw_features |= features;
17623
17624         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17625             !tg3_flag(tp, TSO_CAPABLE) &&
17626             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17627                 tg3_flag_set(tp, MAX_RXPEND_64);
17628                 tp->rx_pending = 63;
17629         }
17630
17631         err = tg3_get_device_address(tp);
17632         if (err) {
17633                 dev_err(&pdev->dev,
17634                         "Could not obtain valid ethernet address, aborting\n");
17635                 goto err_out_apeunmap;
17636         }
17637
17638         /*
17639          * Reset chip in case UNDI or EFI driver did not shut it down.
17640          * The DMA self test will enable WDMAC and we'll see (spurious)
17641          * pending DMA on the PCI bus at that point.
17642          */
17643         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17644             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17645                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17646                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17647         }
17648
17649         err = tg3_test_dma(tp);
17650         if (err) {
17651                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17652                 goto err_out_apeunmap;
17653         }
17654
17655         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17656         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17657         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17658         for (i = 0; i < tp->irq_max; i++) {
17659                 struct tg3_napi *tnapi = &tp->napi[i];
17660
17661                 tnapi->tp = tp;
17662                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17663
17664                 tnapi->int_mbox = intmbx;
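                /* Per the offsets below, the low-numbered interrupt
                 * mailboxes are spaced 8 bytes apart; higher-numbered
                 * vectors are packed 4 bytes apart.
                 */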
17665                 if (i <= 4)
17666                         intmbx += 0x8;
17667                 else
17668                         intmbx += 0x4;
17669
17670                 tnapi->consmbox = rcvmbx;
17671                 tnapi->prodmbox = sndmbx;
17672
17673                 if (i)
17674                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17675                 else
17676                         tnapi->coal_now = HOSTCC_MODE_NOW;
17677
17678                 if (!tg3_flag(tp, SUPPORT_MSIX))
17679                         break;
17680
17681                 /*
17682                  * If we support MSIX, we'll be using RSS.  If we're using
17683                  * RSS, the first vector only handles link interrupts and the
17684                  * remaining vectors handle rx and tx interrupts.  Reuse the
17685                  * mailbox values for the next iteration.  The values we
17686                  * set up above are still useful for the single vectored mode.
17687                  */
17688                 if (!i)
17689                         continue;
17690
17691                 rcvmbx += 0x8;
17692
17693                 if (sndmbx & 0x4)
17694                         sndmbx -= 0x4;
17695                 else
17696                         sndmbx += 0xc;
17697         }
17698
17699         tg3_init_coal(tp);
17700
17701         pci_set_drvdata(pdev, dev);
17702
17703         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17704             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17705             tg3_asic_rev(tp) == ASIC_REV_5762)
17706                 tg3_flag_set(tp, PTP_CAPABLE);
17707
17708         tg3_timer_init(tp);
17709
17710         tg3_carrier_off(tp);
17711
17712         err = register_netdev(dev);
17713         if (err) {
17714                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17715                 goto err_out_apeunmap;
17716         }
17717
17718         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17719                     tp->board_part_number,
17720                     tg3_chip_rev_id(tp),
17721                     tg3_bus_string(tp, str),
17722                     dev->dev_addr);
17723
17724         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17725                 struct phy_device *phydev;
17726                 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
17727                 netdev_info(dev,
17728                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17729                             phydev->drv->name, dev_name(&phydev->dev));
17730         } else {
17731                 char *ethtype;
17732
17733                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17734                         ethtype = "10/100Base-TX";
17735                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17736                         ethtype = "1000Base-SX";
17737                 else
17738                         ethtype = "10/100/1000Base-T";
17739
17740                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17741                             "(WireSpeed[%d], EEE[%d])\n",
17742                             tg3_phy_string(tp), ethtype,
17743                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17744                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17745         }
17746
17747         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17748                     (dev->features & NETIF_F_RXCSUM) != 0,
17749                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
17750                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17751                     tg3_flag(tp, ENABLE_ASF) != 0,
17752                     tg3_flag(tp, TSO_CAPABLE) != 0);
17753         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17754                     tp->dma_rwctrl,
17755                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17756                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17757
17758         pci_save_state(pdev);
17759
17760         return 0;
17761
17762 err_out_apeunmap:
17763         if (tp->aperegs) {
17764                 iounmap(tp->aperegs);
17765                 tp->aperegs = NULL;
17766         }
17767
17768 err_out_iounmap:
17769         if (tp->regs) {
17770                 iounmap(tp->regs);
17771                 tp->regs = NULL;
17772         }
17773
17774 err_out_free_dev:
17775         free_netdev(dev);
17776
17777 err_out_free_res:
17778         pci_release_regions(pdev);
17779
17780 err_out_disable_pdev:
17781         if (pci_is_enabled(pdev))
17782                 pci_disable_device(pdev);
17783         return err;
17784 }
17785
17786 static void tg3_remove_one(struct pci_dev *pdev)
17787 {
17788         struct net_device *dev = pci_get_drvdata(pdev);
17789
17790         if (dev) {
17791                 struct tg3 *tp = netdev_priv(dev);
17792
17793                 release_firmware(tp->fw);
17794
17795                 tg3_reset_task_cancel(tp);
17796
17797                 if (tg3_flag(tp, USE_PHYLIB)) {
17798                         tg3_phy_fini(tp);
17799                         tg3_mdio_fini(tp);
17800                 }
17801
17802                 unregister_netdev(dev);
17803                 if (tp->aperegs) {
17804                         iounmap(tp->aperegs);
17805                         tp->aperegs = NULL;
17806                 }
17807                 if (tp->regs) {
17808                         iounmap(tp->regs);
17809                         tp->regs = NULL;
17810                 }
17811                 free_netdev(dev);
17812                 pci_release_regions(pdev);
17813                 pci_disable_device(pdev);
17814         }
17815 }
17816
17817 #ifdef CONFIG_PM_SLEEP
17818 static int tg3_suspend(struct device *device)
17819 {
17820         struct pci_dev *pdev = to_pci_dev(device);
17821         struct net_device *dev = pci_get_drvdata(pdev);
17822         struct tg3 *tp = netdev_priv(dev);
17823         int err = 0;
17824
17825         rtnl_lock();
17826
17827         if (!netif_running(dev))
17828                 goto unlock;
17829
17830         tg3_reset_task_cancel(tp);
17831         tg3_phy_stop(tp);
17832         tg3_netif_stop(tp);
17833
17834         tg3_timer_stop(tp);
17835
17836         tg3_full_lock(tp, 1);
17837         tg3_disable_ints(tp);
17838         tg3_full_unlock(tp);
17839
17840         netif_device_detach(dev);
17841
17842         tg3_full_lock(tp, 0);
17843         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17844         tg3_flag_clear(tp, INIT_COMPLETE);
17845         tg3_full_unlock(tp);
17846
17847         err = tg3_power_down_prepare(tp);
17848         if (err) {
17849                 int err2;
17850
17851                 tg3_full_lock(tp, 0);
17852
17853                 tg3_flag_set(tp, INIT_COMPLETE);
17854                 err2 = tg3_restart_hw(tp, true);
17855                 if (err2)
17856                         goto out;
17857
17858                 tg3_timer_start(tp);
17859
17860                 netif_device_attach(dev);
17861                 tg3_netif_start(tp);
17862
17863 out:
17864                 tg3_full_unlock(tp);
17865
17866                 if (!err2)
17867                         tg3_phy_start(tp);
17868         }
17869
17870 unlock:
17871         rtnl_unlock();
17872         return err;
17873 }
17874
17875 static int tg3_resume(struct device *device)
17876 {
17877         struct pci_dev *pdev = to_pci_dev(device);
17878         struct net_device *dev = pci_get_drvdata(pdev);
17879         struct tg3 *tp = netdev_priv(dev);
17880         int err = 0;
17881
17882         rtnl_lock();
17883
17884         if (!netif_running(dev))
17885                 goto unlock;
17886
17887         netif_device_attach(dev);
17888
17889         tg3_full_lock(tp, 0);
17890
17891         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17892
17893         tg3_flag_set(tp, INIT_COMPLETE);
17894         err = tg3_restart_hw(tp,
17895                              !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17896         if (err)
17897                 goto out;
17898
17899         tg3_timer_start(tp);
17900
17901         tg3_netif_start(tp);
17902
17903 out:
17904         tg3_full_unlock(tp);
17905
17906         if (!err)
17907                 tg3_phy_start(tp);
17908
17909 unlock:
17910         rtnl_unlock();
17911         return err;
17912 }
17913 #endif /* CONFIG_PM_SLEEP */
17914
17915 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17916
17917 static void tg3_shutdown(struct pci_dev *pdev)
17918 {
17919         struct net_device *dev = pci_get_drvdata(pdev);
17920         struct tg3 *tp = netdev_priv(dev);
17921
17922         rtnl_lock();
17923         netif_device_detach(dev);
17924
17925         if (netif_running(dev))
17926                 dev_close(dev);
17927
17928         if (system_state == SYSTEM_POWER_OFF)
17929                 tg3_power_down(tp);
17930
17931         rtnl_unlock();
17932 }
17933
17934 /**
17935  * tg3_io_error_detected - called when PCI error is detected
17936  * @pdev: Pointer to PCI device
17937  * @state: The current PCI connection state
17938  *
17939  * This function is called after a PCI bus error affecting
17940  * this device has been detected.
17941  */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	/* We probably don't have netdev yet */
	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
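	/*
	 * On a permanent failure there is no recovery path: re-enable
	 * NAPI (stopped above) so dev_close() can drain cleanly, then
	 * tell the core to disconnect.  Otherwise disable the device and
	 * request a slot reset via PCI_ERS_RESULT_NEED_RESET.
	 */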
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

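	/*
	 * Re-enable bus mastering and restore the config space saved at
	 * probe time; saving it again straight away keeps a valid
	 * snapshot around in case another error/reset cycle follows.
	 */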
	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

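	/*
	 * The slot has been reset and re-enabled; bring the hardware
	 * back up from scratch, including a full PHY reset.
	 */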
	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}

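/*
 * PCI error recovery flow: the core calls .error_detected first; a
 * PCI_ERS_RESULT_NEED_RESET answer makes it reset the slot and invoke
 * .slot_reset, and only after PCI_ERS_RESULT_RECOVERED does .resume
 * run to restart traffic.
 */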
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

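/*
 * module_pci_driver() expands to the module_init()/module_exit() pair
 * that registers and unregisters tg3_driver with the PCI core.
 */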
module_pci_driver(tg3_driver);