/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
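
/* Illustration: tg3_flag(tp, TAGGED_STATUS) expands to
 * _tg3_flag(TG3_FLAG_TAGGED_STATUS, (tp)->tg3_flags), i.e. a plain
 * test_bit() on the flag bitmap.  tg3_flag_set() and tg3_flag_clear()
 * follow the same pattern with set_bit() and clear_bit().
 */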

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     136
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
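/* With the values above, DRV_MODULE_VERSION expands to the string "3.136". */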
#define DRV_MODULE_RELDATE      "Jan 03, 2014"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
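
/* Worked example of the shift-and-mask point made above: because
 * TG3_TX_RING_SIZE is a power of two (512), NEXT_TX(idx) computes
 * (idx + 1) % 512 as the cheaper (idx + 1) & 511, so ring-index
 * wraparound never needs a hardware modulo.
 */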

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG357766       "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG357766);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);  /* read back to flush the posted write */
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
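
/* Typical usage of the accessors above, as seen later in this file:
 *
 *   tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);           write, flush with a read
 *   val = tr32(TG3PCI_MEM_WIN_DATA);                 plain register read
 *   tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);  write, flush, wait 40 usec
 */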

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver isn't holding any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}
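
/* tg3_ape_lock() and tg3_ape_unlock() are used as a pair around accesses
 * that the APE firmware may also perform, e.g. (from __tg3_readphy()
 * later in this file):
 *
 *   tg3_ape_lock(tp, tp->phy_ape_lock);
 *   ... MDIO transaction ...
 *   tg3_ape_unlock(tp, tp->phy_ape_lock);
 */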

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        /* Nonzero (timed out) if the event was still pending at loop end. */
        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000
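
/* Each busy-wait iteration in the PHY access routines below delays
 * 10 usec, so PHY_BUSY_LOOPS bounds a single MDIO transaction to
 * roughly 5000 * 10 usec = 50 ms before __tg3_readphy() and
 * __tg3_writephy() give up and return -EBUSY.
 */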

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
        u32 val;
        int err;

        err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

        if (err)
                return err;

        if (enable)
                val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
        else
                val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

        err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

        return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
        return tg3_writephy(tp, MII_TG3_MISC_SHDW,
                            reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}
1373
1374 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1375 {
1376         struct tg3 *tp = bp->priv;
1377         u32 val;
1378
1379         spin_lock_bh(&tp->lock);
1380
1381         if (__tg3_readphy(tp, mii_id, reg, &val))
1382                 val = -EIO;
1383
1384         spin_unlock_bh(&tp->lock);
1385
1386         return val;
1387 }
1388
1389 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1390 {
1391         struct tg3 *tp = bp->priv;
1392         u32 ret = 0;
1393
1394         spin_lock_bh(&tp->lock);
1395
1396         if (__tg3_writephy(tp, mii_id, reg, val))
1397                 ret = -EIO;
1398
1399         spin_unlock_bh(&tp->lock);
1400
1401         return ret;
1402 }
1403
1404 static int tg3_mdio_reset(struct mii_bus *bp)
1405 {
1406         return 0;
1407 }
1408
static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[tp->phy_addr];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            tg3_asic_rev(tp) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

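/* Locate the PHY and, when phylib is in use, allocate and register the
 * MDIO bus.  On 5717+ parts the PHY address is derived from the PCI
 * function number (serdes devices sit 7 addresses higher); SSB cores
 * with a roboswitch ask the ssb layer for the address instead.
 */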
static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
                int addr;

                addr = ssb_gige_get_phyaddr(tp->pdev);
                if (addr < 0)
                        return addr;
                tp->phy_addr = addr;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the quickest
         * way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[tp->phy_addr];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (tg3_asic_rev(tp) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

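/* Wait for the firmware to ACK the previous driver event by clearing
 * GRC_RX_CPU_DRIVER_EVENT.  The wait is bounded by
 * TG3_FW_EVENT_TIMEOUT_USEC measured from last_event_jiffies, so a
 * caller that arrives late may not need to wait at all.
 */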
/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(8);
        }
}

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
        u32 reg, val;

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        *data++ = val;

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        *data++ = val;

        val = 0;
        if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        *data++ = val;

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        *data++ = val;
}

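/* Send a four-word link snapshot (BMCR/BMSR, ANAR/ANLPAR, the 1000T
 * registers, and the PHY address) to the management firmware mailbox.
 * Only 5780-class devices running ASF firmware consume these updates.
 */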
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 data[4];

        if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
                return;

        tg3_phy_gather_ump_data(tp, data);

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

        tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
        if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
                /* Wait for RX cpu to ACK the previous event. */
                tg3_wait_for_event_ack(tp);

                tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

                tg3_generate_fw_event(tp);

                /* Wait for RX cpu to ACK this event. */
                tg3_wait_for_event_ack(tp);
        }
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
        tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
                      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

        if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_START);
                        break;

                case RESET_KIND_SHUTDOWN:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_UNLOAD);
                        break;

                case RESET_KIND_SUSPEND:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_SUSPEND);
                        break;

                default:
                        break;
                }
        }
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
        if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_START_DONE);
                        break;

                case RESET_KIND_SHUTDOWN:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_UNLOAD_DONE);
                        break;

                default:
                        break;
                }
        }
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
        if (tg3_flag(tp, ENABLE_ASF)) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_START);
                        break;

                case RESET_KIND_SHUTDOWN:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_UNLOAD);
                        break;

                case RESET_KIND_SUSPEND:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_SUSPEND);
                        break;

                default:
                        break;
                }
        }
}

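/* Poll for firmware boot completion.  Bootcode signals readiness by
 * writing the one's complement of the magic value back to the firmware
 * mailbox; the 5906 exposes an explicit VCPU init-done bit instead.
 * Absence of firmware is legal and is only logged once.
 */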
static int tg3_poll_fw(struct tg3 *tp)
{
        int i;
        u32 val;

        if (tg3_flag(tp, NO_FWARE_REPORTED))
                return 0;

        if (tg3_flag(tp, IS_SSB_CORE)) {
                /* We don't use firmware. */
                return 0;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                /* Wait up to 20ms for init done. */
                for (i = 0; i < 200; i++) {
                        if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
                                return 0;
                        if (pci_channel_offline(tp->pdev))
                                return -ENODEV;

                        udelay(100);
                }
                return -ENODEV;
        }

        /* Wait for firmware initialization to complete. */
        for (i = 0; i < 100000; i++) {
                tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
                if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                        break;
                if (pci_channel_offline(tp->pdev)) {
                        if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
                                tg3_flag_set(tp, NO_FWARE_REPORTED);
                                netdev_info(tp->dev, "No firmware running\n");
                        }

                        break;
                }

                udelay(10);
        }

        /* Chip might not be fitted with firmware.  Some Sun onboard
         * parts are configured like that.  So don't signal the timeout
         * of the above loop as an error, but do report the lack of
         * running firmware once.
         */
        if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
                tg3_flag_set(tp, NO_FWARE_REPORTED);

                netdev_info(tp->dev, "No firmware running\n");
        }

        if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
                /* The 57765 A0 needs a little more time to
                 * complete its initialization.
                 */
                mdelay(10);
        }

        return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                netif_info(tp, link, tp->dev, "Link is down\n");
                tg3_ump_link_report(tp);
        } else if (netif_msg_link(tp)) {
                netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
                            (tp->link_config.active_speed == SPEED_1000 ?
                             1000 :
                             (tp->link_config.active_speed == SPEED_100 ?
                              100 : 10)),
                            (tp->link_config.active_duplex == DUPLEX_FULL ?
                             "full" : "half"));

                netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
                            "on" : "off",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
                            "on" : "off");

                if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
                        netdev_info(tp->dev, "EEE is %s\n",
                                    tp->setlpicnt ? "enabled" : "disabled");

                tg3_ump_link_report(tp);
        }

        tp->link_up = netif_carrier_ok(tp->dev);
}

static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
        u32 flowctrl = 0;

        if (adv & ADVERTISE_PAUSE_CAP) {
                flowctrl |= FLOW_CTRL_RX;
                if (!(adv & ADVERTISE_PAUSE_ASYM))
                        flowctrl |= FLOW_CTRL_TX;
        } else if (adv & ADVERTISE_PAUSE_ASYM)
                flowctrl |= FLOW_CTRL_TX;

        return flowctrl;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_1000XPAUSE;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_1000XPSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
        u32 flowctrl = 0;

        if (adv & ADVERTISE_1000XPAUSE) {
                flowctrl |= FLOW_CTRL_RX;
                if (!(adv & ADVERTISE_1000XPSE_ASYM))
                        flowctrl |= FLOW_CTRL_TX;
        } else if (adv & ADVERTISE_1000XPSE_ASYM)
                flowctrl |= FLOW_CTRL_TX;

        return flowctrl;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
        u8 cap = 0;

        if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
        } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
                if (lcladv & ADVERTISE_1000XPAUSE)
                        cap = FLOW_CTRL_RX;
                if (rmtadv & ADVERTISE_1000XPAUSE)
                        cap = FLOW_CTRL_TX;
        }

        return cap;
}

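/* Resolve the active pause configuration.  When pause autoneg is in
 * effect the result is derived from the local and remote advertisements
 * (1000Base-X or copper as appropriate); otherwise the forced flowctrl
 * setting is used.  The MAC RX/TX flow-control enables are then
 * reprogrammed only if they actually changed.
 */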
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
        u8 autoneg;
        u8 flowctrl = 0;
        u32 old_rx_mode = tp->rx_mode;
        u32 old_tx_mode = tp->tx_mode;

        if (tg3_flag(tp, USE_PHYLIB))
                autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
        else
                autoneg = tp->link_config.autoneg;

        if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
                if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
                else
                        flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
        } else
                flowctrl = tp->link_config.flowctrl;

        tp->link_config.active_flowctrl = flowctrl;

        if (flowctrl & FLOW_CTRL_RX)
                tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

        if (old_rx_mode != tp->rx_mode)
                tw32_f(MAC_RX_MODE, tp->rx_mode);

        if (flowctrl & FLOW_CTRL_TX)
                tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

        if (old_tx_mode != tp->tx_mode)
                tw32_f(MAC_TX_MODE, tp->tx_mode);
}

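/* phylib link-change callback.  Mirrors the PHY's speed, duplex and
 * pause state into the MAC mode, MI status and TX length registers,
 * and emits a link report when anything user-visible changed.
 */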
static void tg3_adjust_link(struct net_device *dev)
{
        u8 oldflowctrl, linkmesg = 0;
        u32 mac_mode, lcl_adv, rmt_adv;
        struct tg3 *tp = netdev_priv(dev);
        struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];

        spin_lock_bh(&tp->lock);

        mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
                                    MAC_MODE_HALF_DUPLEX);

        oldflowctrl = tp->link_config.active_flowctrl;

        if (phydev->link) {
                lcl_adv = 0;
                rmt_adv = 0;

                if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                else if (phydev->speed == SPEED_1000 ||
                         tg3_asic_rev(tp) != ASIC_REV_5785)
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;
                else
                        mac_mode |= MAC_MODE_PORT_MODE_MII;

                if (phydev->duplex == DUPLEX_HALF)
                        mac_mode |= MAC_MODE_HALF_DUPLEX;
                else {
                        lcl_adv = mii_advertise_flowctrl(
                                  tp->link_config.flowctrl);

                        if (phydev->pause)
                                rmt_adv = LPA_PAUSE_CAP;
                        if (phydev->asym_pause)
                                rmt_adv |= LPA_PAUSE_ASYM;
                }

                tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
        } else
                mac_mode |= MAC_MODE_PORT_MODE_GMII;

        if (mac_mode != tp->mac_mode) {
                tp->mac_mode = mac_mode;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5785) {
                if (phydev->speed == SPEED_10)
                        tw32(MAC_MI_STAT,
                             MAC_MI_STAT_10MBPS_MODE |
                             MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
                else
                        tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
        }

        if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
        else
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

        if (phydev->link != tp->old_link ||
            phydev->speed != tp->link_config.active_speed ||
            phydev->duplex != tp->link_config.active_duplex ||
            oldflowctrl != tp->link_config.active_flowctrl)
                linkmesg = 1;

        tp->old_link = phydev->link;
        tp->link_config.active_speed = phydev->speed;
        tp->link_config.active_duplex = phydev->duplex;

        spin_unlock_bh(&tp->lock);

        if (linkmesg)
                tg3_link_report(tp);
}

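/* Connect the net device to its phylib PHY and trim the PHY's
 * supported features to what the MAC can do (10/100-only devices lose
 * the gigabit modes).  Safe to call repeatedly; an already connected
 * PHY is left alone.
 */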
static int tg3_phy_init(struct tg3 *tp)
{
        struct phy_device *phydev;

        if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
                return 0;

        /* Bring the PHY back to a known state. */
        tg3_bmcr_reset(tp);

        phydev = tp->mdio_bus->phy_map[tp->phy_addr];

        /* Attach the MAC to the PHY. */
        phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
                             tg3_adjust_link, phydev->interface);
        if (IS_ERR(phydev)) {
                dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
                return PTR_ERR(phydev);
        }

        /* Mask with MAC supported features. */
        switch (phydev->interface) {
        case PHY_INTERFACE_MODE_GMII:
        case PHY_INTERFACE_MODE_RGMII:
                if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
                        phydev->supported &= (PHY_GBIT_FEATURES |
                                              SUPPORTED_Pause |
                                              SUPPORTED_Asym_Pause);
                        break;
                }
                /* fallthru */
        case PHY_INTERFACE_MODE_MII:
                phydev->supported &= (PHY_BASIC_FEATURES |
                                      SUPPORTED_Pause |
                                      SUPPORTED_Asym_Pause);
                break;
        default:
                phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
                return -EINVAL;
        }

        tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

        phydev->advertising = phydev->supported;

        return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
        struct phy_device *phydev;

        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                return;

        phydev = tp->mdio_bus->phy_map[tp->phy_addr];

        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
                tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
                phydev->speed = tp->link_config.speed;
                phydev->duplex = tp->link_config.duplex;
                phydev->autoneg = tp->link_config.autoneg;
                phydev->advertising = tp->link_config.advertising;
        }

        phy_start(phydev);

        phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                return;

        phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
        if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
                phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
                tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
        }
}

static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
        int err;
        u32 val;

        if (tp->phy_flags & TG3_PHYFLG_IS_FET)
                return 0;

        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                err = tg3_phy_auxctl_write(tp,
                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                           MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
                                           0x4c20);
                goto done;
        }

        err = tg3_phy_auxctl_read(tp,
                                  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
        if (err)
                return err;

        val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
        err = tg3_phy_auxctl_write(tp,
                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
        return err;
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
        u32 phytest;

        if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
                u32 phy;

                tg3_writephy(tp, MII_TG3_FET_TEST,
                             phytest | MII_TG3_FET_SHADOW_EN);
                if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
                        if (enable)
                                phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
                        else
                                phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
                        tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
                }
                tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
        }
}

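/* Enable or disable PHY auto power-down (APD).  FET-style PHYs use the
 * shadow auxiliary status register; everything else is programmed
 * through the SCR5 and APD shadow selectors, with the DLL APD bit
 * withheld on 5784 when enabling.
 */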
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
        u32 reg;

        if (!tg3_flag(tp, 5705_PLUS) ||
            (tg3_flag(tp, 5717_PLUS) &&
             (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
                return;

        if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                tg3_phy_fet_toggle_apd(tp, enable);
                return;
        }

        reg = MII_TG3_MISC_SHDW_SCR5_LPED |
              MII_TG3_MISC_SHDW_SCR5_DLPTLM |
              MII_TG3_MISC_SHDW_SCR5_SDTL |
              MII_TG3_MISC_SHDW_SCR5_C125OE;
        if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
                reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

        tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);

        reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
        if (enable)
                reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

        tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
        u32 phy;

        if (!tg3_flag(tp, 5705_PLUS) ||
            (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
                return;

        if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                u32 ephy;

                if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
                        u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

                        tg3_writephy(tp, MII_TG3_FET_TEST,
                                     ephy | MII_TG3_FET_SHADOW_EN);
                        if (!tg3_readphy(tp, reg, &phy)) {
                                if (enable)
                                        phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
                                else
                                        phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
                                tg3_writephy(tp, reg, phy);
                        }
                        tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
                }
        } else {
                int ret;

                ret = tg3_phy_auxctl_read(tp,
                                          MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
                if (!ret) {
                        if (enable)
                                phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        else
                                phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        tg3_phy_auxctl_write(tp,
                                             MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
                }
        }
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
        int ret;
        u32 val;

        if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
                return;

        ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
        if (!ret)
                tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
                                     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}

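/* Load the per-chip analog tuning values fused into OTP into the
 * corresponding PHY DSP registers (AGC target, filter trims, VDAC,
 * 10BT amplitude and offsets).  A no-op when no OTP data was found.
 */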
static void tg3_phy_apply_otp(struct tg3 *tp)
{
        u32 otp, phy;

        if (!tp->phy_otp)
                return;

        otp = tp->phy_otp;

        if (tg3_phy_toggle_auxctl_smdsp(tp, true))
                return;

        phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
        phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
        tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

        phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
              ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

        phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
        phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

        phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

        phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

        phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
              ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

        tg3_phy_toggle_auxctl_smdsp(tp, false);
}

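/* Read back the current EEE state from the PHY (via clause 45
 * registers) and the CPMU into an ethtool_eee structure; with a NULL
 * argument the driver's cached tp->eee is refreshed instead.
 */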
static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
{
        u32 val;
        struct ethtool_eee *dest = &tp->eee;

        if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
                return;

        if (eee)
                dest = eee;

        if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
                return;

        /* Pull eee_active */
        if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
            val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
                dest->eee_active = 1;
        } else
                dest->eee_active = 0;

        /* Pull lp advertised settings */
        if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
                return;
        dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

        /* Pull advertised and eee_enabled settings */
        if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
                return;
        dest->eee_enabled = !!val;
        dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

        /* Pull tx_lpi_enabled */
        val = tr32(TG3_CPMU_EEE_MODE);
        dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);

        /* Pull lpi timer value */
        dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
}

static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
        u32 val;

        if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
                return;

        tp->setlpicnt = 0;

        if (tp->link_config.autoneg == AUTONEG_ENABLE &&
            current_link_up &&
            tp->link_config.active_duplex == DUPLEX_FULL &&
            (tp->link_config.active_speed == SPEED_100 ||
             tp->link_config.active_speed == SPEED_1000)) {
                u32 eeectl;

                if (tp->link_config.active_speed == SPEED_1000)
                        eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
                else
                        eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

                tw32(TG3_CPMU_EEE_CTRL, eeectl);

                tg3_eee_pull_config(tp, NULL);
                if (tp->eee.eee_active)
                        tp->setlpicnt = 2;
        }

        if (!tp->setlpicnt) {
                if (current_link_up &&
                   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                        tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
                        tg3_phy_toggle_auxctl_smdsp(tp, false);
                }

                val = tr32(TG3_CPMU_EEE_MODE);
                tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
        }
}

static void tg3_phy_eee_enable(struct tg3 *tp)
{
        u32 val;

        if (tp->link_config.active_speed == SPEED_1000 &&
            (tg3_asic_rev(tp) == ASIC_REV_5717 ||
             tg3_asic_rev(tp) == ASIC_REV_5719 ||
             tg3_flag(tp, 57765_CLASS)) &&
            !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                val = MII_TG3_DSP_TAP26_ALNOKO |
                      MII_TG3_DSP_TAP26_RMRXSTO;
                tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
                tg3_phy_toggle_auxctl_smdsp(tp, false);
        }

        val = tr32(TG3_CPMU_EEE_MODE);
        tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
        int limit = 100;

        while (limit--) {
                u32 tmp32;

                if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
                        if ((tmp32 & 0x1000) == 0)
                                break;
                }
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

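/* Write a known test pattern to each of the four DSP channels and read
 * it back.  Macro timeouts set *resetp so the retry loop in
 * tg3_phy_reset_5703_4_5() resets the PHY before trying again; a data
 * mismatch just fails the attempt with -EBUSY.
 */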
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                            tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                            tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
                if (tg3_wait_macro_done(tp))
                        return -EBUSY;
        }

        return 0;
}

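/* Reset workaround for early 5703/5704/5705 PHYs: force 1000-full
 * master mode, verify the DSP channels with a test pattern (retrying
 * with a PHY reset when needed), then restore the original
 * transmitter, interrupt and master/slave settings.
 */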
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
        u32 reg32, phy9_orig;
        int retries, do_phy_reset, err;

        retries = 10;
        do_phy_reset = 1;
        do {
                if (do_phy_reset) {
                        err = tg3_bmcr_reset(tp);
                        if (err)
                                return err;
                        do_phy_reset = 0;
                }

                /* Disable transmitter and interrupt.  */
                if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
                        continue;

                reg32 |= 0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

                /* Set full-duplex, 1000 Mbps.  */
                tg3_writephy(tp, MII_BMCR,
                             BMCR_FULLDPLX | BMCR_SPEED1000);

                /* Set to master mode.  */
                if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
                        continue;

                tg3_writephy(tp, MII_CTRL1000,
                             CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

                err = tg3_phy_toggle_auxctl_smdsp(tp, true);
                if (err)
                        return err;

                /* Block the PHY control access.  */
                tg3_phydsp_write(tp, 0x8005, 0x0800);

                err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
                if (!err)
                        break;
        } while (--retries);

        err = tg3_phy_reset_chanpat(tp);
        if (err)
                return err;

        tg3_phydsp_write(tp, 0x8005, 0x0000);

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
        tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

        tg3_phy_toggle_auxctl_smdsp(tp, false);

        tg3_writephy(tp, MII_CTRL1000, phy9_orig);

        if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
                reg32 &= ~0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
        } else if (!err)
                err = -EBUSY;

        return err;
}

static void tg3_carrier_off(struct tg3 *tp)
{
        netif_carrier_off(tp->dev);
        tp->link_up = false;
}

static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
{
        if (tg3_flag(tp, ENABLE_ASF))
                netdev_warn(tp->dev,
                            "Management side-band traffic will be interrupted during phy settings change\n");
}

/* Fully reset the Tigon3 PHY and re-apply all chip- and PHY-specific
 * workarounds and tuning (DSP fixups, OTP values, APD, MDI-X,
 * wirespeed).
 */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 val, cpmuctrl;
        int err;

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
        }
        err  = tg3_readphy(tp, MII_BMSR, &val);
        err |= tg3_readphy(tp, MII_BMSR, &val);
        if (err != 0)
                return -EBUSY;

        if (netif_running(tp->dev) && tp->link_up) {
                netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
            tg3_asic_rev(tp) == ASIC_REV_5704 ||
            tg3_asic_rev(tp) == ASIC_REV_5705) {
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        cpmuctrl = 0;
        if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
            tg3_chip_rev(tp) != CHIPREV_5784_AX) {
                cpmuctrl = tr32(TG3_CPMU_CTRL);
                if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
                        tw32(TG3_CPMU_CTRL,
                             cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
        }

        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

        if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
                val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
                tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

                tw32(TG3_CPMU_CTRL, cpmuctrl);
        }

        if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
            tg3_chip_rev(tp) == CHIPREV_5761_AX) {
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
                    CPMU_LSPD_1000MB_MACCLK_12_5) {
                        val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                        udelay(40);
                        tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
                }
        }

        if (tg3_flag(tp, 5717_PLUS) &&
            (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
                return 0;

        tg3_phy_apply_otp(tp);

        if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
                tg3_phy_toggle_apd(tp, true);
        else
                tg3_phy_toggle_apd(tp, false);

out:
        if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
            !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                tg3_phydsp_write(tp, 0x201f, 0x2aaa);
                tg3_phydsp_write(tp, 0x000a, 0x0323);
                tg3_phy_toggle_auxctl_smdsp(tp, false);
        }

        if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
        }

        if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
                if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                        tg3_phydsp_write(tp, 0x000a, 0x310b);
                        tg3_phydsp_write(tp, 0x201f, 0x9506);
                        tg3_phydsp_write(tp, 0x401f, 0x14e2);
                        tg3_phy_toggle_auxctl_smdsp(tp, false);
                }
        } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
                if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                        if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
                                tg3_writephy(tp, MII_TG3_TEST1,
                                             MII_TG3_TEST1_TRIM_EN | 0x4);
                        } else
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

                        tg3_phy_toggle_auxctl_smdsp(tp, false);
                }
        }

        /* Set Extended packet length bit (bit 14) on all chips that
         * support jumbo frames.
         */
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
        } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
                /* Set bit 14 with read-modify-write to preserve other bits */
                err = tg3_phy_auxctl_read(tp,
                                          MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
                if (!err)
                        tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                           val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
        }

        /* Set phy register 0x10 bit 0 to high fifo elasticity to support
         * jumbo frame transmission.
         */
        if (tg3_flag(tp, JUMBO_CAPABLE)) {
                if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                /* adjust output voltage */
                tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
        }

        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
                tg3_phydsp_write(tp, 0xffb, 0x4000);

        tg3_phy_toggle_automdix(tp, true);
        tg3_phy_set_wirespeed(tp);
        return 0;
}

#define TG3_GPIO_MSG_DRVR_PRES           0x00000001
#define TG3_GPIO_MSG_NEED_VAUX           0x00000002
#define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
                                          TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
        ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
         (TG3_GPIO_MSG_DRVR_PRES << 4) | \
         (TG3_GPIO_MSG_DRVR_PRES << 8) | \
         (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
        ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
         (TG3_GPIO_MSG_NEED_VAUX << 4) | \
         (TG3_GPIO_MSG_NEED_VAUX << 8) | \
         (TG3_GPIO_MSG_NEED_VAUX << 12))

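/* Each PCI function keeps a "driver present" / "need Vaux" nibble in a
 * shared status word (in APE scratchpad space on 5717/5719, otherwise
 * in TG3_CPMU_DRV_STATUS).  Update our function's nibble and return
 * the whole word so callers can see every port's state.
 */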
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
        u32 status, shift;

        if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
            tg3_asic_rev(tp) == ASIC_REV_5719)
                status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
        else
                status = tr32(TG3_CPMU_DRV_STATUS);

        shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
        status &= ~(TG3_GPIO_MSG_MASK << shift);
        status |= (newstat << shift);

        if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
            tg3_asic_rev(tp) == ASIC_REV_5719)
                tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
        else
                tw32(TG3_CPMU_DRV_STATUS, status);

        return status >> TG3_APE_GPIO_MSG_SHIFT;
}

static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
        if (!tg3_flag(tp, IS_NIC))
                return 0;

        if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
            tg3_asic_rev(tp) == ASIC_REV_5719 ||
            tg3_asic_rev(tp) == ASIC_REV_5720) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
                        return -EIO;

                tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
        } else {
                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);
        }

        return 0;
}

static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
        u32 grc_local_ctrl;

        if (!tg3_flag(tp, IS_NIC) ||
            tg3_asic_rev(tp) == ASIC_REV_5700 ||
            tg3_asic_rev(tp) == ASIC_REV_5701)
                return;

        grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

        tw32_wait_f(GRC_LOCAL_CTRL,
                    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
                    TG3_GRC_LCLCTL_PWRSW_DELAY);

        tw32_wait_f(GRC_LOCAL_CTRL,
                    grc_local_ctrl,
                    TG3_GRC_LCLCTL_PWRSW_DELAY);

        tw32_wait_f(GRC_LOCAL_CTRL,
                    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
                    TG3_GRC_LCLCTL_PWRSW_DELAY);
}

static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
        if (!tg3_flag(tp, IS_NIC))
                return;

        if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
            tg3_asic_rev(tp) == ASIC_REV_5701) {
                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                            (GRC_LCLCTRL_GPIO_OE0 |
                             GRC_LCLCTRL_GPIO_OE1 |
                             GRC_LCLCTRL_GPIO_OE2 |
                             GRC_LCLCTRL_GPIO_OUTPUT0 |
                             GRC_LCLCTRL_GPIO_OUTPUT1),
                            TG3_GRC_LCLCTL_PWRSW_DELAY);
        } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
                /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
                u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
                                     GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OE2 |
                                     GRC_LCLCTRL_GPIO_OUTPUT0 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1 |
                                     tp->grc_local_ctrl;
                tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
                tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
                tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);
        } else {
                u32 no_gpio2;
                u32 grc_local_ctrl = 0;

                /* Workaround to prevent overdrawing Amps. */
                if (tg3_asic_rev(tp) == ASIC_REV_5714) {
                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    grc_local_ctrl,
                                    TG3_GRC_LCLCTL_PWRSW_DELAY);
                }

                /* On 5753 and variants, GPIO2 cannot be used. */
                no_gpio2 = tp->nic_sram_data_cfg &
                           NIC_SRAM_DATA_CFG_NO_GPIO2;

                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                  GRC_LCLCTRL_GPIO_OE1 |
                                  GRC_LCLCTRL_GPIO_OE2 |
                                  GRC_LCLCTRL_GPIO_OUTPUT1 |
                                  GRC_LCLCTRL_GPIO_OUTPUT2;
                if (no_gpio2) {
                        grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
                                            GRC_LCLCTRL_GPIO_OUTPUT2);
                }
                tw32_wait_f(GRC_LOCAL_CTRL,
                            tp->grc_local_ctrl | grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

                tw32_wait_f(GRC_LOCAL_CTRL,
                            tp->grc_local_ctrl | grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                if (!no_gpio2) {
                        grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
                        tw32_wait_f(GRC_LOCAL_CTRL,
                                    tp->grc_local_ctrl | grc_local_ctrl,
                                    TG3_GRC_LCLCTL_PWRSW_DELAY);
                }
        }
}

static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
        u32 msg = 0;

        /* Serialize power state transitions */
        if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
                return;

        if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
                msg = TG3_GPIO_MSG_NEED_VAUX;

        msg = tg3_set_function_status(tp, msg);

        if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
                goto done;

        if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
                tg3_pwrsrc_switch_to_vaux(tp);
        else
                tg3_pwrsrc_die_with_vmain(tp);

done:
        tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}

2968 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2969 {
2970         bool need_vaux = false;
2971
2972         /* The GPIOs do something completely different on 57765. */
2973         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2974                 return;
2975
2976         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2977             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2978             tg3_asic_rev(tp) == ASIC_REV_5720) {
2979                 tg3_frob_aux_power_5717(tp, include_wol ?
2980                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2981                 return;
2982         }
2983
2984         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2985                 struct net_device *dev_peer;
2986
2987                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2988
2989                 /* remove_one() may have been run on the peer. */
2990                 if (dev_peer) {
2991                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2992
2993                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2994                                 return;
2995
2996                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2997                             tg3_flag(tp_peer, ENABLE_ASF))
2998                                 need_vaux = true;
2999                 }
3000         }
3001
3002         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3003             tg3_flag(tp, ENABLE_ASF))
3004                 need_vaux = true;
3005
3006         if (need_vaux)
3007                 tg3_pwrsrc_switch_to_vaux(tp);
3008         else
3009                 tg3_pwrsrc_die_with_vmain(tp);
3010 }
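
/* Editor's summary of the legacy path above (everything except the
 * 5717/5719/5720 branch): vaux is needed when this port, or a live peer
 * port, has ASF enabled or has WOL both requested and enabled. If the
 * peer has already completed init, the power source is left for the
 * peer to manage and nothing is switched here.
 */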
3011
3012 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3013 {
3014         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3015                 return 1;
3016         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3017                 if (speed != SPEED_10)
3018                         return 1;
3019         } else if (speed == SPEED_10)
3020                 return 1;
3021
3022         return 0;
3023 }
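
/* Editor's note (a restatement of the logic above): link polarity is
 * inverted (returns 1) when
 *   - the LED mode is LED_CTRL_MODE_PHY_2, at any speed, or
 *   - the PHY is a BCM5411 and the speed is not 10 Mbps, or
 *   - any other PHY is running at 10 Mbps.
 */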
3024
3025 static bool tg3_phy_power_bug(struct tg3 *tp)
3026 {
3027         switch (tg3_asic_rev(tp)) {
3028         case ASIC_REV_5700:
3029         case ASIC_REV_5704:
3030                 return true;
3031         case ASIC_REV_5780:
3032                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3033                         return true;
3034                 return false;
3035         case ASIC_REV_5717:
3036                 if (!tp->pci_fn)
3037                         return true;
3038                 return false;
3039         case ASIC_REV_5719:
3040         case ASIC_REV_5720:
3041                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3042                     !tp->pci_fn)
3043                         return true;
3044                 return false;
3045         }
3046
3047         return false;
3048 }
3049
3050 static bool tg3_phy_led_bug(struct tg3 *tp)
3051 {
3052         switch (tg3_asic_rev(tp)) {
3053         case ASIC_REV_5719:
3054         case ASIC_REV_5720:
3055                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3056                     !tp->pci_fn)
3057                         return true;
3058                 return false;
3059         }
3060
3061         return false;
3062 }
3063
3064 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3065 {
3066         u32 val;
3067
3068         if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3069                 return;
3070
3071         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3072                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3073                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3074                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3075
3076                         sg_dig_ctrl |=
3077                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3078                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
3079                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3080                 }
3081                 return;
3082         }
3083
3084         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3085                 tg3_bmcr_reset(tp);
3086                 val = tr32(GRC_MISC_CFG);
3087                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3088                 udelay(40);
3089                 return;
3090         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3091                 u32 phytest;
3092                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3093                         u32 phy;
3094
3095                         tg3_writephy(tp, MII_ADVERTISE, 0);
3096                         tg3_writephy(tp, MII_BMCR,
3097                                      BMCR_ANENABLE | BMCR_ANRESTART);
3098
3099                         tg3_writephy(tp, MII_TG3_FET_TEST,
3100                                      phytest | MII_TG3_FET_SHADOW_EN);
3101                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3102                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3103                                 tg3_writephy(tp,
3104                                              MII_TG3_FET_SHDW_AUXMODE4,
3105                                              phy);
3106                         }
3107                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3108                 }
3109                 return;
3110         } else if (do_low_power) {
3111                 if (!tg3_phy_led_bug(tp))
3112                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3113                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3114
3115                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3116                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3117                       MII_TG3_AUXCTL_PCTL_VREG_11V;
3118                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3119         }
3120
3121         /* The PHY must not be powered down on some chips because of
3122          * hardware bugs; tg3_phy_power_bug() identifies them.
3123          */
3124         if (tg3_phy_power_bug(tp))
3125                 return;
3126
3127         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3128             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3129                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3130                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3131                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3132                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3133         }
3134
3135         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3136 }
3137
3138 /* tp->lock is held. */
3139 static int tg3_nvram_lock(struct tg3 *tp)
3140 {
3141         if (tg3_flag(tp, NVRAM)) {
3142                 int i;
3143
3144                 if (tp->nvram_lock_cnt == 0) {
3145                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3146                         for (i = 0; i < 8000; i++) {
3147                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3148                                         break;
3149                                 udelay(20);
3150                         }
3151                         if (i == 8000) {
3152                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3153                                 return -ENODEV;
3154                         }
3155                 }
3156                 tp->nvram_lock_cnt++;
3157         }
3158         return 0;
3159 }
3160
3161 /* tp->lock is held. */
3162 static void tg3_nvram_unlock(struct tg3 *tp)
3163 {
3164         if (tg3_flag(tp, NVRAM)) {
3165                 if (tp->nvram_lock_cnt > 0)
3166                         tp->nvram_lock_cnt--;
3167                 if (tp->nvram_lock_cnt == 0)
3168                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3169         }
3170 }
3171
3172 /* tp->lock is held. */
3173 static void tg3_enable_nvram_access(struct tg3 *tp)
3174 {
3175         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3176                 u32 nvaccess = tr32(NVRAM_ACCESS);
3177
3178                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3179         }
3180 }
3181
3182 /* tp->lock is held. */
3183 static void tg3_disable_nvram_access(struct tg3 *tp)
3184 {
3185         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3186                 u32 nvaccess = tr32(NVRAM_ACCESS);
3187
3188                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3189         }
3190 }
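
/* A minimal usage sketch of the arbitration helpers above (hypothetical
 * caller, not part of the driver; the real pattern appears in
 * tg3_nvram_read() further down): take the software arbitration lock,
 * enable host access, do the register I/O, then undo both in reverse
 * order. Note tg3_nvram_lock() busy-waits up to ~160 ms (8000 x 20 us).
 */
#if 0   /* illustrative only */
static int tg3_nvram_do_io_example(struct tg3 *tp)
{
        int ret = tg3_nvram_lock(tp);

        if (ret)
                return ret;
        tg3_enable_nvram_access(tp);
        /* ... NVRAM_ADDR / NVRAM_CMD register traffic goes here ... */
        tg3_disable_nvram_access(tp);
        tg3_nvram_unlock(tp);
        return 0;
}
#endif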
3191
3192 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3193                                         u32 offset, u32 *val)
3194 {
3195         u32 tmp;
3196         int i;
3197
3198         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3199                 return -EINVAL;
3200
3201         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3202                                         EEPROM_ADDR_DEVID_MASK |
3203                                         EEPROM_ADDR_READ);
3204         tw32(GRC_EEPROM_ADDR,
3205              tmp |
3206              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3207              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3208               EEPROM_ADDR_ADDR_MASK) |
3209              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3210
3211         for (i = 0; i < 1000; i++) {
3212                 tmp = tr32(GRC_EEPROM_ADDR);
3213
3214                 if (tmp & EEPROM_ADDR_COMPLETE)
3215                         break;
3216                 msleep(1);
3217         }
3218         if (!(tmp & EEPROM_ADDR_COMPLETE))
3219                 return -EBUSY;
3220
3221         tmp = tr32(GRC_EEPROM_DATA);
3222
3223         /*
3224          * The data will always be opposite the native endian
3225          * format.  Perform a blind byteswap to compensate.
3226          */
3227         *val = swab32(tmp);
3228
3229         return 0;
3230 }
3231
3232 #define NVRAM_CMD_TIMEOUT 10000
3233
3234 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3235 {
3236         int i;
3237
3238         tw32(NVRAM_CMD, nvram_cmd);
3239         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3240                 udelay(10);
3241                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3242                         udelay(10);
3243                         break;
3244                 }
3245         }
3246
3247         if (i == NVRAM_CMD_TIMEOUT)
3248                 return -EBUSY;
3249
3250         return 0;
3251 }
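
/* Editor's note: the poll above bounds each NVRAM command at roughly
 * NVRAM_CMD_TIMEOUT * 10 us = 100 ms of busy-waiting before -EBUSY.
 */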
3252
3253 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3254 {
3255         if (tg3_flag(tp, NVRAM) &&
3256             tg3_flag(tp, NVRAM_BUFFERED) &&
3257             tg3_flag(tp, FLASH) &&
3258             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3259             (tp->nvram_jedecnum == JEDEC_ATMEL))
3260
3261                 addr = ((addr / tp->nvram_pagesize) <<
3262                         ATMEL_AT45DB0X1B_PAGE_POS) +
3263                        (addr % tp->nvram_pagesize);
3264
3265         return addr;
3266 }
3267
3268 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3269 {
3270         if (tg3_flag(tp, NVRAM) &&
3271             tg3_flag(tp, NVRAM_BUFFERED) &&
3272             tg3_flag(tp, FLASH) &&
3273             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3274             (tp->nvram_jedecnum == JEDEC_ATMEL))
3275
3276                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3277                         tp->nvram_pagesize) +
3278                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3279
3280         return addr;
3281 }
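
/* Worked example (a sketch; assumes the Atmel AT45DB0x1B geometry the
 * two helpers above target: 264-byte pages with a page-offset field of
 * ATMEL_AT45DB0X1B_PAGE_POS == 9 bits). For linear address 1000:
 *
 *   phys    = ((1000 / 264) << 9) + (1000 % 264) = (3 << 9) + 208 = 1744
 *   logical = ((1744 >> 9) * 264) + (1744 & 511) =  792 + 208     = 1000
 *
 * i.e. the two translations are inverses for in-range addresses.
 */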
3282
3283 /* NOTE: Data read in from NVRAM is byteswapped according to
3284  * the byteswapping settings for all other register accesses.
3285  * tg3 devices are BE devices, so on a BE machine, the data
3286  * returned will be exactly as it is seen in NVRAM.  On a LE
3287  * machine, the 32-bit value will be byteswapped.
3288  */
3289 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3290 {
3291         int ret;
3292
3293         if (!tg3_flag(tp, NVRAM))
3294                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3295
3296         offset = tg3_nvram_phys_addr(tp, offset);
3297
3298         if (offset > NVRAM_ADDR_MSK)
3299                 return -EINVAL;
3300
3301         ret = tg3_nvram_lock(tp);
3302         if (ret)
3303                 return ret;
3304
3305         tg3_enable_nvram_access(tp);
3306
3307         tw32(NVRAM_ADDR, offset);
3308         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3309                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3310
3311         if (ret == 0)
3312                 *val = tr32(NVRAM_RDDATA);
3313
3314         tg3_disable_nvram_access(tp);
3315
3316         tg3_nvram_unlock(tp);
3317
3318         return ret;
3319 }
3320
3321 /* Ensures NVRAM data is in bytestream format. */
3322 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3323 {
3324         u32 v;
3325         int res = tg3_nvram_read(tp, offset, &v);
3326         if (!res)
3327                 *val = cpu_to_be32(v);
3328         return res;
3329 }
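
/* A minimal caller sketch (hypothetical helper, not part of the driver):
 * read a dword-aligned byte range in bytestream order by looping over
 * tg3_nvram_read_be32(). Copying the __be32 with memcpy() keeps the
 * bytes in NVRAM order on both little- and big-endian hosts.
 */
#if 0   /* illustrative only */
static int tg3_nvram_read_buf_example(struct tg3 *tp, u32 offset,
                                      u32 len, u8 *buf)
{
        u32 i;

        for (i = 0; i < len; i += 4) {
                __be32 v;
                int err = tg3_nvram_read_be32(tp, offset + i, &v);

                if (err)
                        return err;
                memcpy(buf + i, &v, 4);
        }
        return 0;
}
#endif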
3330
3331 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3332                                     u32 offset, u32 len, u8 *buf)
3333 {
3334         int i, j, rc = 0;
3335         u32 val;
3336
3337         for (i = 0; i < len; i += 4) {
3338                 u32 addr;
3339                 __be32 data;
3340
3341                 addr = offset + i;
3342
3343                 memcpy(&data, buf + i, 4);
3344
3345                 /*
3346                  * The SEEPROM interface expects the data to always be opposite
3347                  * the native endian format.  We accomplish this by reversing
3348                  * all the operations that would have been performed on the
3349                  * data from a call to tg3_nvram_read_be32().
3350                  */
3351                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3352
3353                 val = tr32(GRC_EEPROM_ADDR);
3354                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3355
3356                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3357                         EEPROM_ADDR_READ);
3358                 tw32(GRC_EEPROM_ADDR, val |
3359                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3360                         (addr & EEPROM_ADDR_ADDR_MASK) |
3361                         EEPROM_ADDR_START |
3362                         EEPROM_ADDR_WRITE);
3363
3364                 for (j = 0; j < 1000; j++) {
3365                         val = tr32(GRC_EEPROM_ADDR);
3366
3367                         if (val & EEPROM_ADDR_COMPLETE)
3368                                 break;
3369                         msleep(1);
3370                 }
3371                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3372                         rc = -EBUSY;
3373                         break;
3374                 }
3375         }
3376
3377         return rc;
3378 }
3379
3380 /* offset and length are dword aligned */
3381 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3382                 u8 *buf)
3383 {
3384         int ret = 0;
3385         u32 pagesize = tp->nvram_pagesize;
3386         u32 pagemask = pagesize - 1;
3387         u32 nvram_cmd;
3388         u8 *tmp;
3389
3390         tmp = kmalloc(pagesize, GFP_KERNEL);
3391         if (tmp == NULL)
3392                 return -ENOMEM;
3393
3394         while (len) {
3395                 int j;
3396                 u32 phy_addr, page_off, size;
3397
3398                 phy_addr = offset & ~pagemask;
3399
3400                 for (j = 0; j < pagesize; j += 4) {
3401                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3402                                                   (__be32 *) (tmp + j));
3403                         if (ret)
3404                                 break;
3405                 }
3406                 if (ret)
3407                         break;
3408
3409                 page_off = offset & pagemask;
3410                 size = pagesize;
3411                 if (len < size)
3412                         size = len;
3413
3414                 len -= size;
3415
3416                 memcpy(tmp + page_off, buf, size);
3417
3418                 offset = offset + (pagesize - page_off);
3419
3420                 tg3_enable_nvram_access(tp);
3421
3422                 /*
3423                  * Before we can erase the flash page, we need
3424                  * to issue a special "write enable" command.
3425                  */
3426                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3427
3428                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3429                         break;
3430
3431                 /* Erase the target page */
3432                 tw32(NVRAM_ADDR, phy_addr);
3433
3434                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3435                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3436
3437                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3438                         break;
3439
3440                 /* Issue another write enable to start the write. */
3441                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3442
3443                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3444                         break;
3445
3446                 for (j = 0; j < pagesize; j += 4) {
3447                         __be32 data;
3448
3449                         data = *((__be32 *) (tmp + j));
3450
3451                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3452
3453                         tw32(NVRAM_ADDR, phy_addr + j);
3454
3455                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3456                                 NVRAM_CMD_WR;
3457
3458                         if (j == 0)
3459                                 nvram_cmd |= NVRAM_CMD_FIRST;
3460                         else if (j == (pagesize - 4))
3461                                 nvram_cmd |= NVRAM_CMD_LAST;
3462
3463                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3464                         if (ret)
3465                                 break;
3466                 }
3467                 if (ret)
3468                         break;
3469         }
3470
3471         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3472         tg3_nvram_exec_cmd(tp, nvram_cmd);
3473
3474         kfree(tmp);
3475
3476         return ret;
3477 }
3478
3479 /* offset and length are dword aligned */
3480 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3481                 u8 *buf)
3482 {
3483         int i, ret = 0;
3484
3485         for (i = 0; i < len; i += 4, offset += 4) {
3486                 u32 page_off, phy_addr, nvram_cmd;
3487                 __be32 data;
3488
3489                 memcpy(&data, buf + i, 4);
3490                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3491
3492                 page_off = offset % tp->nvram_pagesize;
3493
3494                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3495
3496                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3497
3498                 if (page_off == 0 || i == 0)
3499                         nvram_cmd |= NVRAM_CMD_FIRST;
3500                 if (page_off == (tp->nvram_pagesize - 4))
3501                         nvram_cmd |= NVRAM_CMD_LAST;
3502
3503                 if (i == (len - 4))
3504                         nvram_cmd |= NVRAM_CMD_LAST;
3505
3506                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3507                     !tg3_flag(tp, FLASH) ||
3508                     !tg3_flag(tp, 57765_PLUS))
3509                         tw32(NVRAM_ADDR, phy_addr);
3510
3511                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3512                     !tg3_flag(tp, 5755_PLUS) &&
3513                     (tp->nvram_jedecnum == JEDEC_ST) &&
3514                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3515                         u32 cmd;
3516
3517                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3518                         ret = tg3_nvram_exec_cmd(tp, cmd);
3519                         if (ret)
3520                                 break;
3521                 }
3522                 if (!tg3_flag(tp, FLASH)) {
3523                         /* We always do complete word writes to the EEPROM. */
3524                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3525                 }
3526
3527                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3528                 if (ret)
3529                         break;
3530         }
3531         return ret;
3532 }
3533
3534 /* offset and length are dword aligned */
3535 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3536 {
3537         int ret;
3538
3539         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3540                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3541                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3542                 udelay(40);
3543         }
3544
3545         if (!tg3_flag(tp, NVRAM)) {
3546                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3547         } else {
3548                 u32 grc_mode;
3549
3550                 ret = tg3_nvram_lock(tp);
3551                 if (ret)
3552                         return ret;
3553
3554                 tg3_enable_nvram_access(tp);
3555                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3556                         tw32(NVRAM_WRITE1, 0x406);
3557
3558                 grc_mode = tr32(GRC_MODE);
3559                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3560
3561                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3562                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3563                                 buf);
3564                 } else {
3565                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3566                                 buf);
3567                 }
3568
3569                 grc_mode = tr32(GRC_MODE);
3570                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3571
3572                 tg3_disable_nvram_access(tp);
3573                 tg3_nvram_unlock(tp);
3574         }
3575
3576         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3577                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3578                 udelay(40);
3579         }
3580
3581         return ret;
3582 }
3583
3584 #define RX_CPU_SCRATCH_BASE     0x30000
3585 #define RX_CPU_SCRATCH_SIZE     0x04000
3586 #define TX_CPU_SCRATCH_BASE     0x34000
3587 #define TX_CPU_SCRATCH_SIZE     0x04000
3588
3589 /* tp->lock is held. */
3590 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3591 {
3592         int i;
3593         const int iters = 10000;
3594
3595         for (i = 0; i < iters; i++) {
3596                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3597                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3598                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3599                         break;
3600                 if (pci_channel_offline(tp->pdev))
3601                         return -EBUSY;
3602         }
3603
3604         return (i == iters) ? -EBUSY : 0;
3605 }
3606
3607 /* tp->lock is held. */
3608 static int tg3_rxcpu_pause(struct tg3 *tp)
3609 {
3610         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3611
3612         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3613         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3614         udelay(10);
3615
3616         return rc;
3617 }
3618
3619 /* tp->lock is held. */
3620 static int tg3_txcpu_pause(struct tg3 *tp)
3621 {
3622         return tg3_pause_cpu(tp, TX_CPU_BASE);
3623 }
3624
3625 /* tp->lock is held. */
3626 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3627 {
3628         tw32(cpu_base + CPU_STATE, 0xffffffff);
3629         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3630 }
3631
3632 /* tp->lock is held. */
3633 static void tg3_rxcpu_resume(struct tg3 *tp)
3634 {
3635         tg3_resume_cpu(tp, RX_CPU_BASE);
3636 }
3637
3638 /* tp->lock is held. */
3639 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3640 {
3641         int rc;
3642
3643         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3644
3645         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3646                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3647
3648                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3649                 return 0;
3650         }
3651         if (cpu_base == RX_CPU_BASE) {
3652                 rc = tg3_rxcpu_pause(tp);
3653         } else {
3654                 /*
3655                  * There is only an Rx CPU for the 5750 derivative in the
3656                  * BCM4785.
3657                  */
3658                 if (tg3_flag(tp, IS_SSB_CORE))
3659                         return 0;
3660
3661                 rc = tg3_txcpu_pause(tp);
3662         }
3663
3664         if (rc) {
3665                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3666                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3667                 return -ENODEV;
3668         }
3669
3670         /* Clear firmware's nvram arbitration. */
3671         if (tg3_flag(tp, NVRAM))
3672                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3673         return 0;
3674 }
3675
3676 static int tg3_fw_data_len(struct tg3 *tp,
3677                            const struct tg3_firmware_hdr *fw_hdr)
3678 {
3679         int fw_len;
3680
3681         /* Non-fragmented firmware has one firmware header followed by a
3682          * contiguous chunk of data to be written. The length field in that
3683          * header is not the length of the data to be written but the
3684          * complete length of the bss. The data length is instead derived
3685          * from tp->fw->size minus the header length.
3686          *
3687          * Fragmented firmware has a main header followed by multiple
3688          * fragments. Each fragment is identical to non-fragmented firmware,
3689          * with a firmware header followed by a contiguous chunk of data. In
3690          * the main header, the length field is unused and set to 0xffffffff.
3691          * In each fragment header the length is the entire size of that
3692          * fragment, i.e. fragment data plus header length. The data length
3693          * is therefore the length field in the header minus TG3_FW_HDR_LEN.
3694          */
3695         if (tp->fw_len == 0xffffffff)
3696                 fw_len = be32_to_cpu(fw_hdr->len);
3697         else
3698                 fw_len = tp->fw->size;
3699
3700         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3701 }
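
/* Layout sketch of the two formats described above (illustrative):
 *
 *   non-fragmented:  [hdr: ver | base | len = bss end - text start][data...]
 *                    data words = (tp->fw->size - TG3_FW_HDR_LEN) / 4
 *
 *   fragmented:      [main hdr: len = 0xffffffff]
 *                    [frag hdr: len = TG3_FW_HDR_LEN + data][data...]
 *                    [frag hdr][data...] ...
 *                    data words per fragment = (len - TG3_FW_HDR_LEN) / 4
 */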
3702
3703 /* tp->lock is held. */
3704 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3705                                  u32 cpu_scratch_base, int cpu_scratch_size,
3706                                  const struct tg3_firmware_hdr *fw_hdr)
3707 {
3708         int err, i;
3709         void (*write_op)(struct tg3 *, u32, u32);
3710         int total_len = tp->fw->size;
3711
3712         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3713                 netdev_err(tp->dev,
3714                            "%s: Trying to load TX cpu firmware on a 5705-class device\n",
3715                            __func__);
3716                 return -EINVAL;
3717         }
3718
3719         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3720                 write_op = tg3_write_mem;
3721         else
3722                 write_op = tg3_write_indirect_reg32;
3723
3724         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3725                 /* It is possible that bootcode is still loading at this point.
3726                  * Get the nvram lock first before halting the cpu.
3727                  */
3728                 int lock_err = tg3_nvram_lock(tp);
3729                 err = tg3_halt_cpu(tp, cpu_base);
3730                 if (!lock_err)
3731                         tg3_nvram_unlock(tp);
3732                 if (err)
3733                         goto out;
3734
3735                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3736                         write_op(tp, cpu_scratch_base + i, 0);
3737                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3738                 tw32(cpu_base + CPU_MODE,
3739                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3740         } else {
3741                 /* Subtract additional main header for fragmented firmware and
3742                  * advance to the first fragment
3743                  */
3744                 total_len -= TG3_FW_HDR_LEN;
3745                 fw_hdr++;
3746         }
3747
3748         do {
3749                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3750                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3751                         write_op(tp, cpu_scratch_base +
3752                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3753                                      (i * sizeof(u32)),
3754                                  be32_to_cpu(fw_data[i]));
3755
3756                 total_len -= be32_to_cpu(fw_hdr->len);
3757
3758                 /* Advance to next fragment */
3759                 fw_hdr = (struct tg3_firmware_hdr *)
3760                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3761         } while (total_len > 0);
3762
3763         err = 0;
3764
3765 out:
3766         return err;
3767 }
3768
3769 /* tp->lock is held. */
3770 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3771 {
3772         int i;
3773         const int iters = 5;
3774
3775         tw32(cpu_base + CPU_STATE, 0xffffffff);
3776         tw32_f(cpu_base + CPU_PC, pc);
3777
3778         for (i = 0; i < iters; i++) {
3779                 if (tr32(cpu_base + CPU_PC) == pc)
3780                         break;
3781                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3782                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3783                 tw32_f(cpu_base + CPU_PC, pc);
3784                 udelay(1000);
3785         }
3786
3787         return (i == iters) ? -EBUSY : 0;
3788 }
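
/* Editor's note: the loop above retries at most 5 times with a 1 ms
 * delay, so a CPU that never latches the new PC costs about 5 ms
 * before -EBUSY is returned.
 */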
3789
3790 /* tp->lock is held. */
3791 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3792 {
3793         const struct tg3_firmware_hdr *fw_hdr;
3794         int err;
3795
3796         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3797
3798         /* The firmware blob starts with version numbers, followed by
3799            the start address and length. The length field covers the
3800            complete image: length = end_address_of_bss - start_address_of_text.
3801            The remainder is the blob to be loaded contiguously
3802            from the start address. */
3803
3804         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3805                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3806                                     fw_hdr);
3807         if (err)
3808                 return err;
3809
3810         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3811                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3812                                     fw_hdr);
3813         if (err)
3814                 return err;
3815
3816         /* Now start up only the RX cpu. */
3817         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3818                                        be32_to_cpu(fw_hdr->base_addr));
3819         if (err) {
3820                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3821                            "should be %08x\n", __func__,
3822                            tr32(RX_CPU_BASE + CPU_PC),
3823                            be32_to_cpu(fw_hdr->base_addr));
3824                 return -ENODEV;
3825         }
3826
3827         tg3_rxcpu_resume(tp);
3828
3829         return 0;
3830 }
3831
3832 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3833 {
3834         const int iters = 1000;
3835         int i;
3836         u32 val;
3837
3838         /* Wait for boot code to complete initialization and enter service
3839          * loop. It is then safe to download service patches.
3840          */
3841         for (i = 0; i < iters; i++) {
3842                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3843                         break;
3844
3845                 udelay(10);
3846         }
3847
3848         if (i == iters) {
3849                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3850                 return -EBUSY;
3851         }
3852
3853         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3854         if (val & 0xff) {
3855                 netdev_warn(tp->dev,
3856                             "Other patches exist. Not downloading EEE patch\n");
3857                 return -EEXIST;
3858         }
3859
3860         return 0;
3861 }
3862
3863 /* tp->lock is held. */
3864 static void tg3_load_57766_firmware(struct tg3 *tp)
3865 {
3866         struct tg3_firmware_hdr *fw_hdr;
3867
3868         if (!tg3_flag(tp, NO_NVRAM))
3869                 return;
3870
3871         if (tg3_validate_rxcpu_state(tp))
3872                 return;
3873
3874         if (!tp->fw)
3875                 return;
3876
3877         /* This firmware blob has a different format than older firmware
3878          * releases, as described below. The main difference is that the
3879          * data is fragmented and written to non-contiguous locations.
3880          *
3881          * The blob begins with a firmware header identical to other
3882          * firmware, consisting of version, base address and length. The
3883          * length here is unused and set to 0xffffffff.
3884          *
3885          * This is followed by a series of firmware fragments, each
3886          * individually identical to older firmware, i.e. a firmware
3887          * header followed by the data for that fragment. The version
3888          * field of each fragment header is unused.
3889          */
3890
3891         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3892         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3893                 return;
3894
3895         if (tg3_rxcpu_pause(tp))
3896                 return;
3897
3898         /* tg3_load_firmware_cpu() will always succeed for the 57766. */
3899         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3900
3901         tg3_rxcpu_resume(tp);
3902 }
3903
3904 /* tp->lock is held. */
3905 static int tg3_load_tso_firmware(struct tg3 *tp)
3906 {
3907         const struct tg3_firmware_hdr *fw_hdr;
3908         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3909         int err;
3910
3911         if (!tg3_flag(tp, FW_TSO))
3912                 return 0;
3913
3914         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3915
3916         /* The firmware blob starts with version numbers, followed by
3917            the start address and length. The length field covers the
3918            complete image: length = end_address_of_bss - start_address_of_text.
3919            The remainder is the blob to be loaded contiguously
3920            from the start address. */
3921
3922         cpu_scratch_size = tp->fw_len;
3923
3924         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3925                 cpu_base = RX_CPU_BASE;
3926                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3927         } else {
3928                 cpu_base = TX_CPU_BASE;
3929                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3930                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3931         }
3932
3933         err = tg3_load_firmware_cpu(tp, cpu_base,
3934                                     cpu_scratch_base, cpu_scratch_size,
3935                                     fw_hdr);
3936         if (err)
3937                 return err;
3938
3939         /* Now start up the cpu. */
3940         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3941                                        be32_to_cpu(fw_hdr->base_addr));
3942         if (err) {
3943                 netdev_err(tp->dev,
3944                            "%s fails to set CPU PC, is %08x should be %08x\n",
3945                            __func__, tr32(cpu_base + CPU_PC),
3946                            be32_to_cpu(fw_hdr->base_addr));
3947                 return -ENODEV;
3948         }
3949
3950         tg3_resume_cpu(tp, cpu_base);
3951         return 0;
3952 }
3953
3954 /* tp->lock is held. */
3955 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3956 {
3957         u32 addr_high, addr_low;
3958
3959         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3960         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3961                     (mac_addr[4] <<  8) | mac_addr[5]);
3962
3963         if (index < 4) {
3964                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3965                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3966         } else {
3967                 index -= 4;
3968                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3969                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3970         }
3971 }
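
/* Worked example (from the packing above): for MAC 00:10:18:aa:bb:cc,
 *   addr_high = 0x00000010  (bytes 0-1)
 *   addr_low  = 0x18aabbcc  (bytes 2-5)
 * Indices 0-3 land in the MAC_ADDR_* register pairs; indices 4 and up
 * use the extended MAC_EXTADDR_* registers.
 */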
3972
3973 /* tp->lock is held. */
3974 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3975 {
3976         u32 addr_high;
3977         int i;
3978
3979         for (i = 0; i < 4; i++) {
3980                 if (i == 1 && skip_mac_1)
3981                         continue;
3982                 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3983         }
3984
3985         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3986             tg3_asic_rev(tp) == ASIC_REV_5704) {
3987                 for (i = 4; i < 16; i++)
3988                         __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3989         }
3990
3991         addr_high = (tp->dev->dev_addr[0] +
3992                      tp->dev->dev_addr[1] +
3993                      tp->dev->dev_addr[2] +
3994                      tp->dev->dev_addr[3] +
3995                      tp->dev->dev_addr[4] +
3996                      tp->dev->dev_addr[5]) &
3997                 TX_BACKOFF_SEED_MASK;
3998         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3999 }
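
/* Editor's note: the backoff seed above is just the byte sum of the MAC
 * address, masked to TX_BACKOFF_SEED_MASK. For 00:10:18:aa:bb:cc the sum
 * is 0x259, so (assuming the mask covers at least the low 10 bits) the
 * seed written is 0x259.
 */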
4000
4001 static void tg3_enable_register_access(struct tg3 *tp)
4002 {
4003         /*
4004          * Make sure register accesses (indirect or otherwise) will function
4005          * correctly.
4006          */
4007         pci_write_config_dword(tp->pdev,
4008                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4009 }
4010
4011 static int tg3_power_up(struct tg3 *tp)
4012 {
4013         int err;
4014
4015         tg3_enable_register_access(tp);
4016
4017         err = pci_set_power_state(tp->pdev, PCI_D0);
4018         if (!err) {
4019                 /* Switch out of Vaux if it is a NIC */
4020                 tg3_pwrsrc_switch_to_vmain(tp);
4021         } else {
4022                 netdev_err(tp->dev, "Transition to D0 failed\n");
4023         }
4024
4025         return err;
4026 }
4027
4028 static int tg3_setup_phy(struct tg3 *, bool);
4029
4030 static int tg3_power_down_prepare(struct tg3 *tp)
4031 {
4032         u32 misc_host_ctrl;
4033         bool device_should_wake, do_low_power;
4034
4035         tg3_enable_register_access(tp);
4036
4037         /* Restore the CLKREQ setting. */
4038         if (tg3_flag(tp, CLKREQ_BUG))
4039                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4040                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4041
4042         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4043         tw32(TG3PCI_MISC_HOST_CTRL,
4044              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4045
4046         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4047                              tg3_flag(tp, WOL_ENABLE);
4048
4049         if (tg3_flag(tp, USE_PHYLIB)) {
4050                 do_low_power = false;
4051                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4052                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4053                         struct phy_device *phydev;
4054                         u32 phyid, advertising;
4055
4056                         phydev = tp->mdio_bus->phy_map[tp->phy_addr];
4057
4058                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4059
4060                         tp->link_config.speed = phydev->speed;
4061                         tp->link_config.duplex = phydev->duplex;
4062                         tp->link_config.autoneg = phydev->autoneg;
4063                         tp->link_config.advertising = phydev->advertising;
4064
4065                         advertising = ADVERTISED_TP |
4066                                       ADVERTISED_Pause |
4067                                       ADVERTISED_Autoneg |
4068                                       ADVERTISED_10baseT_Half;
4069
4070                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4071                                 if (tg3_flag(tp, WOL_SPEED_100MB))
4072                                         advertising |=
4073                                                 ADVERTISED_100baseT_Half |
4074                                                 ADVERTISED_100baseT_Full |
4075                                                 ADVERTISED_10baseT_Full;
4076                                 else
4077                                         advertising |= ADVERTISED_10baseT_Full;
4078                         }
4079
4080                         phydev->advertising = advertising;
4081
4082                         phy_start_aneg(phydev);
4083
4084                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4085                         if (phyid != PHY_ID_BCMAC131) {
4086                                 phyid &= PHY_BCM_OUI_MASK;
4087                                 if (phyid == PHY_BCM_OUI_1 ||
4088                                     phyid == PHY_BCM_OUI_2 ||
4089                                     phyid == PHY_BCM_OUI_3)
4090                                         do_low_power = true;
4091                         }
4092                 }
4093         } else {
4094                 do_low_power = true;
4095
4096                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4097                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4098
4099                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4100                         tg3_setup_phy(tp, false);
4101         }
4102
4103         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4104                 u32 val;
4105
4106                 val = tr32(GRC_VCPU_EXT_CTRL);
4107                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4108         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4109                 int i;
4110                 u32 val;
4111
4112                 for (i = 0; i < 200; i++) {
4113                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4114                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4115                                 break;
4116                         msleep(1);
4117                 }
4118         }
4119         if (tg3_flag(tp, WOL_CAP))
4120                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4121                                                      WOL_DRV_STATE_SHUTDOWN |
4122                                                      WOL_DRV_WOL |
4123                                                      WOL_SET_MAGIC_PKT);
4124
4125         if (device_should_wake) {
4126                 u32 mac_mode;
4127
4128                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4129                         if (do_low_power &&
4130                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4131                                 tg3_phy_auxctl_write(tp,
4132                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4133                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4134                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4135                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4136                                 udelay(40);
4137                         }
4138
4139                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4140                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4141                         else if (tp->phy_flags &
4142                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4143                                 if (tp->link_config.active_speed == SPEED_1000)
4144                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4145                                 else
4146                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4147                         } else
4148                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4149
4150                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4151                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4152                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4153                                              SPEED_100 : SPEED_10;
4154                                 if (tg3_5700_link_polarity(tp, speed))
4155                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4156                                 else
4157                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4158                         }
4159                 } else {
4160                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4161                 }
4162
4163                 if (!tg3_flag(tp, 5750_PLUS))
4164                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4165
4166                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4167                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4168                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4169                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4170
4171                 if (tg3_flag(tp, ENABLE_APE))
4172                         mac_mode |= MAC_MODE_APE_TX_EN |
4173                                     MAC_MODE_APE_RX_EN |
4174                                     MAC_MODE_TDE_ENABLE;
4175
4176                 tw32_f(MAC_MODE, mac_mode);
4177                 udelay(100);
4178
4179                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4180                 udelay(10);
4181         }
4182
4183         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4184             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4185              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4186                 u32 base_val;
4187
4188                 base_val = tp->pci_clock_ctrl;
4189                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4190                              CLOCK_CTRL_TXCLK_DISABLE);
4191
4192                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4193                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4194         } else if (tg3_flag(tp, 5780_CLASS) ||
4195                    tg3_flag(tp, CPMU_PRESENT) ||
4196                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4197                 /* do nothing */
4198         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4199                 u32 newbits1, newbits2;
4200
4201                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4202                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4203                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4204                                     CLOCK_CTRL_TXCLK_DISABLE |
4205                                     CLOCK_CTRL_ALTCLK);
4206                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4207                 } else if (tg3_flag(tp, 5705_PLUS)) {
4208                         newbits1 = CLOCK_CTRL_625_CORE;
4209                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4210                 } else {
4211                         newbits1 = CLOCK_CTRL_ALTCLK;
4212                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4213                 }
4214
4215                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4216                             40);
4217
4218                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4219                             40);
4220
4221                 if (!tg3_flag(tp, 5705_PLUS)) {
4222                         u32 newbits3;
4223
4224                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4225                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4226                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4227                                             CLOCK_CTRL_TXCLK_DISABLE |
4228                                             CLOCK_CTRL_44MHZ_CORE);
4229                         } else {
4230                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4231                         }
4232
4233                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4234                                     tp->pci_clock_ctrl | newbits3, 40);
4235                 }
4236         }
4237
4238         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4239                 tg3_power_down_phy(tp, do_low_power);
4240
4241         tg3_frob_aux_power(tp, true);
4242
4243         /* Workaround for unstable PLL clock */
4244         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4245             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4246              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4247                 u32 val = tr32(0x7d00);
4248
4249                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4250                 tw32(0x7d00, val);
4251                 if (!tg3_flag(tp, ENABLE_ASF)) {
4252                         int err;
4253
4254                         err = tg3_nvram_lock(tp);
4255                         tg3_halt_cpu(tp, RX_CPU_BASE);
4256                         if (!err)
4257                                 tg3_nvram_unlock(tp);
4258                 }
4259         }
4260
4261         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4262
4263         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4264
4265         return 0;
4266 }
4267
4268 static void tg3_power_down(struct tg3 *tp)
4269 {
4270         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4271         pci_set_power_state(tp->pdev, PCI_D3hot);
4272 }
4273
4274 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4275 {
4276         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4277         case MII_TG3_AUX_STAT_10HALF:
4278                 *speed = SPEED_10;
4279                 *duplex = DUPLEX_HALF;
4280                 break;
4281
4282         case MII_TG3_AUX_STAT_10FULL:
4283                 *speed = SPEED_10;
4284                 *duplex = DUPLEX_FULL;
4285                 break;
4286
4287         case MII_TG3_AUX_STAT_100HALF:
4288                 *speed = SPEED_100;
4289                 *duplex = DUPLEX_HALF;
4290                 break;
4291
4292         case MII_TG3_AUX_STAT_100FULL:
4293                 *speed = SPEED_100;
4294                 *duplex = DUPLEX_FULL;
4295                 break;
4296
4297         case MII_TG3_AUX_STAT_1000HALF:
4298                 *speed = SPEED_1000;
4299                 *duplex = DUPLEX_HALF;
4300                 break;
4301
4302         case MII_TG3_AUX_STAT_1000FULL:
4303                 *speed = SPEED_1000;
4304                 *duplex = DUPLEX_FULL;
4305                 break;
4306
4307         default:
4308                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4309                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4310                                  SPEED_10;
4311                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4312                                   DUPLEX_HALF;
4313                         break;
4314                 }
4315                 *speed = SPEED_UNKNOWN;
4316                 *duplex = DUPLEX_UNKNOWN;
4317                 break;
4318         }
4319 }
4320
4321 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4322 {
4323         int err = 0;
4324         u32 val, new_adv;
4325
4326         new_adv = ADVERTISE_CSMA;
4327         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4328         new_adv |= mii_advertise_flowctrl(flowctrl);
4329
4330         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4331         if (err)
4332                 goto done;
4333
4334         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4335                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4336
4337                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4338                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4339                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4340
4341                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4342                 if (err)
4343                         goto done;
4344         }
4345
4346         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4347                 goto done;
4348
4349         tw32(TG3_CPMU_EEE_MODE,
4350              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4351
4352         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4353         if (!err) {
4354                 u32 err2;
4355
4356                 val = 0;
4357                 /* Advertise 100-BaseTX EEE ability */
4358                 if (advertise & ADVERTISED_100baseT_Full)
4359                         val |= MDIO_AN_EEE_ADV_100TX;
4360                 /* Advertise 1000-BaseT EEE ability */
4361                 if (advertise & ADVERTISED_1000baseT_Full)
4362                         val |= MDIO_AN_EEE_ADV_1000T;
4363
4364                 if (!tp->eee.eee_enabled) {
4365                         val = 0;
4366                         tp->eee.advertised = 0;
4367                 } else {
4368                         tp->eee.advertised = advertise &
4369                                              (ADVERTISED_100baseT_Full |
4370                                               ADVERTISED_1000baseT_Full);
4371                 }
4372
4373                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4374                 if (err)
4375                         val = 0;
4376
4377                 switch (tg3_asic_rev(tp)) {
4378                 case ASIC_REV_5717:
4379                 case ASIC_REV_57765:
4380                 case ASIC_REV_57766:
4381                 case ASIC_REV_5719:
4382                         /* If we advertised any EEE capabilities above... */
4383                         if (val)
4384                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4385                                       MII_TG3_DSP_TAP26_RMRXSTO |
4386                                       MII_TG3_DSP_TAP26_OPCSINPT;
4387                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4388                         /* Fall through */
4389                 case ASIC_REV_5720:
4390                 case ASIC_REV_5762:
4391                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4392                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4393                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4394                 }
4395
4396                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4397                 if (!err)
4398                         err = err2;
4399         }
4400
4401 done:
4402         return err;
4403 }
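/* A hedged sketch of calling the helper above; the advertisement value
 * here is hypothetical:
 *
 *     u32 adv = ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full;
 *     int err = tg3_phy_autoneg_cfg(tp, adv, FLOW_CTRL_TX | FLOW_CTRL_RX);
 *
 * MII_ADVERTISE carries the 10/100 and pause bits, while the gigabit
 * bits live in MII_CTRL1000, which is why the 10_100_ONLY check skips
 * the second register write entirely.
 */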
4404
4405 static void tg3_phy_copper_begin(struct tg3 *tp)
4406 {
4407         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4408             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4409                 u32 adv, fc;
4410
4411                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4412                     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4413                         adv = ADVERTISED_10baseT_Half |
4414                               ADVERTISED_10baseT_Full;
4415                         if (tg3_flag(tp, WOL_SPEED_100MB))
4416                                 adv |= ADVERTISED_100baseT_Half |
4417                                        ADVERTISED_100baseT_Full;
4418                         if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4419                                 if (!(tp->phy_flags &
4420                                       TG3_PHYFLG_DISABLE_1G_HD_ADV))
4421                                         adv |= ADVERTISED_1000baseT_Half;
4422                                 adv |= ADVERTISED_1000baseT_Full;
4423                         }
4424
4425                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4426                 } else {
4427                         adv = tp->link_config.advertising;
4428                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4429                                 adv &= ~(ADVERTISED_1000baseT_Half |
4430                                          ADVERTISED_1000baseT_Full);
4431
4432                         fc = tp->link_config.flowctrl;
4433                 }
4434
4435                 tg3_phy_autoneg_cfg(tp, adv, fc);
4436
4437                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4438                     (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4439                         /* Normally during power down we want to autonegotiate
4440                          * the lowest possible speed for WOL. However, to avoid
4441                          * link flap, we leave it untouched.
4442                          */
4443                         return;
4444                 }
4445
4446                 tg3_writephy(tp, MII_BMCR,
4447                              BMCR_ANENABLE | BMCR_ANRESTART);
4448         } else {
4449                 int i;
4450                 u32 bmcr, orig_bmcr;
4451
4452                 tp->link_config.active_speed = tp->link_config.speed;
4453                 tp->link_config.active_duplex = tp->link_config.duplex;
4454
4455                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4456                         /* With autoneg disabled, 5715 only links up when the
4457                          * advertisement register has the configured speed
4458                          * enabled.
4459                          */
4460                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4461                 }
4462
4463                 bmcr = 0;
4464                 switch (tp->link_config.speed) {
4465                 default:
4466                 case SPEED_10:
4467                         break;
4468
4469                 case SPEED_100:
4470                         bmcr |= BMCR_SPEED100;
4471                         break;
4472
4473                 case SPEED_1000:
4474                         bmcr |= BMCR_SPEED1000;
4475                         break;
4476                 }
4477
4478                 if (tp->link_config.duplex == DUPLEX_FULL)
4479                         bmcr |= BMCR_FULLDPLX;
4480
4481                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4482                     (bmcr != orig_bmcr)) {
4483                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4484                         for (i = 0; i < 1500; i++) {
4485                                 u32 tmp;
4486
4487                                 udelay(10);
4488                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4489                                     tg3_readphy(tp, MII_BMSR, &tmp))
4490                                         continue;
4491                                 if (!(tmp & BMSR_LSTATUS)) {
4492                                         udelay(40);
4493                                         break;
4494                                 }
4495                         }
4496                         tg3_writephy(tp, MII_BMCR, bmcr);
4497                         udelay(40);
4498                 }
4499         }
4500 }
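/* Note on the forced-speed path above: BMCR_LOOPBACK is written first,
 * which drops the link, and the 1500-iteration poll waits for
 * BMSR_LSTATUS to clear before the real BMCR value goes in, presumably
 * so the link partner sees one clean speed change instead of a flapping
 * link.
 */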
4501
4502 static int tg3_phy_pull_config(struct tg3 *tp)
4503 {
4504         int err;
4505         u32 val;
4506
4507         err = tg3_readphy(tp, MII_BMCR, &val);
4508         if (err)
4509                 goto done;
4510
4511         if (!(val & BMCR_ANENABLE)) {
4512                 tp->link_config.autoneg = AUTONEG_DISABLE;
4513                 tp->link_config.advertising = 0;
4514                 tg3_flag_clear(tp, PAUSE_AUTONEG);
4515
4516                 err = -EIO;
4517
4518                 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4519                 case 0:
4520                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4521                                 goto done;
4522
4523                         tp->link_config.speed = SPEED_10;
4524                         break;
4525                 case BMCR_SPEED100:
4526                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4527                                 goto done;
4528
4529                         tp->link_config.speed = SPEED_100;
4530                         break;
4531                 case BMCR_SPEED1000:
4532                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4533                                 tp->link_config.speed = SPEED_1000;
4534                                 break;
4535                         }
4536                         /* Fall through */
4537                 default:
4538                         goto done;
4539                 }
4540
4541                 if (val & BMCR_FULLDPLX)
4542                         tp->link_config.duplex = DUPLEX_FULL;
4543                 else
4544                         tp->link_config.duplex = DUPLEX_HALF;
4545
4546                 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4547
4548                 err = 0;
4549                 goto done;
4550         }
4551
4552         tp->link_config.autoneg = AUTONEG_ENABLE;
4553         tp->link_config.advertising = ADVERTISED_Autoneg;
4554         tg3_flag_set(tp, PAUSE_AUTONEG);
4555
4556         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4557                 u32 adv;
4558
4559                 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4560                 if (err)
4561                         goto done;
4562
4563                 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4564                 tp->link_config.advertising |= adv | ADVERTISED_TP;
4565
4566                 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4567         } else {
4568                 tp->link_config.advertising |= ADVERTISED_FIBRE;
4569         }
4570
4571         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4572                 u32 adv;
4573
4574                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4575                         err = tg3_readphy(tp, MII_CTRL1000, &val);
4576                         if (err)
4577                                 goto done;
4578
4579                         adv = mii_ctrl1000_to_ethtool_adv_t(val);
4580                 } else {
4581                         err = tg3_readphy(tp, MII_ADVERTISE, &val);
4582                         if (err)
4583                                 goto done;
4584
4585                         adv = tg3_decode_flowctrl_1000X(val);
4586                         tp->link_config.flowctrl = adv;
4587
4588                         val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4589                         adv = mii_adv_to_ethtool_adv_x(val);
4590                 }
4591
4592                 tp->link_config.advertising |= adv;
4593         }
4594
4595 done:
4596         return err;
4597 }
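/* tg3_phy_pull_config() is roughly the inverse of tg3_phy_autoneg_cfg():
 * it reads BMCR/MII_ADVERTISE/MII_CTRL1000 back out of the PHY and
 * reconstructs tp->link_config from them, presumably so a configuration
 * already programmed into the PHY (e.g. by firmware) can be preserved.
 * Note that the serdes path reuses MII_ADVERTISE for the 1000X
 * advertisement bits instead of MII_CTRL1000.
 */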
4598
4599 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4600 {
4601         int err;
4602
4603         /* Turn off tap power management and set the
4604          * extended packet length bit. */
4605         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4606
4607         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4608         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4609         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4610         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4611         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4612
4613         udelay(40);
4614
4615         return err;
4616 }
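/* The 0x0012/0x0013/0x8006/0x201f register addresses above appear to be
 * undocumented BCM5401 DSP locations; the magic values come from
 * Broadcom and are simply a known-good init sequence for this PHY.
 */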
4617
4618 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4619 {
4620         struct ethtool_eee eee;
4621
4622         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4623                 return true;
4624
4625         tg3_eee_pull_config(tp, &eee);
4626
4627         if (tp->eee.eee_enabled) {
4628                 if (tp->eee.advertised != eee.advertised ||
4629                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4630                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4631                         return false;
4632         } else {
4633                 /* EEE is disabled but we're advertising */
4634                 if (eee.advertised)
4635                         return false;
4636         }
4637
4638         return true;
4639 }
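/* Reading of the check above: the PHY's live EEE state is pulled into a
 * scratch ethtool_eee and compared field by field with the cached
 * tp->eee.  Any mismatch, or a stray advertisement while EEE is
 * disabled, returns false so the caller knows the negotiated
 * configuration is stale.
 */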
4640
4641 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4642 {
4643         u32 advmsk, tgtadv, advertising;
4644
4645         advertising = tp->link_config.advertising;
4646         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4647
4648         advmsk = ADVERTISE_ALL;
4649         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4650                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4651                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4652         }
4653
4654         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4655                 return false;
4656
4657         if ((*lcladv & advmsk) != tgtadv)
4658                 return false;
4659
4660         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4661                 u32 tg3_ctrl;
4662
4663                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4664
4665                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4666                         return false;
4667
4668                 if (tgtadv &&
4669                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4670                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4671                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4672                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4673                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4674                 } else {
4675                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4676                 }
4677
4678                 if (tg3_ctrl != tgtadv)
4679                         return false;
4680         }
4681
4682         return true;
4683 }
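/* Sketch of the intended use (mirrors the call in tg3_setup_copper_phy()
 * below):
 *
 *     u32 lcladv;
 *     if (!tg3_phy_copper_an_config_ok(tp, &lcladv))
 *             force a renegotiation / PHY reset;
 *
 * The 5701 A0/B0 special case mirrors tg3_phy_autoneg_cfg(), which
 * forces the master bits on those chip revisions.
 */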
4684
4685 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4686 {
4687         u32 lpeth = 0;
4688
4689         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4690                 u32 val;
4691
4692                 if (tg3_readphy(tp, MII_STAT1000, &val))
4693                         return false;
4694
4695                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4696         }
4697
4698         if (tg3_readphy(tp, MII_LPA, rmtadv))
4699                 return false;
4700
4701         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4702         tp->link_config.rmt_adv = lpeth;
4703
4704         return true;
4705 }
4706
4707 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4708 {
4709         if (curr_link_up != tp->link_up) {
4710                 if (curr_link_up) {
4711                         netif_carrier_on(tp->dev);
4712                 } else {
4713                         netif_carrier_off(tp->dev);
4714                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4715                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4716                 }
4717
4718                 tg3_link_report(tp);
4719                 return true;
4720         }
4721
4722         return false;
4723 }
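/* The helper above is what keeps link reporting quiet under polling:
 * carrier state and tg3_link_report() fire only when curr_link_up
 * actually differs from the cached tp->link_up.
 */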
4724
4725 static void tg3_clear_mac_status(struct tg3 *tp)
4726 {
4727         tw32(MAC_EVENT, 0);
4728
4729         tw32_f(MAC_STATUS,
4730                MAC_STATUS_SYNC_CHANGED |
4731                MAC_STATUS_CFG_CHANGED |
4732                MAC_STATUS_MI_COMPLETION |
4733                MAC_STATUS_LNKSTATE_CHANGED);
4734         udelay(40);
4735 }
4736
4737 static void tg3_setup_eee(struct tg3 *tp)
4738 {
4739         u32 val;
4740
4741         val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4742               TG3_CPMU_EEE_LNKIDL_UART_IDL;
4743         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4744                 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4745
4746         tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4747
4748         tw32_f(TG3_CPMU_EEE_CTRL,
4749                TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4750
4751         val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4752               (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4753               TG3_CPMU_EEEMD_LPI_IN_RX |
4754               TG3_CPMU_EEEMD_EEE_ENABLE;
4755
4756         if (tg3_asic_rev(tp) != ASIC_REV_5717)
4757                 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4758
4759         if (tg3_flag(tp, ENABLE_APE))
4760                 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4761
4762         tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4763
4764         tw32_f(TG3_CPMU_EEE_DBTMR1,
4765                TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4766                (tp->eee.tx_lpi_timer & 0xffff));
4767
4768         tw32_f(TG3_CPMU_EEE_DBTMR2,
4769                TG3_CPMU_DBTMR2_APE_TX_2047US |
4770                TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4771 }
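/* On the debounce timer writes above: DBTMR1 ORs a fixed 2047 us PCIe
 * exit value with the low 16 bits of the user's tx_lpi_timer, while
 * DBTMR2 uses fixed 2047 us values for both of its fields (layout
 * inferred from the constant names used here).
 */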
4772
4773 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4774 {
4775         bool current_link_up;
4776         u32 bmsr, val;
4777         u32 lcl_adv, rmt_adv;
4778         u16 current_speed;
4779         u8 current_duplex;
4780         int i, err;
4781
4782         tg3_clear_mac_status(tp);
4783
4784         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4785                 tw32_f(MAC_MI_MODE,
4786                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4787                 udelay(80);
4788         }
4789
4790         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4791
4792         /* Some third-party PHYs need to be reset when the link
4793          * goes down.
4794          */
4795         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4796              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4797              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4798             tp->link_up) {
4799                 tg3_readphy(tp, MII_BMSR, &bmsr);
4800                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4801                     !(bmsr & BMSR_LSTATUS))
4802                         force_reset = true;
4803         }
4804         if (force_reset)
4805                 tg3_phy_reset(tp);
4806
4807         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4808                 tg3_readphy(tp, MII_BMSR, &bmsr);
4809                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4810                     !tg3_flag(tp, INIT_COMPLETE))
4811                         bmsr = 0;
4812
4813                 if (!(bmsr & BMSR_LSTATUS)) {
4814                         err = tg3_init_5401phy_dsp(tp);
4815                         if (err)
4816                                 return err;
4817
4818                         tg3_readphy(tp, MII_BMSR, &bmsr);
4819                         for (i = 0; i < 1000; i++) {
4820                                 udelay(10);
4821                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4822                                     (bmsr & BMSR_LSTATUS)) {
4823                                         udelay(40);
4824                                         break;
4825                                 }
4826                         }
4827
4828                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4829                             TG3_PHY_REV_BCM5401_B0 &&
4830                             !(bmsr & BMSR_LSTATUS) &&
4831                             tp->link_config.active_speed == SPEED_1000) {
4832                                 err = tg3_phy_reset(tp);
4833                                 if (!err)
4834                                         err = tg3_init_5401phy_dsp(tp);
4835                                 if (err)
4836                                         return err;
4837                         }
4838                 }
4839         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4840                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4841                 /* 5701 {A0,B0} CRC bug workaround */
4842                 tg3_writephy(tp, 0x15, 0x0a75);
4843                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4844                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4845                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4846         }
4847
4848         /* Clear pending interrupts... */
4849         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4850         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4851
4852         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4853                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4854         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4855                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4856
4857         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4858             tg3_asic_rev(tp) == ASIC_REV_5701) {
4859                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4860                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4861                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4862                 else
4863                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4864         }
4865
4866         current_link_up = false;
4867         current_speed = SPEED_UNKNOWN;
4868         current_duplex = DUPLEX_UNKNOWN;
4869         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4870         tp->link_config.rmt_adv = 0;
4871
4872         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4873                 err = tg3_phy_auxctl_read(tp,
4874                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4875                                           &val);
4876                 if (!err && !(val & (1 << 10))) {
4877                         tg3_phy_auxctl_write(tp,
4878                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4879                                              val | (1 << 10));
4880                         goto relink;
4881                 }
4882         }
4883
4884         bmsr = 0;
4885         for (i = 0; i < 100; i++) {
4886                 tg3_readphy(tp, MII_BMSR, &bmsr);
4887                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4888                     (bmsr & BMSR_LSTATUS))
4889                         break;
4890                 udelay(40);
4891         }
4892
4893         if (bmsr & BMSR_LSTATUS) {
4894                 u32 aux_stat, bmcr;
4895
4896                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4897                 for (i = 0; i < 2000; i++) {
4898                         udelay(10);
4899                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4900                             aux_stat)
4901                                 break;
4902                 }
4903
4904                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4905                                              &current_speed,
4906                                              &current_duplex);
4907
4908                 bmcr = 0;
4909                 for (i = 0; i < 200; i++) {
4910                         tg3_readphy(tp, MII_BMCR, &bmcr);
4911                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4912                                 continue;
4913                         if (bmcr && bmcr != 0x7fff)
4914                                 break;
4915                         udelay(10);
4916                 }
4917
4918                 lcl_adv = 0;
4919                 rmt_adv = 0;
4920
4921                 tp->link_config.active_speed = current_speed;
4922                 tp->link_config.active_duplex = current_duplex;
4923
4924                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4925                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4926
4927                         if ((bmcr & BMCR_ANENABLE) &&
4928                             eee_config_ok &&
4929                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4930                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4931                                 current_link_up = true;
4932
4933                         /* Changes to EEE settings take effect only after a PHY
4934                          * reset.  If we have skipped a reset because Link Flap
4935                          * Avoidance is enabled, do it now.
4936                          */
4937                         if (!eee_config_ok &&
4938                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4939                             !force_reset) {
4940                                 tg3_setup_eee(tp);
4941                                 tg3_phy_reset(tp);
4942                         }
4943                 } else {
4944                         if (!(bmcr & BMCR_ANENABLE) &&
4945                             tp->link_config.speed == current_speed &&
4946                             tp->link_config.duplex == current_duplex) {
4947                                 current_link_up = true;
4948                         }
4949                 }
4950
4951                 if (current_link_up &&
4952                     tp->link_config.active_duplex == DUPLEX_FULL) {
4953                         u32 reg, bit;
4954
4955                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4956                                 reg = MII_TG3_FET_GEN_STAT;
4957                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4958                         } else {
4959                                 reg = MII_TG3_EXT_STAT;
4960                                 bit = MII_TG3_EXT_STAT_MDIX;
4961                         }
4962
4963                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4964                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4965
4966                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4967                 }
4968         }
4969
4970 relink:
4971         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4972                 tg3_phy_copper_begin(tp);
4973
4974                 if (tg3_flag(tp, ROBOSWITCH)) {
4975                         current_link_up = true;
4976                         /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4977                         current_speed = SPEED_1000;
4978                         current_duplex = DUPLEX_FULL;
4979                         tp->link_config.active_speed = current_speed;
4980                         tp->link_config.active_duplex = current_duplex;
4981                 }
4982
4983                 tg3_readphy(tp, MII_BMSR, &bmsr);
4984                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4985                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4986                         current_link_up = true;
4987         }
4988
4989         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4990         if (current_link_up) {
4991                 if (tp->link_config.active_speed == SPEED_100 ||
4992                     tp->link_config.active_speed == SPEED_10)
4993                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4994                 else
4995                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4996         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4997                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4998         else
4999                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5000
5001         /* For the 5750 core in the BCM4785 chip to work properly
5002          * in RGMII mode, the LED Control Register must be set up.
5003          */
5004         if (tg3_flag(tp, RGMII_MODE)) {
5005                 u32 led_ctrl = tr32(MAC_LED_CTRL);
5006                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5007
5008                 if (tp->link_config.active_speed == SPEED_10)
5009                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5010                 else if (tp->link_config.active_speed == SPEED_100)
5011                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5012                                      LED_CTRL_100MBPS_ON);
5013                 else if (tp->link_config.active_speed == SPEED_1000)
5014                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5015                                      LED_CTRL_1000MBPS_ON);
5016
5017                 tw32(MAC_LED_CTRL, led_ctrl);
5018                 udelay(40);
5019         }
5020
5021         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5022         if (tp->link_config.active_duplex == DUPLEX_HALF)
5023                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5024
5025         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5026                 if (current_link_up &&
5027                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5028                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5029                 else
5030                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5031         }
5032
5033         /* ??? Without this setting Netgear GA302T PHY does not
5034          * ??? send/receive packets...
5035          */
5036         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5037             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5038                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5039                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5040                 udelay(80);
5041         }
5042
5043         tw32_f(MAC_MODE, tp->mac_mode);
5044         udelay(40);
5045
5046         tg3_phy_eee_adjust(tp, current_link_up);
5047
5048         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5049                 /* Polled via timer. */
5050                 tw32_f(MAC_EVENT, 0);
5051         } else {
5052                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5053         }
5054         udelay(40);
5055
5056         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5057             current_link_up &&
5058             tp->link_config.active_speed == SPEED_1000 &&
5059             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5060                 udelay(120);
5061                 tw32_f(MAC_STATUS,
5062                      (MAC_STATUS_SYNC_CHANGED |
5063                       MAC_STATUS_CFG_CHANGED));
5064                 udelay(40);
5065                 tg3_write_mem(tp,
5066                               NIC_SRAM_FIRMWARE_MBOX,
5067                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5068         }
5069
5070         /* Prevent send BD corruption. */
5071         if (tg3_flag(tp, CLKREQ_BUG)) {
5072                 if (tp->link_config.active_speed == SPEED_100 ||
5073                     tp->link_config.active_speed == SPEED_10)
5074                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5075                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5076                 else
5077                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5078                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5079         }
5080
5081         tg3_test_and_report_link_chg(tp, current_link_up);
5082
5083         return 0;
5084 }
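/* Rough shape of tg3_setup_copper_phy() above, for orientation: clear
 * MAC status, apply per-PHY workarounds (5401 DSP init, 5701 A0/B0 CRC
 * bug), poll BMSR for link, decode speed/duplex from the aux-stat
 * register, validate the autoneg result (including EEE), then program
 * MAC_MODE, LEDs and flow control to match, and finally report any link
 * change.
 */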
5085
5086 struct tg3_fiber_aneginfo {
5087         int state;
5088 #define ANEG_STATE_UNKNOWN              0
5089 #define ANEG_STATE_AN_ENABLE            1
5090 #define ANEG_STATE_RESTART_INIT         2
5091 #define ANEG_STATE_RESTART              3
5092 #define ANEG_STATE_DISABLE_LINK_OK      4
5093 #define ANEG_STATE_ABILITY_DETECT_INIT  5
5094 #define ANEG_STATE_ABILITY_DETECT       6
5095 #define ANEG_STATE_ACK_DETECT_INIT      7
5096 #define ANEG_STATE_ACK_DETECT           8
5097 #define ANEG_STATE_COMPLETE_ACK_INIT    9
5098 #define ANEG_STATE_COMPLETE_ACK         10
5099 #define ANEG_STATE_IDLE_DETECT_INIT     11
5100 #define ANEG_STATE_IDLE_DETECT          12
5101 #define ANEG_STATE_LINK_OK              13
5102 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
5103 #define ANEG_STATE_NEXT_PAGE_WAIT       15
5104
5105         u32 flags;
5106 #define MR_AN_ENABLE            0x00000001
5107 #define MR_RESTART_AN           0x00000002
5108 #define MR_AN_COMPLETE          0x00000004
5109 #define MR_PAGE_RX              0x00000008
5110 #define MR_NP_LOADED            0x00000010
5111 #define MR_TOGGLE_TX            0x00000020
5112 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
5113 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
5114 #define MR_LP_ADV_SYM_PAUSE     0x00000100
5115 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
5116 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5117 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5118 #define MR_LP_ADV_NEXT_PAGE     0x00001000
5119 #define MR_TOGGLE_RX            0x00002000
5120 #define MR_NP_RX                0x00004000
5121
5122 #define MR_LINK_OK              0x80000000
5123
5124         unsigned long link_time, cur_time;
5125
5126         u32 ability_match_cfg;
5127         int ability_match_count;
5128
5129         char ability_match, idle_match, ack_match;
5130
5131         u32 txconfig, rxconfig;
5132 #define ANEG_CFG_NP             0x00000080
5133 #define ANEG_CFG_ACK            0x00000040
5134 #define ANEG_CFG_RF2            0x00000020
5135 #define ANEG_CFG_RF1            0x00000010
5136 #define ANEG_CFG_PS2            0x00000001
5137 #define ANEG_CFG_PS1            0x00008000
5138 #define ANEG_CFG_HD             0x00004000
5139 #define ANEG_CFG_FD             0x00002000
5140 #define ANEG_CFG_INVAL          0x00001f06
5141
5142 };
5143 #define ANEG_OK         0
5144 #define ANEG_DONE       1
5145 #define ANEG_TIMER_ENAB 2
5146 #define ANEG_FAILED     -1
5147
5148 #define ANEG_STATE_SETTLE_TIME  10000
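/* The state machine below is clocked from fiber_autoneg(): each call
 * counts as one tick with roughly a microsecond between ticks, so
 * ANEG_STATE_SETTLE_TIME of 10000 works out to about 10 ms of settle
 * time between transitions.
 */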
5149
5150 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5151                                    struct tg3_fiber_aneginfo *ap)
5152 {
5153         u16 flowctrl;
5154         unsigned long delta;
5155         u32 rx_cfg_reg;
5156         int ret;
5157
5158         if (ap->state == ANEG_STATE_UNKNOWN) {
5159                 ap->rxconfig = 0;
5160                 ap->link_time = 0;
5161                 ap->cur_time = 0;
5162                 ap->ability_match_cfg = 0;
5163                 ap->ability_match_count = 0;
5164                 ap->ability_match = 0;
5165                 ap->idle_match = 0;
5166                 ap->ack_match = 0;
5167         }
5168         ap->cur_time++;
5169
5170         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5171                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5172
5173                 if (rx_cfg_reg != ap->ability_match_cfg) {
5174                         ap->ability_match_cfg = rx_cfg_reg;
5175                         ap->ability_match = 0;
5176                         ap->ability_match_count = 0;
5177                 } else {
5178                         if (++ap->ability_match_count > 1) {
5179                                 ap->ability_match = 1;
5180                                 ap->ability_match_cfg = rx_cfg_reg;
5181                         }
5182                 }
5183                 if (rx_cfg_reg & ANEG_CFG_ACK)
5184                         ap->ack_match = 1;
5185                 else
5186                         ap->ack_match = 0;
5187
5188                 ap->idle_match = 0;
5189         } else {
5190                 ap->idle_match = 1;
5191                 ap->ability_match_cfg = 0;
5192                 ap->ability_match_count = 0;
5193                 ap->ability_match = 0;
5194                 ap->ack_match = 0;
5195
5196                 rx_cfg_reg = 0;
5197         }
5198
5199         ap->rxconfig = rx_cfg_reg;
5200         ret = ANEG_OK;
5201
5202         switch (ap->state) {
5203         case ANEG_STATE_UNKNOWN:
5204                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5205                         ap->state = ANEG_STATE_AN_ENABLE;
5206
5207                 /* fallthru */
5208         case ANEG_STATE_AN_ENABLE:
5209                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5210                 if (ap->flags & MR_AN_ENABLE) {
5211                         ap->link_time = 0;
5212                         ap->cur_time = 0;
5213                         ap->ability_match_cfg = 0;
5214                         ap->ability_match_count = 0;
5215                         ap->ability_match = 0;
5216                         ap->idle_match = 0;
5217                         ap->ack_match = 0;
5218
5219                         ap->state = ANEG_STATE_RESTART_INIT;
5220                 } else {
5221                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
5222                 }
5223                 break;
5224
5225         case ANEG_STATE_RESTART_INIT:
5226                 ap->link_time = ap->cur_time;
5227                 ap->flags &= ~(MR_NP_LOADED);
5228                 ap->txconfig = 0;
5229                 tw32(MAC_TX_AUTO_NEG, 0);
5230                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5231                 tw32_f(MAC_MODE, tp->mac_mode);
5232                 udelay(40);
5233
5234                 ret = ANEG_TIMER_ENAB;
5235                 ap->state = ANEG_STATE_RESTART;
5236
5237                 /* fallthru */
5238         case ANEG_STATE_RESTART:
5239                 delta = ap->cur_time - ap->link_time;
5240                 if (delta > ANEG_STATE_SETTLE_TIME)
5241                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5242                 else
5243                         ret = ANEG_TIMER_ENAB;
5244                 break;
5245
5246         case ANEG_STATE_DISABLE_LINK_OK:
5247                 ret = ANEG_DONE;
5248                 break;
5249
5250         case ANEG_STATE_ABILITY_DETECT_INIT:
5251                 ap->flags &= ~(MR_TOGGLE_TX);
5252                 ap->txconfig = ANEG_CFG_FD;
5253                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5254                 if (flowctrl & ADVERTISE_1000XPAUSE)
5255                         ap->txconfig |= ANEG_CFG_PS1;
5256                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5257                         ap->txconfig |= ANEG_CFG_PS2;
5258                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5259                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5260                 tw32_f(MAC_MODE, tp->mac_mode);
5261                 udelay(40);
5262
5263                 ap->state = ANEG_STATE_ABILITY_DETECT;
5264                 break;
5265
5266         case ANEG_STATE_ABILITY_DETECT:
5267                 if (ap->ability_match != 0 && ap->rxconfig != 0)
5268                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
5269                 break;
5270
5271         case ANEG_STATE_ACK_DETECT_INIT:
5272                 ap->txconfig |= ANEG_CFG_ACK;
5273                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5274                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5275                 tw32_f(MAC_MODE, tp->mac_mode);
5276                 udelay(40);
5277
5278                 ap->state = ANEG_STATE_ACK_DETECT;
5279
5280                 /* fallthru */
5281         case ANEG_STATE_ACK_DETECT:
5282                 if (ap->ack_match != 0) {
5283                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5284                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5285                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5286                         } else {
5287                                 ap->state = ANEG_STATE_AN_ENABLE;
5288                         }
5289                 } else if (ap->ability_match != 0 &&
5290                            ap->rxconfig == 0) {
5291                         ap->state = ANEG_STATE_AN_ENABLE;
5292                 }
5293                 break;
5294
5295         case ANEG_STATE_COMPLETE_ACK_INIT:
5296                 if (ap->rxconfig & ANEG_CFG_INVAL) {
5297                         ret = ANEG_FAILED;
5298                         break;
5299                 }
5300                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5301                                MR_LP_ADV_HALF_DUPLEX |
5302                                MR_LP_ADV_SYM_PAUSE |
5303                                MR_LP_ADV_ASYM_PAUSE |
5304                                MR_LP_ADV_REMOTE_FAULT1 |
5305                                MR_LP_ADV_REMOTE_FAULT2 |
5306                                MR_LP_ADV_NEXT_PAGE |
5307                                MR_TOGGLE_RX |
5308                                MR_NP_RX);
5309                 if (ap->rxconfig & ANEG_CFG_FD)
5310                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5311                 if (ap->rxconfig & ANEG_CFG_HD)
5312                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5313                 if (ap->rxconfig & ANEG_CFG_PS1)
5314                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
5315                 if (ap->rxconfig & ANEG_CFG_PS2)
5316                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5317                 if (ap->rxconfig & ANEG_CFG_RF1)
5318                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5319                 if (ap->rxconfig & ANEG_CFG_RF2)
5320                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5321                 if (ap->rxconfig & ANEG_CFG_NP)
5322                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
5323
5324                 ap->link_time = ap->cur_time;
5325
5326                 ap->flags ^= (MR_TOGGLE_TX);
5327                 if (ap->rxconfig & 0x0008)
5328                         ap->flags |= MR_TOGGLE_RX;
5329                 if (ap->rxconfig & ANEG_CFG_NP)
5330                         ap->flags |= MR_NP_RX;
5331                 ap->flags |= MR_PAGE_RX;
5332
5333                 ap->state = ANEG_STATE_COMPLETE_ACK;
5334                 ret = ANEG_TIMER_ENAB;
5335                 break;
5336
5337         case ANEG_STATE_COMPLETE_ACK:
5338                 if (ap->ability_match != 0 &&
5339                     ap->rxconfig == 0) {
5340                         ap->state = ANEG_STATE_AN_ENABLE;
5341                         break;
5342                 }
5343                 delta = ap->cur_time - ap->link_time;
5344                 if (delta > ANEG_STATE_SETTLE_TIME) {
5345                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5346                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5347                         } else {
5348                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5349                                     !(ap->flags & MR_NP_RX)) {
5350                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5351                                 } else {
5352                                         ret = ANEG_FAILED;
5353                                 }
5354                         }
5355                 }
5356                 break;
5357
5358         case ANEG_STATE_IDLE_DETECT_INIT:
5359                 ap->link_time = ap->cur_time;
5360                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5361                 tw32_f(MAC_MODE, tp->mac_mode);
5362                 udelay(40);
5363
5364                 ap->state = ANEG_STATE_IDLE_DETECT;
5365                 ret = ANEG_TIMER_ENAB;
5366                 break;
5367
5368         case ANEG_STATE_IDLE_DETECT:
5369                 if (ap->ability_match != 0 &&
5370                     ap->rxconfig == 0) {
5371                         ap->state = ANEG_STATE_AN_ENABLE;
5372                         break;
5373                 }
5374                 delta = ap->cur_time - ap->link_time;
5375                 if (delta > ANEG_STATE_SETTLE_TIME) {
5376                         /* XXX another gem from the Broadcom driver :( */
5377                         ap->state = ANEG_STATE_LINK_OK;
5378                 }
5379                 break;
5380
5381         case ANEG_STATE_LINK_OK:
5382                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5383                 ret = ANEG_DONE;
5384                 break;
5385
5386         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5387                 /* ??? unimplemented */
5388                 break;
5389
5390         case ANEG_STATE_NEXT_PAGE_WAIT:
5391                 /* ??? unimplemented */
5392                 break;
5393
5394         default:
5395                 ret = ANEG_FAILED;
5396                 break;
5397         }
5398
5399         return ret;
5400 }
5401
5402 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5403 {
5404         int res = 0;
5405         struct tg3_fiber_aneginfo aninfo;
5406         int status = ANEG_FAILED;
5407         unsigned int tick;
5408         u32 tmp;
5409
5410         tw32_f(MAC_TX_AUTO_NEG, 0);
5411
5412         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5413         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5414         udelay(40);
5415
5416         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5417         udelay(40);
5418
5419         memset(&aninfo, 0, sizeof(aninfo));
5420         aninfo.flags |= MR_AN_ENABLE;
5421         aninfo.state = ANEG_STATE_UNKNOWN;
5422         aninfo.cur_time = 0;
5423         tick = 0;
5424         while (++tick < 195000) {
5425                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5426                 if (status == ANEG_DONE || status == ANEG_FAILED)
5427                         break;
5428
5429                 udelay(1);
5430         }
5431
5432         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5433         tw32_f(MAC_MODE, tp->mac_mode);
5434         udelay(40);
5435
5436         *txflags = aninfo.txconfig;
5437         *rxflags = aninfo.flags;
5438
5439         if (status == ANEG_DONE &&
5440             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5441                              MR_LP_ADV_FULL_DUPLEX)))
5442                 res = 1;
5443
5444         return res;
5445 }
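/* fiber_autoneg() reports success (1) only when the state machine
 * finished with ANEG_DONE and at least one of MR_AN_COMPLETE,
 * MR_LINK_OK or MR_LP_ADV_FULL_DUPLEX is set; the caller then decodes
 * pause capabilities from the returned txflags/rxflags.
 */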
5446
5447 static void tg3_init_bcm8002(struct tg3 *tp)
5448 {
5449         u32 mac_status = tr32(MAC_STATUS);
5450         int i;
5451
5452         /* Reset when initializing for the first time or when we have a link. */
5453         if (tg3_flag(tp, INIT_COMPLETE) &&
5454             !(mac_status & MAC_STATUS_PCS_SYNCED))
5455                 return;
5456
5457         /* Set PLL lock range. */
5458         tg3_writephy(tp, 0x16, 0x8007);
5459
5460         /* SW reset */
5461         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5462
5463         /* Wait for reset to complete. */
5464         /* XXX schedule_timeout() ... */
5465         for (i = 0; i < 500; i++)
5466                 udelay(10);
5467
5468         /* Config mode; select PMA/Ch 1 regs. */
5469         tg3_writephy(tp, 0x10, 0x8411);
5470
5471         /* Enable auto-lock and comdet, select txclk for tx. */
5472         tg3_writephy(tp, 0x11, 0x0a10);
5473
5474         tg3_writephy(tp, 0x18, 0x00a0);
5475         tg3_writephy(tp, 0x16, 0x41ff);
5476
5477         /* Assert and deassert POR. */
5478         tg3_writephy(tp, 0x13, 0x0400);
5479         udelay(40);
5480         tg3_writephy(tp, 0x13, 0x0000);
5481
5482         tg3_writephy(tp, 0x11, 0x0a50);
5483         udelay(40);
5484         tg3_writephy(tp, 0x11, 0x0a10);
5485
5486         /* Wait for signal to stabilize */
5487         /* XXX schedule_timeout() ... */
5488         for (i = 0; i < 15000; i++)
5489                 udelay(10);
5490
5491         /* Deselect the channel register so we can read the PHYID
5492          * later.
5493          */
5494         tg3_writephy(tp, 0x10, 0x8011);
5495 }
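/* The raw writes to registers 0x10/0x11/0x13/0x16/0x18 above target the
 * BCM8002's vendor-specific register space, and the busy-wait loops
 * stand in for the schedule_timeout() calls the XXX comments ask for.
 */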
5496
5497 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5498 {
5499         u16 flowctrl;
5500         bool current_link_up;
5501         u32 sg_dig_ctrl, sg_dig_status;
5502         u32 serdes_cfg, expected_sg_dig_ctrl;
5503         int workaround, port_a;
5504
5505         serdes_cfg = 0;
5506         expected_sg_dig_ctrl = 0;
5507         workaround = 0;
5508         port_a = 1;
5509         current_link_up = false;
5510
5511         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5512             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5513                 workaround = 1;
5514                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5515                         port_a = 0;
5516
5517                 /* Preserve bits 0-11, 13, 14 for signal pre-emphasis
5518                  * and bits 20-23 for the voltage regulator. */
5519                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5520         }
5521
5522         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5523
5524         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5525                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5526                         if (workaround) {
5527                                 u32 val = serdes_cfg;
5528
5529                                 if (port_a)
5530                                         val |= 0xc010000;
5531                                 else
5532                                         val |= 0x4010000;
5533                                 tw32_f(MAC_SERDES_CFG, val);
5534                         }
5535
5536                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5537                 }
5538                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5539                         tg3_setup_flow_control(tp, 0, 0);
5540                         current_link_up = true;
5541                 }
5542                 goto out;
5543         }
5544
5545         /* Want auto-negotiation.  */
5546         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5547
5548         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5549         if (flowctrl & ADVERTISE_1000XPAUSE)
5550                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5551         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5552                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5553
5554         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5555                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5556                     tp->serdes_counter &&
5557                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5558                                     MAC_STATUS_RCVD_CFG)) ==
5559                      MAC_STATUS_PCS_SYNCED)) {
5560                         tp->serdes_counter--;
5561                         current_link_up = true;
5562                         goto out;
5563                 }
5564 restart_autoneg:
5565                 if (workaround)
5566                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5567                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5568                 udelay(5);
5569                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5570
5571                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5572                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5573         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5574                                  MAC_STATUS_SIGNAL_DET)) {
5575                 sg_dig_status = tr32(SG_DIG_STATUS);
5576                 mac_status = tr32(MAC_STATUS);
5577
5578                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5579                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5580                         u32 local_adv = 0, remote_adv = 0;
5581
5582                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5583                                 local_adv |= ADVERTISE_1000XPAUSE;
5584                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5585                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5586
5587                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5588                                 remote_adv |= LPA_1000XPAUSE;
5589                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5590                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5591
5592                         tp->link_config.rmt_adv =
5593                                            mii_adv_to_ethtool_adv_x(remote_adv);
5594
5595                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5596                         current_link_up = true;
5597                         tp->serdes_counter = 0;
5598                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5599                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5600                         if (tp->serdes_counter)
5601                                 tp->serdes_counter--;
5602                         else {
5603                                 if (workaround) {
5604                                         u32 val = serdes_cfg;
5605
5606                                         if (port_a)
5607                                                 val |= 0xc010000;
5608                                         else
5609                                                 val |= 0x4010000;
5610
5611                                         tw32_f(MAC_SERDES_CFG, val);
5612                                 }
5613
5614                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5615                                 udelay(40);
5616
5617                                 /* Link parallel detection: the link is up
5618                                  * only if we have PCS_SYNC and are not
5619                                  * receiving config code words. */
5620                                 mac_status = tr32(MAC_STATUS);
5621                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5622                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5623                                         tg3_setup_flow_control(tp, 0, 0);
5624                                         current_link_up = true;
5625                                         tp->phy_flags |=
5626                                                 TG3_PHYFLG_PARALLEL_DETECT;
5627                                         tp->serdes_counter =
5628                                                 SERDES_PARALLEL_DET_TIMEOUT;
5629                                 } else
5630                                         goto restart_autoneg;
5631                         }
5632                 }
5633         } else {
5634                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5635                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5636         }
5637
5638 out:
5639         return current_link_up;
5640 }
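/* tp->serdes_counter serves double duty above: while autoneg is pending
 * it counts down from SERDES_AN_TIMEOUT_5704S before the code falls
 * back to parallel detection, and once parallel detection has succeeded
 * it briefly holds the link up so a late autoneg restart does not flap
 * it.
 */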
5641
5642 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5643 {
5644         bool current_link_up = false;
5645
5646         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5647                 goto out;
5648
5649         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5650                 u32 txflags, rxflags;
5651                 int i;
5652
5653                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5654                         u32 local_adv = 0, remote_adv = 0;
5655
5656                         if (txflags & ANEG_CFG_PS1)
5657                                 local_adv |= ADVERTISE_1000XPAUSE;
5658                         if (txflags & ANEG_CFG_PS2)
5659                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5660
5661                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5662                                 remote_adv |= LPA_1000XPAUSE;
5663                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5664                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5665
5666                         tp->link_config.rmt_adv =
5667                                            mii_adv_to_ethtool_adv_x(remote_adv);
5668
5669                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5670
5671                         current_link_up = true;
5672                 }
5673                 for (i = 0; i < 30; i++) {
5674                         udelay(20);
5675                         tw32_f(MAC_STATUS,
5676                                (MAC_STATUS_SYNC_CHANGED |
5677                                 MAC_STATUS_CFG_CHANGED));
5678                         udelay(40);
5679                         if ((tr32(MAC_STATUS) &
5680                              (MAC_STATUS_SYNC_CHANGED |
5681                               MAC_STATUS_CFG_CHANGED)) == 0)
5682                                 break;
5683                 }
5684
5685                 mac_status = tr32(MAC_STATUS);
5686                 if (!current_link_up &&
5687                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5688                     !(mac_status & MAC_STATUS_RCVD_CFG))
5689                         current_link_up = true;
5690         } else {
5691                 tg3_setup_flow_control(tp, 0, 0);
5692
5693                 /* Forcing 1000FD link up. */
5694                 current_link_up = true;
5695
5696                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5697                 udelay(40);
5698
5699                 tw32_f(MAC_MODE, tp->mac_mode);
5700                 udelay(40);
5701         }
5702
5703 out:
5704         return current_link_up;
5705 }
5706
5707 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5708 {
5709         u32 orig_pause_cfg;
5710         u16 orig_active_speed;
5711         u8 orig_active_duplex;
5712         u32 mac_status;
5713         bool current_link_up;
5714         int i;
5715
5716         orig_pause_cfg = tp->link_config.active_flowctrl;
5717         orig_active_speed = tp->link_config.active_speed;
5718         orig_active_duplex = tp->link_config.active_duplex;
5719
5720         if (!tg3_flag(tp, HW_AUTONEG) &&
5721             tp->link_up &&
5722             tg3_flag(tp, INIT_COMPLETE)) {
5723                 mac_status = tr32(MAC_STATUS);
5724                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5725                                MAC_STATUS_SIGNAL_DET |
5726                                MAC_STATUS_CFG_CHANGED |
5727                                MAC_STATUS_RCVD_CFG);
5728                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5729                                    MAC_STATUS_SIGNAL_DET)) {
5730                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5731                                             MAC_STATUS_CFG_CHANGED));
5732                         return 0;
5733                 }
5734         }
5735
5736         tw32_f(MAC_TX_AUTO_NEG, 0);
5737
5738         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5739         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5740         tw32_f(MAC_MODE, tp->mac_mode);
5741         udelay(40);
5742
5743         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5744                 tg3_init_bcm8002(tp);
5745
5746         /* Enable link change event even when serdes polling.  */
5747         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5748         udelay(40);
5749
5750         current_link_up = false;
5751         tp->link_config.rmt_adv = 0;
5752         mac_status = tr32(MAC_STATUS);
5753
5754         if (tg3_flag(tp, HW_AUTONEG))
5755                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5756         else
5757                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5758
5759         tp->napi[0].hw_status->status =
5760                 (SD_STATUS_UPDATED |
5761                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5762
5763         for (i = 0; i < 100; i++) {
5764                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5765                                     MAC_STATUS_CFG_CHANGED));
5766                 udelay(5);
5767                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5768                                          MAC_STATUS_CFG_CHANGED |
5769                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5770                         break;
5771         }
5772
5773         mac_status = tr32(MAC_STATUS);
5774         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5775                 current_link_up = false;
5776                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5777                     tp->serdes_counter == 0) {
5778                         tw32_f(MAC_MODE, (tp->mac_mode |
5779                                           MAC_MODE_SEND_CONFIGS));
5780                         udelay(1);
5781                         tw32_f(MAC_MODE, tp->mac_mode);
5782                 }
5783         }
5784
5785         if (current_link_up) {
5786                 tp->link_config.active_speed = SPEED_1000;
5787                 tp->link_config.active_duplex = DUPLEX_FULL;
5788                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5789                                     LED_CTRL_LNKLED_OVERRIDE |
5790                                     LED_CTRL_1000MBPS_ON));
5791         } else {
5792                 tp->link_config.active_speed = SPEED_UNKNOWN;
5793                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5794                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5795                                     LED_CTRL_LNKLED_OVERRIDE |
5796                                     LED_CTRL_TRAFFIC_OVERRIDE));
5797         }
5798
5799         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5800                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5801                 if (orig_pause_cfg != now_pause_cfg ||
5802                     orig_active_speed != tp->link_config.active_speed ||
5803                     orig_active_duplex != tp->link_config.active_duplex)
5804                         tg3_link_report(tp);
5805         }
5806
5807         return 0;
5808 }
5809
5810 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5811 {
5812         int err = 0;
5813         u32 bmsr, bmcr;
5814         u16 current_speed = SPEED_UNKNOWN;
5815         u8 current_duplex = DUPLEX_UNKNOWN;
5816         bool current_link_up = false;
5817         u32 local_adv, remote_adv, sgsr;
5818
5819         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5820              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5821              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5822              (sgsr & SERDES_TG3_SGMII_MODE)) {
5823
5824                 if (force_reset)
5825                         tg3_phy_reset(tp);
5826
5827                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5828
5829                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5830                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5831                 } else {
5832                         current_link_up = true;
5833                         if (sgsr & SERDES_TG3_SPEED_1000) {
5834                                 current_speed = SPEED_1000;
5835                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5836                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5837                                 current_speed = SPEED_100;
5838                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5839                         } else {
5840                                 current_speed = SPEED_10;
5841                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5842                         }
5843
5844                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5845                                 current_duplex = DUPLEX_FULL;
5846                         else
5847                                 current_duplex = DUPLEX_HALF;
5848                 }
5849
5850                 tw32_f(MAC_MODE, tp->mac_mode);
5851                 udelay(40);
5852
5853                 tg3_clear_mac_status(tp);
5854
5855                 goto fiber_setup_done;
5856         }
5857
5858         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5859         tw32_f(MAC_MODE, tp->mac_mode);
5860         udelay(40);
5861
5862         tg3_clear_mac_status(tp);
5863
5864         if (force_reset)
5865                 tg3_phy_reset(tp);
5866
5867         tp->link_config.rmt_adv = 0;
5868
5869         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5870         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5871         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5872                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5873                         bmsr |= BMSR_LSTATUS;
5874                 else
5875                         bmsr &= ~BMSR_LSTATUS;
5876         }
5877
5878         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5879
5880         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5881             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5882                 /* do nothing, just check for link up at the end */
5883         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5884                 u32 adv, newadv;
5885
5886                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5887                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5888                                  ADVERTISE_1000XPAUSE |
5889                                  ADVERTISE_1000XPSE_ASYM |
5890                                  ADVERTISE_SLCT);
5891
5892                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5893                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5894
5895                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5896                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5897                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5898                         tg3_writephy(tp, MII_BMCR, bmcr);
5899
5900                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5901                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5902                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5903
5904                         return err;
5905                 }
5906         } else {
5907                 u32 new_bmcr;
5908
5909                 bmcr &= ~BMCR_SPEED1000;
5910                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5911
5912                 if (tp->link_config.duplex == DUPLEX_FULL)
5913                         new_bmcr |= BMCR_FULLDPLX;
5914
5915                 if (new_bmcr != bmcr) {
5916                         /* BMCR_SPEED1000 is a reserved bit that needs
5917                          * to be set on write.
5918                          */
5919                         new_bmcr |= BMCR_SPEED1000;
5920
5921                         /* Force a linkdown */
5922                         if (tp->link_up) {
5923                                 u32 adv;
5924
5925                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5926                                 adv &= ~(ADVERTISE_1000XFULL |
5927                                          ADVERTISE_1000XHALF |
5928                                          ADVERTISE_SLCT);
5929                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5930                                 tg3_writephy(tp, MII_BMCR, bmcr |
5931                                                            BMCR_ANRESTART |
5932                                                            BMCR_ANENABLE);
5933                                 udelay(10);
5934                                 tg3_carrier_off(tp);
5935                         }
5936                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5937                         bmcr = new_bmcr;
5938                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5939                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5940                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5941                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5942                                         bmsr |= BMSR_LSTATUS;
5943                                 else
5944                                         bmsr &= ~BMSR_LSTATUS;
5945                         }
5946                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5947                 }
5948         }
5949
5950         if (bmsr & BMSR_LSTATUS) {
5951                 current_speed = SPEED_1000;
5952                 current_link_up = true;
5953                 if (bmcr & BMCR_FULLDPLX)
5954                         current_duplex = DUPLEX_FULL;
5955                 else
5956                         current_duplex = DUPLEX_HALF;
5957
5958                 local_adv = 0;
5959                 remote_adv = 0;
5960
5961                 if (bmcr & BMCR_ANENABLE) {
5962                         u32 common;
5963
5964                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5965                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5966                         common = local_adv & remote_adv;
5967                         if (common & (ADVERTISE_1000XHALF |
5968                                       ADVERTISE_1000XFULL)) {
5969                                 if (common & ADVERTISE_1000XFULL)
5970                                         current_duplex = DUPLEX_FULL;
5971                                 else
5972                                         current_duplex = DUPLEX_HALF;
5973
5974                                 tp->link_config.rmt_adv =
5975                                            mii_adv_to_ethtool_adv_x(remote_adv);
5976                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5977                                 /* Link is up via parallel detect */
5978                         } else {
5979                                 current_link_up = false;
5980                         }
5981                 }
5982         }
5983
5984 fiber_setup_done:
5985         if (current_link_up && current_duplex == DUPLEX_FULL)
5986                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5987
5988         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5989         if (tp->link_config.active_duplex == DUPLEX_HALF)
5990                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5991
5992         tw32_f(MAC_MODE, tp->mac_mode);
5993         udelay(40);
5994
5995         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5996
5997         tp->link_config.active_speed = current_speed;
5998         tp->link_config.active_duplex = current_duplex;
5999
6000         tg3_test_and_report_link_chg(tp, current_link_up);
6001         return err;
6002 }
6003
6004 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6005 {
6006         if (tp->serdes_counter) {
6007                 /* Give autoneg time to complete. */
6008                 tp->serdes_counter--;
6009                 return;
6010         }
6011
6012         if (!tp->link_up &&
6013             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6014                 u32 bmcr;
6015
6016                 tg3_readphy(tp, MII_BMCR, &bmcr);
6017                 if (bmcr & BMCR_ANENABLE) {
6018                         u32 phy1, phy2;
6019
6020                         /* Select shadow register 0x1f */
6021                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6022                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6023
6024                         /* Select expansion interrupt status register */
6025                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6026                                          MII_TG3_DSP_EXP1_INT_STAT);
6027                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6028                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6029
6030                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6031                                 /* We have signal detect and are not receiving
6032                                  * config code words; the link is up via parallel
6033                                  * detection.
6034                                  */
6035
6036                                 bmcr &= ~BMCR_ANENABLE;
6037                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6038                                 tg3_writephy(tp, MII_BMCR, bmcr);
6039                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6040                         }
6041                 }
6042         } else if (tp->link_up &&
6043                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6044                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6045                 u32 phy2;
6046
6047                 /* Select expansion interrupt status register */
6048                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6049                                  MII_TG3_DSP_EXP1_INT_STAT);
6050                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6051                 if (phy2 & 0x20) {
6052                         u32 bmcr;
6053
6054                         /* Config code words received, turn on autoneg. */
6055                         tg3_readphy(tp, MII_BMCR, &bmcr);
6056                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6057
6058                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6059
6060                 }
6061         }
6062 }
6063
6064 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6065 {
6066         u32 val;
6067         int err;
6068
6069         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6070                 err = tg3_setup_fiber_phy(tp, force_reset);
6071         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6072                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6073         else
6074                 err = tg3_setup_copper_phy(tp, force_reset);
6075
6076         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6077                 u32 scale;
6078
6079                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6080                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6081                         scale = 65;
6082                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6083                         scale = 6;
6084                 else
6085                         scale = 12;
6086
6087                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6088                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6089                 tw32(GRC_MISC_CFG, val);
6090         }
6091
6092         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6093               (6 << TX_LENGTHS_IPG_SHIFT);
6094         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6095             tg3_asic_rev(tp) == ASIC_REV_5762)
6096                 val |= tr32(MAC_TX_LENGTHS) &
6097                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
6098                         TX_LENGTHS_CNT_DWN_VAL_MSK);
6099
6100         if (tp->link_config.active_speed == SPEED_1000 &&
6101             tp->link_config.active_duplex == DUPLEX_HALF)
6102                 tw32(MAC_TX_LENGTHS, val |
6103                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6104         else
6105                 tw32(MAC_TX_LENGTHS, val |
6106                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6107
6108         if (!tg3_flag(tp, 5705_PLUS)) {
6109                 if (tp->link_up) {
6110                         tw32(HOSTCC_STAT_COAL_TICKS,
6111                              tp->coal.stats_block_coalesce_usecs);
6112                 } else {
6113                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
6114                 }
6115         }
6116
6117         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6118                 val = tr32(PCIE_PWR_MGMT_THRESH);
6119                 if (!tp->link_up)
6120                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6121                               tp->pwrmgmt_thresh;
6122                 else
6123                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6124                 tw32(PCIE_PWR_MGMT_THRESH, val);
6125         }
6126
6127         return err;
6128 }
6129
6130 /* tp->lock must be held */
6131 static u64 tg3_refclk_read(struct tg3 *tp)
6132 {
6133         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6134         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6135 }
6136
6137 /* tp->lock must be held */
6138 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6139 {
6140         u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6141
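        /* The 64-bit reference counter cannot be written atomically, so
         * halt it first, write the two 32-bit halves, then resume it.
         */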
6142         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6143         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6144         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6145         tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6146 }
6147
6148 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6149 static inline void tg3_full_unlock(struct tg3 *tp);
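/* Report timestamping capabilities; this is what userspace sees through
 * the ETHTOOL_GET_TS_INFO ioctl (e.g. "ethtool -T ethX").
 */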
6150 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6151 {
6152         struct tg3 *tp = netdev_priv(dev);
6153
6154         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6155                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6156                                 SOF_TIMESTAMPING_SOFTWARE;
6157
6158         if (tg3_flag(tp, PTP_CAPABLE)) {
6159                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6160                                         SOF_TIMESTAMPING_RX_HARDWARE |
6161                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6162         }
6163
6164         if (tp->ptp_clock)
6165                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6166         else
6167                 info->phc_index = -1;
6168
6169         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6170
6171         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6172                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6173                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6174                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6175         return 0;
6176 }
6177
6178 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6179 {
6180         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6181         bool neg_adj = false;
6182         u32 correction = 0;
6183
6184         if (ppb < 0) {
6185                 neg_adj = true;
6186                 ppb = -ppb;
6187         }
6188
6189         /* Frequency adjustment is performed using hardware with a 24-bit
6190          * accumulator and a programmable correction value. On each clock cycle, the
6191          * correction value gets added to the accumulator and when it
6192          * overflows, the time counter is incremented/decremented.
6193          *
6194          * So conversion from ppb to correction value is
6195          *              ppb * (1 << 24) / 1000000000
6196          */
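        /* Worked example: ppb = 1000 (+1 ppm) gives
         * correction = 1000 * 16777216 / 1000000000 = 16, so the
         * accumulator overflows an extra 16 / 2^24 of the time,
         * i.e. ~0.95 ppm.  The step size is therefore about
         * 10^9 / 2^24 ~= 59.6 ppb per correction LSB.
         */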
6197         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6198                      TG3_EAV_REF_CLK_CORRECT_MASK;
6199
6200         tg3_full_lock(tp, 0);
6201
6202         if (correction)
6203                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6204                      TG3_EAV_REF_CLK_CORRECT_EN |
6205                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6206         else
6207                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6208
6209         tg3_full_unlock(tp);
6210
6211         return 0;
6212 }
6213
6214 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6215 {
6216         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6217
6218         tg3_full_lock(tp, 0);
6219         tp->ptp_adjust += delta;
6220         tg3_full_unlock(tp);
6221
6222         return 0;
6223 }
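/* Note the split between the time setters above and below: adjtime only
 * accumulates its delta in the software offset tp->ptp_adjust, which
 * tg3_ptp_gettime() adds to the raw counter, while settime rewrites the
 * hardware counter and zeroes the offset.  Small phase corrections
 * therefore avoid a clock stop/resume cycle.
 */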
6224
6225 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6226 {
6227         u64 ns;
6228         u32 remainder;
6229         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6230
6231         tg3_full_lock(tp, 0);
6232         ns = tg3_refclk_read(tp);
6233         ns += tp->ptp_adjust;
6234         tg3_full_unlock(tp);
6235
6236         ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6237         ts->tv_nsec = remainder;
6238
6239         return 0;
6240 }
6241
6242 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6243                            const struct timespec *ts)
6244 {
6245         u64 ns;
6246         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6247
6248         ns = timespec_to_ns(ts);
6249
6250         tg3_full_lock(tp, 0);
6251         tg3_refclk_write(tp, ns);
6252         tp->ptp_adjust = 0;
6253         tg3_full_unlock(tp);
6254
6255         return 0;
6256 }
6257
6258 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6259                           struct ptp_clock_request *rq, int on)
6260 {
6261         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6262         u32 clock_ctl;
6263         int rval = 0;
6264
6265         switch (rq->type) {
6266         case PTP_CLK_REQ_PEROUT:
6267                 if (rq->perout.index != 0)
6268                         return -EINVAL;
6269
6270                 tg3_full_lock(tp, 0);
6271                 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6272                 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6273
6274                 if (on) {
6275                         u64 nsec;
6276
6277                         nsec = rq->perout.start.sec * 1000000000ULL +
6278                                rq->perout.start.nsec;
6279
6280                         if (rq->perout.period.sec || rq->perout.period.nsec) {
6281                                 netdev_warn(tp->dev,
6282                                             "Device supports only a one-shot timesync output, period must be 0\n");
6283                                 rval = -EINVAL;
6284                                 goto err_out;
6285                         }
6286
6287                         if (nsec & (1ULL << 63)) {
6288                                 netdev_warn(tp->dev,
6289                                             "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6290                                 rval = -EINVAL;
6291                                 goto err_out;
6292                         }
6293
6294                         tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6295                         tw32(TG3_EAV_WATCHDOG0_MSB,
6296                              TG3_EAV_WATCHDOG0_EN |
6297                              ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6298
6299                         tw32(TG3_EAV_REF_CLCK_CTL,
6300                              clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6301                 } else {
6302                         tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6303                         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6304                 }
6305
6306 err_out:
6307                 tg3_full_unlock(tp);
6308                 return rval;
6309
6310         default:
6311                 break;
6312         }
6313
6314         return -EOPNOTSUPP;
6315 }
6316
6317 static const struct ptp_clock_info tg3_ptp_caps = {
6318         .owner          = THIS_MODULE,
6319         .name           = "tg3 clock",
6320         .max_adj        = 250000000,
6321         .n_alarm        = 0,
6322         .n_ext_ts       = 0,
6323         .n_per_out      = 1,
6324         .pps            = 0,
6325         .adjfreq        = tg3_ptp_adjfreq,
6326         .adjtime        = tg3_ptp_adjtime,
6327         .gettime        = tg3_ptp_gettime,
6328         .settime        = tg3_ptp_settime,
6329         .enable         = tg3_ptp_enable,
6330 };
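/* Elsewhere in the driver (outside this excerpt) these capabilities are
 * registered with the PTP core along the lines of:
 *
 *	tp->ptp_info = tg3_ptp_caps;
 *	tp->ptp_clock = ptp_clock_register(&tp->ptp_info, &tp->pdev->dev);
 *
 * which exposes the clock to userspace as /dev/ptpN; tg3_ptp_fini()
 * below unregisters it.
 */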
6331
6332 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6333                                      struct skb_shared_hwtstamps *timestamp)
6334 {
6335         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6336         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6337                                            tp->ptp_adjust);
6338 }
6339
6340 /* tp->lock must be held */
6341 static void tg3_ptp_init(struct tg3 *tp)
6342 {
6343         if (!tg3_flag(tp, PTP_CAPABLE))
6344                 return;
6345
6346         /* Initialize the hardware clock to the system time. */
6347         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6348         tp->ptp_adjust = 0;
6349         tp->ptp_info = tg3_ptp_caps;
6350 }
6351
6352 /* tp->lock must be held */
6353 static void tg3_ptp_resume(struct tg3 *tp)
6354 {
6355         if (!tg3_flag(tp, PTP_CAPABLE))
6356                 return;
6357
6358         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6359         tp->ptp_adjust = 0;
6360 }
6361
6362 static void tg3_ptp_fini(struct tg3 *tp)
6363 {
6364         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6365                 return;
6366
6367         ptp_clock_unregister(tp->ptp_clock);
6368         tp->ptp_clock = NULL;
6369         tp->ptp_adjust = 0;
6370 }
6371
6372 static inline int tg3_irq_sync(struct tg3 *tp)
6373 {
6374         return tp->irq_sync;
6375 }
6376
6377 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6378 {
6379         int i;
6380
6381         dst = (u32 *)((u8 *)dst + off);
6382         for (i = 0; i < len; i += sizeof(u32))
6383                 *dst++ = tr32(off + i);
6384 }
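/* Note the dst adjustment above: the register at offset (off + i) lands
 * in dst[(off + i) / 4], so the buffer is indexed by absolute register
 * offset and tg3_dump_state() below can print i * 4 as the address.
 */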
6385
6386 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6387 {
6388         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6389         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6390         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6391         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6392         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6393         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6394         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6395         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6396         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6397         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6398         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6399         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6400         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6401         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6402         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6403         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6404         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6405         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6406         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6407
6408         if (tg3_flag(tp, SUPPORT_MSIX))
6409                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6410
6411         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6412         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6413         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6414         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6415         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6416         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6417         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6418         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6419
6420         if (!tg3_flag(tp, 5705_PLUS)) {
6421                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6422                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6423                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6424         }
6425
6426         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6427         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6428         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6429         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6430         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6431
6432         if (tg3_flag(tp, NVRAM))
6433                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6434 }
6435
6436 static void tg3_dump_state(struct tg3 *tp)
6437 {
6438         int i;
6439         u32 *regs;
6440
6441         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6442         if (!regs)
6443                 return;
6444
6445         if (tg3_flag(tp, PCI_EXPRESS)) {
6446                 /* Read up to but not including private PCI registers */
6447                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6448                         regs[i / sizeof(u32)] = tr32(i);
6449         } else
6450                 tg3_dump_legacy_regs(tp, regs);
6451
6452         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6453                 if (!regs[i + 0] && !regs[i + 1] &&
6454                     !regs[i + 2] && !regs[i + 3])
6455                         continue;
6456
6457                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6458                            i * 4,
6459                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6460         }
6461
6462         kfree(regs);
6463
6464         for (i = 0; i < tp->irq_cnt; i++) {
6465                 struct tg3_napi *tnapi = &tp->napi[i];
6466
6467                 /* SW status block */
6468                 netdev_err(tp->dev,
6469                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6470                            i,
6471                            tnapi->hw_status->status,
6472                            tnapi->hw_status->status_tag,
6473                            tnapi->hw_status->rx_jumbo_consumer,
6474                            tnapi->hw_status->rx_consumer,
6475                            tnapi->hw_status->rx_mini_consumer,
6476                            tnapi->hw_status->idx[0].rx_producer,
6477                            tnapi->hw_status->idx[0].tx_consumer);
6478
6479                 netdev_err(tp->dev,
6480                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6481                            i,
6482                            tnapi->last_tag, tnapi->last_irq_tag,
6483                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6484                            tnapi->rx_rcb_ptr,
6485                            tnapi->prodring.rx_std_prod_idx,
6486                            tnapi->prodring.rx_std_cons_idx,
6487                            tnapi->prodring.rx_jmb_prod_idx,
6488                            tnapi->prodring.rx_jmb_cons_idx);
6489         }
6490 }
6491
6492 /* This is called whenever we suspect that the system chipset is re-
6493  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6494  * is bogus tx completions. We try to recover by setting the
6495  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6496  * in the workqueue.
6497  */
6498 static void tg3_tx_recover(struct tg3 *tp)
6499 {
6500         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6501                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6502
6503         netdev_warn(tp->dev,
6504                     "The system may be re-ordering memory-mapped I/O "
6505                     "cycles to the network device, attempting to recover. "
6506                     "Please report the problem to the driver maintainer "
6507                     "and include system chipset information.\n");
6508
6509         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6510 }
6511
6512 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6513 {
6514         /* Tell compiler to fetch tx indices from memory. */
6515         barrier();
6516         return tnapi->tx_pending -
6517                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6518 }
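/* Example of the wraparound arithmetic above: with a 512-entry ring
 * (TG3_TX_RING_SIZE - 1 == 511), tx_prod == 5 and tx_cons == 510 give
 * (5 - 510) & 511 == 7 descriptors still in flight, so the function
 * returns tx_pending - 7.
 */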
6519
6520 /* Tigon3 never reports partial packet sends.  So we do not
6521  * need special logic to handle SKBs that have not had all
6522  * of their frags sent yet, like SunGEM does.
6523  */
6524 static void tg3_tx(struct tg3_napi *tnapi)
6525 {
6526         struct tg3 *tp = tnapi->tp;
6527         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6528         u32 sw_idx = tnapi->tx_cons;
6529         struct netdev_queue *txq;
6530         int index = tnapi - tp->napi;
6531         unsigned int pkts_compl = 0, bytes_compl = 0;
6532
6533         if (tg3_flag(tp, ENABLE_TSS))
6534                 index--;
6535
6536         txq = netdev_get_tx_queue(tp->dev, index);
6537
6538         while (sw_idx != hw_idx) {
6539                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6540                 struct sk_buff *skb = ri->skb;
6541                 int i, tx_bug = 0;
6542
6543                 if (unlikely(skb == NULL)) {
6544                         tg3_tx_recover(tp);
6545                         return;
6546                 }
6547
6548                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6549                         struct skb_shared_hwtstamps timestamp;
6550                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6551                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6552
6553                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6554
6555                         skb_tstamp_tx(skb, &timestamp);
6556                 }
6557
6558                 pci_unmap_single(tp->pdev,
6559                                  dma_unmap_addr(ri, mapping),
6560                                  skb_headlen(skb),
6561                                  PCI_DMA_TODEVICE);
6562
6563                 ri->skb = NULL;
6564
6565                 while (ri->fragmented) {
6566                         ri->fragmented = false;
6567                         sw_idx = NEXT_TX(sw_idx);
6568                         ri = &tnapi->tx_buffers[sw_idx];
6569                 }
6570
6571                 sw_idx = NEXT_TX(sw_idx);
6572
6573                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6574                         ri = &tnapi->tx_buffers[sw_idx];
6575                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6576                                 tx_bug = 1;
6577
6578                         pci_unmap_page(tp->pdev,
6579                                        dma_unmap_addr(ri, mapping),
6580                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6581                                        PCI_DMA_TODEVICE);
6582
6583                         while (ri->fragmented) {
6584                                 ri->fragmented = false;
6585                                 sw_idx = NEXT_TX(sw_idx);
6586                                 ri = &tnapi->tx_buffers[sw_idx];
6587                         }
6588
6589                         sw_idx = NEXT_TX(sw_idx);
6590                 }
6591
6592                 pkts_compl++;
6593                 bytes_compl += skb->len;
6594
6595                 dev_kfree_skb(skb);
6596
6597                 if (unlikely(tx_bug)) {
6598                         tg3_tx_recover(tp);
6599                         return;
6600                 }
6601         }
6602
6603         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6604
6605         tnapi->tx_cons = sw_idx;
6606
6607         /* Need to make the tx_cons update visible to tg3_start_xmit()
6608          * before checking for netif_queue_stopped().  Without the
6609          * memory barrier, there is a small possibility that tg3_start_xmit()
6610          * will miss it and cause the queue to be stopped forever.
6611          */
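        /* This pairs with the corresponding smp_mb() in tg3_start_xmit(),
         * which stops the queue and then re-checks tg3_tx_avail(); between
         * the two barriers, at least one side must observe the other's
         * update.
         */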
6612         smp_mb();
6613
6614         if (unlikely(netif_tx_queue_stopped(txq) &&
6615                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6616                 __netif_tx_lock(txq, smp_processor_id());
6617                 if (netif_tx_queue_stopped(txq) &&
6618                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6619                         netif_tx_wake_queue(txq);
6620                 __netif_tx_unlock(txq);
6621         }
6622 }
6623
6624 static void tg3_frag_free(bool is_frag, void *data)
6625 {
6626         if (is_frag)
6627                 put_page(virt_to_head_page(data));
6628         else
6629                 kfree(data);
6630 }
6631
6632 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6633 {
6634         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6635                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6636
6637         if (!ri->data)
6638                 return;
6639
6640         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6641                          map_sz, PCI_DMA_FROMDEVICE);
6642         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6643         ri->data = NULL;
6644 }
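/* The skb_size computed above mirrors the size expression used at
 * allocation time in tg3_alloc_rx_data() below, so the free path picks
 * the matching allocator: page-frag free if the buffer fit in a page,
 * kfree() otherwise.
 */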
6645
6646
6647 /* Returns size of skb allocated or < 0 on error.
6648  *
6649  * We only need to fill in the address because the other members
6650  * of the RX descriptor are invariant, see tg3_init_rings.
6651  *
6652  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6653  * posting buffers we only dirty the first cache line of the RX
6654  * descriptor (containing the address).  Whereas for the RX status
6655  * buffers the cpu only reads the last cacheline of the RX descriptor
6656  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6657  */
6658 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6659                              u32 opaque_key, u32 dest_idx_unmasked,
6660                              unsigned int *frag_size)
6661 {
6662         struct tg3_rx_buffer_desc *desc;
6663         struct ring_info *map;
6664         u8 *data;
6665         dma_addr_t mapping;
6666         int skb_size, data_size, dest_idx;
6667
6668         switch (opaque_key) {
6669         case RXD_OPAQUE_RING_STD:
6670                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6671                 desc = &tpr->rx_std[dest_idx];
6672                 map = &tpr->rx_std_buffers[dest_idx];
6673                 data_size = tp->rx_pkt_map_sz;
6674                 break;
6675
6676         case RXD_OPAQUE_RING_JUMBO:
6677                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6678                 desc = &tpr->rx_jmb[dest_idx].std;
6679                 map = &tpr->rx_jmb_buffers[dest_idx];
6680                 data_size = TG3_RX_JMB_MAP_SZ;
6681                 break;
6682
6683         default:
6684                 return -EINVAL;
6685         }
6686
6687         /* Do not overwrite any of the map or rp information
6688          * until we are sure we can commit to a new buffer.
6689          *
6690          * Callers depend upon this behavior and assume that
6691          * we leave everything unchanged if we fail.
6692          */
6693         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6694                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
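        /* Buffers that fit within a page come from the cheap per-CPU page
         * frag allocator and are later handed to build_skb() with a
         * nonzero frag_size; larger (jumbo) buffers fall back to kmalloc()
         * and are flagged with *frag_size = 0.
         */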
6695         if (skb_size <= PAGE_SIZE) {
6696                 data = netdev_alloc_frag(skb_size);
6697                 *frag_size = skb_size;
6698         } else {
6699                 data = kmalloc(skb_size, GFP_ATOMIC);
6700                 *frag_size = 0;
6701         }
6702         if (!data)
6703                 return -ENOMEM;
6704
6705         mapping = pci_map_single(tp->pdev,
6706                                  data + TG3_RX_OFFSET(tp),
6707                                  data_size,
6708                                  PCI_DMA_FROMDEVICE);
6709         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6710                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6711                 return -EIO;
6712         }
6713
6714         map->data = data;
6715         dma_unmap_addr_set(map, mapping, mapping);
6716
6717         desc->addr_hi = ((u64)mapping >> 32);
6718         desc->addr_lo = ((u64)mapping & 0xffffffff);
6719
6720         return data_size;
6721 }
6722
6723 /* We only need to move over in the address because the other
6724  * members of the RX descriptor are invariant.  See notes above
6725  * tg3_alloc_rx_data for full details.
6726  */
6727 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6728                            struct tg3_rx_prodring_set *dpr,
6729                            u32 opaque_key, int src_idx,
6730                            u32 dest_idx_unmasked)
6731 {
6732         struct tg3 *tp = tnapi->tp;
6733         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6734         struct ring_info *src_map, *dest_map;
6735         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6736         int dest_idx;
6737
6738         switch (opaque_key) {
6739         case RXD_OPAQUE_RING_STD:
6740                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6741                 dest_desc = &dpr->rx_std[dest_idx];
6742                 dest_map = &dpr->rx_std_buffers[dest_idx];
6743                 src_desc = &spr->rx_std[src_idx];
6744                 src_map = &spr->rx_std_buffers[src_idx];
6745                 break;
6746
6747         case RXD_OPAQUE_RING_JUMBO:
6748                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6749                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6750                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6751                 src_desc = &spr->rx_jmb[src_idx].std;
6752                 src_map = &spr->rx_jmb_buffers[src_idx];
6753                 break;
6754
6755         default:
6756                 return;
6757         }
6758
6759         dest_map->data = src_map->data;
6760         dma_unmap_addr_set(dest_map, mapping,
6761                            dma_unmap_addr(src_map, mapping));
6762         dest_desc->addr_hi = src_desc->addr_hi;
6763         dest_desc->addr_lo = src_desc->addr_lo;
6764
6765         /* Ensure that the update to the skb happens after the physical
6766          * addresses have been transferred to the new BD location.
6767          */
6768         smp_wmb();
6769
6770         src_map->data = NULL;
6771 }
6772
6773 /* The RX ring scheme is composed of multiple rings which post fresh
6774  * buffers to the chip, and one special ring the chip uses to report
6775  * status back to the host.
6776  *
6777  * The special ring reports the status of received packets to the
6778  * host.  The chip does not write into the original descriptor the
6779  * RX buffer was obtained from.  The chip simply takes the original
6780  * descriptor as provided by the host, updates the status and length
6781  * field, then writes this into the next status ring entry.
6782  *
6783  * Each ring the host uses to post buffers to the chip is described
6784  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6785  * it is first placed into the on-chip RAM.  When the packet's length
6786  * is known, it walks down the TG3_BDINFO entries to select the ring.
6787  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6788  * which is within the range of the new packet's length is chosen.
6789  *
6790  * The "separate ring for rx status" scheme may sound queer, but it makes
6791  * sense from a cache coherency perspective.  If only the host writes
6792  * to the buffer post rings, and only the chip writes to the rx status
6793  * rings, then cache lines never move beyond shared-modified state.
6794  * If both the host and chip were to write into the same ring, cache line
6795  * eviction could occur since both entities want it in an exclusive state.
6796  */
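/* Illustrative data flow, per the description above:
 *
 *	host: tg3_alloc_rx_data() --> std/jumbo producer rings --> chip
 *	chip: status (return) ring -----------------------------> tg3_rx()
 *
 * Only the host writes the producer rings and only the chip writes the
 * return ring.
 */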
6797 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6798 {
6799         struct tg3 *tp = tnapi->tp;
6800         u32 work_mask, rx_std_posted = 0;
6801         u32 std_prod_idx, jmb_prod_idx;
6802         u32 sw_idx = tnapi->rx_rcb_ptr;
6803         u16 hw_idx;
6804         int received;
6805         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6806
6807         hw_idx = *(tnapi->rx_rcb_prod_idx);
6808         /*
6809          * We need to order the read of hw_idx and the read of
6810          * the opaque cookie.
6811          */
6812         rmb();
6813         work_mask = 0;
6814         received = 0;
6815         std_prod_idx = tpr->rx_std_prod_idx;
6816         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6817         while (sw_idx != hw_idx && budget > 0) {
6818                 struct ring_info *ri;
6819                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6820                 unsigned int len;
6821                 struct sk_buff *skb;
6822                 dma_addr_t dma_addr;
6823                 u32 opaque_key, desc_idx, *post_ptr;
6824                 u8 *data;
6825                 u64 tstamp = 0;
6826
6827                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6828                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6829                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6830                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6831                         dma_addr = dma_unmap_addr(ri, mapping);
6832                         data = ri->data;
6833                         post_ptr = &std_prod_idx;
6834                         rx_std_posted++;
6835                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6836                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6837                         dma_addr = dma_unmap_addr(ri, mapping);
6838                         data = ri->data;
6839                         post_ptr = &jmb_prod_idx;
6840                 } else
6841                         goto next_pkt_nopost;
6842
6843                 work_mask |= opaque_key;
6844
6845                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6846                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6847                 drop_it:
6848                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6849                                        desc_idx, *post_ptr);
6850                 drop_it_no_recycle:
6851                         /* Other statistics are tracked by the card. */
6852                         tp->rx_dropped++;
6853                         goto next_pkt;
6854                 }
6855
6856                 prefetch(data + TG3_RX_OFFSET(tp));
6857                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6858                       ETH_FCS_LEN;
6859
6860                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6861                      RXD_FLAG_PTPSTAT_PTPV1 ||
6862                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6863                      RXD_FLAG_PTPSTAT_PTPV2) {
6864                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6865                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6866                 }
6867
6868                 if (len > TG3_RX_COPY_THRESH(tp)) {
6869                         int skb_size;
6870                         unsigned int frag_size;
6871
6872                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6873                                                     *post_ptr, &frag_size);
6874                         if (skb_size < 0)
6875                                 goto drop_it;
6876
6877                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6878                                          PCI_DMA_FROMDEVICE);
6879
6880                         /* Ensure that the update to the data happens
6881                          * after the usage of the old DMA mapping.
6882                          */
6883                         smp_wmb();
6884
6885                         ri->data = NULL;
6886
6887                         skb = build_skb(data, frag_size);
6888                         if (!skb) {
6889                                 tg3_frag_free(frag_size != 0, data);
6890                                 goto drop_it_no_recycle;
6891                         }
6892                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6893                 } else {
6894                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6895                                        desc_idx, *post_ptr);
6896
6897                         skb = netdev_alloc_skb(tp->dev,
6898                                                len + TG3_RAW_IP_ALIGN);
6899                         if (skb == NULL)
6900                                 goto drop_it_no_recycle;
6901
6902                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6903                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6904                         memcpy(skb->data,
6905                                data + TG3_RX_OFFSET(tp),
6906                                len);
6907                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6908                 }
6909
6910                 skb_put(skb, len);
6911                 if (tstamp)
6912                         tg3_hwclock_to_timestamp(tp, tstamp,
6913                                                  skb_hwtstamps(skb));
6914
6915                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6916                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6917                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6918                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6919                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6920                 else
6921                         skb_checksum_none_assert(skb);
6922
6923                 skb->protocol = eth_type_trans(skb, tp->dev);
6924
6925                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6926                     skb->protocol != htons(ETH_P_8021Q)) {
6927                         dev_kfree_skb(skb);
6928                         goto drop_it_no_recycle;
6929                 }
6930
6931                 if (desc->type_flags & RXD_FLAG_VLAN &&
6932                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6933                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6934                                                desc->err_vlan & RXD_VLAN_MASK);
6935
6936                 napi_gro_receive(&tnapi->napi, skb);
6937
6938                 received++;
6939                 budget--;
6940
6941 next_pkt:
6942                 (*post_ptr)++;
6943
6944                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6945                         tpr->rx_std_prod_idx = std_prod_idx &
6946                                                tp->rx_std_ring_mask;
6947                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6948                                      tpr->rx_std_prod_idx);
6949                         work_mask &= ~RXD_OPAQUE_RING_STD;
6950                         rx_std_posted = 0;
6951                 }
6952 next_pkt_nopost:
6953                 sw_idx++;
6954                 sw_idx &= tp->rx_ret_ring_mask;
6955
6956                 /* Refresh hw_idx to see if there is new work */
6957                 if (sw_idx == hw_idx) {
6958                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6959                         rmb();
6960                 }
6961         }
6962
6963         /* ACK the status ring. */
6964         tnapi->rx_rcb_ptr = sw_idx;
6965         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6966
6967         /* Refill RX ring(s). */
6968         if (!tg3_flag(tp, ENABLE_RSS)) {
6969                 /* Sync BD data before updating mailbox */
6970                 wmb();
6971
6972                 if (work_mask & RXD_OPAQUE_RING_STD) {
6973                         tpr->rx_std_prod_idx = std_prod_idx &
6974                                                tp->rx_std_ring_mask;
6975                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6976                                      tpr->rx_std_prod_idx);
6977                 }
6978                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6979                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6980                                                tp->rx_jmb_ring_mask;
6981                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6982                                      tpr->rx_jmb_prod_idx);
6983                 }
6984                 mmiowb();
6985         } else if (work_mask) {
6986                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6987                  * updated before the producer indices can be updated.
6988                  */
6989                 smp_wmb();
6990
6991                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6992                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6993
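                /* In RSS mode only napi[1] writes the hardware rx
                 * mailboxes; the other vectors just publish their shadow
                 * producer indices above and kick napi[1], which
                 * consolidates them via tg3_rx_prodring_xfer().
                 */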
6994                 if (tnapi != &tp->napi[1]) {
6995                         tp->rx_refill = true;
6996                         napi_schedule(&tp->napi[1].napi);
6997                 }
6998         }
6999
7000         return received;
7001 }
7002
7003 static void tg3_poll_link(struct tg3 *tp)
7004 {
7005         /* handle link change and other phy events */
7006         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7007                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7008
7009                 if (sblk->status & SD_STATUS_LINK_CHG) {
7010                         sblk->status = SD_STATUS_UPDATED |
7011                                        (sblk->status & ~SD_STATUS_LINK_CHG);
7012                         spin_lock(&tp->lock);
7013                         if (tg3_flag(tp, USE_PHYLIB)) {
7014                                 tw32_f(MAC_STATUS,
7015                                      (MAC_STATUS_SYNC_CHANGED |
7016                                       MAC_STATUS_CFG_CHANGED |
7017                                       MAC_STATUS_MI_COMPLETION |
7018                                       MAC_STATUS_LNKSTATE_CHANGED));
7019                                 udelay(40);
7020                         } else
7021                                 tg3_setup_phy(tp, false);
7022                         spin_unlock(&tp->lock);
7023                 }
7024         }
7025 }
7026
7027 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7028                                 struct tg3_rx_prodring_set *dpr,
7029                                 struct tg3_rx_prodring_set *spr)
7030 {
7031         u32 si, di, cpycnt, src_prod_idx;
7032         int i, err = 0;
7033
7034         while (1) {
7035                 src_prod_idx = spr->rx_std_prod_idx;
7036
7037                 /* Make sure updates to the rx_std_buffers[] entries and the
7038                  * standard producer index are seen in the correct order.
7039                  */
7040                 smp_rmb();
7041
7042                 if (spr->rx_std_cons_idx == src_prod_idx)
7043                         break;
7044
7045                 if (spr->rx_std_cons_idx < src_prod_idx)
7046                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7047                 else
7048                         cpycnt = tp->rx_std_ring_mask + 1 -
7049                                  spr->rx_std_cons_idx;
7050
7051                 cpycnt = min(cpycnt,
7052                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
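                /* Example: with a 512-entry ring (mask 511), cons_idx
                 * == 500 and prod_idx == 10 take the else branch above:
                 * cpycnt == 512 - 500 == 12, i.e. copy up to the end of
                 * the source ring first (further capped by the space
                 * left in the destination ring); the wrapped remainder
                 * is handled on the next pass of the while loop.
                 */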
7053
7054                 si = spr->rx_std_cons_idx;
7055                 di = dpr->rx_std_prod_idx;
7056
7057                 for (i = di; i < di + cpycnt; i++) {
7058                         if (dpr->rx_std_buffers[i].data) {
7059                                 cpycnt = i - di;
7060                                 err = -ENOSPC;
7061                                 break;
7062                         }
7063                 }
7064
7065                 if (!cpycnt)
7066                         break;
7067
7068                 /* Ensure that updates to the rx_std_buffers ring and the
7069                  * shadowed hardware producer ring from tg3_recycle_skb() are
7070                  * ordered correctly WRT the skb check above.
7071                  */
7072                 smp_rmb();
7073
7074                 memcpy(&dpr->rx_std_buffers[di],
7075                        &spr->rx_std_buffers[si],
7076                        cpycnt * sizeof(struct ring_info));
7077
7078                 for (i = 0; i < cpycnt; i++, di++, si++) {
7079                         struct tg3_rx_buffer_desc *sbd, *dbd;
7080                         sbd = &spr->rx_std[si];
7081                         dbd = &dpr->rx_std[di];
7082                         dbd->addr_hi = sbd->addr_hi;
7083                         dbd->addr_lo = sbd->addr_lo;
7084                 }
7085
7086                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7087                                        tp->rx_std_ring_mask;
7088                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7089                                        tp->rx_std_ring_mask;
7090         }
7091
7092         while (1) {
7093                 src_prod_idx = spr->rx_jmb_prod_idx;
7094
7095                 /* Make sure updates to the rx_jmb_buffers[] entries and
7096                  * the jumbo producer index are seen in the correct order.
7097                  */
7098                 smp_rmb();
7099
7100                 if (spr->rx_jmb_cons_idx == src_prod_idx)
7101                         break;
7102
7103                 if (spr->rx_jmb_cons_idx < src_prod_idx)
7104                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7105                 else
7106                         cpycnt = tp->rx_jmb_ring_mask + 1 -
7107                                  spr->rx_jmb_cons_idx;
7108
7109                 cpycnt = min(cpycnt,
7110                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7111
7112                 si = spr->rx_jmb_cons_idx;
7113                 di = dpr->rx_jmb_prod_idx;
7114
7115                 for (i = di; i < di + cpycnt; i++) {
7116                         if (dpr->rx_jmb_buffers[i].data) {
7117                                 cpycnt = i - di;
7118                                 err = -ENOSPC;
7119                                 break;
7120                         }
7121                 }
7122
7123                 if (!cpycnt)
7124                         break;
7125
7126                 /* Ensure that updates to the rx_jmb_buffers ring and the
7127                  * shadowed hardware producer ring from tg3_recycle_skb() are
7128                  * ordered correctly WRT the skb check above.
7129                  */
7130                 smp_rmb();
7131
7132                 memcpy(&dpr->rx_jmb_buffers[di],
7133                        &spr->rx_jmb_buffers[si],
7134                        cpycnt * sizeof(struct ring_info));
7135
7136                 for (i = 0; i < cpycnt; i++, di++, si++) {
7137                         struct tg3_rx_buffer_desc *sbd, *dbd;
7138                         sbd = &spr->rx_jmb[si].std;
7139                         dbd = &dpr->rx_jmb[di].std;
7140                         dbd->addr_hi = sbd->addr_hi;
7141                         dbd->addr_lo = sbd->addr_lo;
7142                 }
7143
7144                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7145                                        tp->rx_jmb_ring_mask;
7146                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7147                                        tp->rx_jmb_ring_mask;
7148         }
7149
7150         return err;
7151 }
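
/* A minimal userspace sketch (not driver code) of the wrap-aware copy
 * count used by tg3_rx_prodring_xfer() above.  Only the contiguous run
 * up to the wrap point is copied per pass; the next loop iteration
 * picks up the remainder.  All values below are illustrative.
 */
#include <stdio.h>

static unsigned int xfer_count(unsigned int cons, unsigned int prod,
                               unsigned int dst_prod, unsigned int mask)
{
        unsigned int cpycnt;

        if (cons == prod)
                return 0;

        if (cons < prod)
                cpycnt = prod - cons;           /* contiguous run */
        else
                cpycnt = mask + 1 - cons;       /* run up to the wrap */

        /* Never copy past the end of the destination ring either. */
        if (cpycnt > mask + 1 - dst_prod)
                cpycnt = mask + 1 - dst_prod;

        return cpycnt;
}

int main(void)
{
        /* 512-entry ring: consumer at 500, producer wrapped to 20. */
        printf("%u\n", xfer_count(500, 20, 0, 511));    /* 12 */
        /* Destination nearly full: clamped to its remaining space. */
        printf("%u\n", xfer_count(0, 100, 480, 511));   /* 32 */
        return 0;
}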
7152
7153 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7154 {
7155         struct tg3 *tp = tnapi->tp;
7156
7157         /* run TX completion thread */
7158         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7159                 tg3_tx(tnapi);
7160                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7161                         return work_done;
7162         }
7163
7164         if (!tnapi->rx_rcb_prod_idx)
7165                 return work_done;
7166
7167         /* run RX thread, within the bounds set by NAPI.
7168          * All RX "locking" is done by ensuring outside
7169          * code synchronizes with tg3->napi.poll()
7170          */
7171         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7172                 work_done += tg3_rx(tnapi, budget - work_done);
7173
7174         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7175                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7176                 int i, err = 0;
7177                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7178                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7179
7180                 tp->rx_refill = false;
7181                 for (i = 1; i <= tp->rxq_cnt; i++)
7182                         err |= tg3_rx_prodring_xfer(tp, dpr,
7183                                                     &tp->napi[i].prodring);
7184
7185                 wmb();
7186
7187                 if (std_prod_idx != dpr->rx_std_prod_idx)
7188                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7189                                      dpr->rx_std_prod_idx);
7190
7191                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7192                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7193                                      dpr->rx_jmb_prod_idx);
7194
7195                 mmiowb();
7196
7197                 if (err)
7198                         tw32_f(HOSTCC_MODE, tp->coal_now);
7199         }
7200
7201         return work_done;
7202 }
7203
7204 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7205 {
7206         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7207                 schedule_work(&tp->reset_task);
7208 }
7209
7210 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7211 {
7212         cancel_work_sync(&tp->reset_task);
7213         tg3_flag_clear(tp, RESET_TASK_PENDING);
7214         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7215 }
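
/* The schedule-once idiom above (test_and_set_bit() guarding
 * schedule_work()) sketched with C11 atomics in userspace; do_reset()
 * is a hypothetical stand-in for the queued work function.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag reset_pending = ATOMIC_FLAG_INIT;

static void do_reset(void)
{
        puts("reset queued once");
}

static void demo_reset_schedule(void)
{
        /* Only the caller that flips the flag 0 -> 1 queues the work. */
        if (!atomic_flag_test_and_set(&reset_pending))
                do_reset();
}

static void demo_reset_cancel(void)
{
        atomic_flag_clear(&reset_pending);      /* like tg3_flag_clear() */
}

int main(void)
{
        demo_reset_schedule();
        demo_reset_schedule();          /* no-op: flag already set */
        demo_reset_cancel();
        return 0;
}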
7216
7217 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7218 {
7219         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7220         struct tg3 *tp = tnapi->tp;
7221         int work_done = 0;
7222         struct tg3_hw_status *sblk = tnapi->hw_status;
7223
7224         while (1) {
7225                 work_done = tg3_poll_work(tnapi, work_done, budget);
7226
7227                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7228                         goto tx_recovery;
7229
7230                 if (unlikely(work_done >= budget))
7231                         break;
7232
7233                 /* tp->last_tag is used in tg3_int_reenable() below
7234                  * to tell the hw how much work has been processed,
7235                  * so we must read it before checking for more work.
7236                  */
7237                 tnapi->last_tag = sblk->status_tag;
7238                 tnapi->last_irq_tag = tnapi->last_tag;
7239                 rmb();
7240
7241                 /* check for RX/TX work to do */
7242                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7243                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7244
7245                         /* This test is not race free, but looping again
7246                          * will reduce the number of interrupts.
7247                          */
7248                         if (tnapi == &tp->napi[1] && tp->rx_refill)
7249                                 continue;
7250
7251                         napi_complete(napi);
7252                         /* Reenable interrupts. */
7253                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7254
7255                         /* This test here is synchronized by napi_schedule()
7256                          * and napi_complete() to close the race condition.
7257                          */
7258                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7259                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
7260                                                   HOSTCC_MODE_ENABLE |
7261                                                   tnapi->coal_now);
7262                         }
7263                         mmiowb();
7264                         break;
7265                 }
7266         }
7267
7268         return work_done;
7269
7270 tx_recovery:
7271         /* work_done is guaranteed to be less than budget. */
7272         napi_complete(napi);
7273         tg3_reset_task_schedule(tp);
7274         return work_done;
7275 }
7276
7277 static void tg3_process_error(struct tg3 *tp)
7278 {
7279         u32 val;
7280         bool real_error = false;
7281
7282         if (tg3_flag(tp, ERROR_PROCESSED))
7283                 return;
7284
7285         /* Check Flow Attention register */
7286         val = tr32(HOSTCC_FLOW_ATTN);
7287         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7288                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7289                 real_error = true;
7290         }
7291
7292         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7293                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7294                 real_error = true;
7295         }
7296
7297         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7298                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7299                 real_error = true;
7300         }
7301
7302         if (!real_error)
7303                 return;
7304
7305         tg3_dump_state(tp);
7306
7307         tg3_flag_set(tp, ERROR_PROCESSED);
7308         tg3_reset_task_schedule(tp);
7309 }
7310
7311 static int tg3_poll(struct napi_struct *napi, int budget)
7312 {
7313         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7314         struct tg3 *tp = tnapi->tp;
7315         int work_done = 0;
7316         struct tg3_hw_status *sblk = tnapi->hw_status;
7317
7318         while (1) {
7319                 if (sblk->status & SD_STATUS_ERROR)
7320                         tg3_process_error(tp);
7321
7322                 tg3_poll_link(tp);
7323
7324                 work_done = tg3_poll_work(tnapi, work_done, budget);
7325
7326                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7327                         goto tx_recovery;
7328
7329                 if (unlikely(work_done >= budget))
7330                         break;
7331
7332                 if (tg3_flag(tp, TAGGED_STATUS)) {
7333                         /* tp->last_tag is used in tg3_int_reenable() below
7334                          * to tell the hw how much work has been processed,
7335                          * so we must read it before checking for more work.
7336                          */
7337                         tnapi->last_tag = sblk->status_tag;
7338                         tnapi->last_irq_tag = tnapi->last_tag;
7339                         rmb();
7340                 } else
7341                         sblk->status &= ~SD_STATUS_UPDATED;
7342
7343                 if (likely(!tg3_has_work(tnapi))) {
7344                         napi_complete(napi);
7345                         tg3_int_reenable(tnapi);
7346                         break;
7347                 }
7348         }
7349
7350         return work_done;
7351
7352 tx_recovery:
7353         /* work_done is guaranteed to be less than budget. */
7354         napi_complete(napi);
7355         tg3_reset_task_schedule(tp);
7356         return work_done;
7357 }
7358
7359 static void tg3_napi_disable(struct tg3 *tp)
7360 {
7361         int i;
7362
7363         for (i = tp->irq_cnt - 1; i >= 0; i--)
7364                 napi_disable(&tp->napi[i].napi);
7365 }
7366
7367 static void tg3_napi_enable(struct tg3 *tp)
7368 {
7369         int i;
7370
7371         for (i = 0; i < tp->irq_cnt; i++)
7372                 napi_enable(&tp->napi[i].napi);
7373 }
7374
7375 static void tg3_napi_init(struct tg3 *tp)
7376 {
7377         int i;
7378
7379         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7380         for (i = 1; i < tp->irq_cnt; i++)
7381                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7382 }
7383
7384 static void tg3_napi_fini(struct tg3 *tp)
7385 {
7386         int i;
7387
7388         for (i = 0; i < tp->irq_cnt; i++)
7389                 netif_napi_del(&tp->napi[i].napi);
7390 }
7391
7392 static inline void tg3_netif_stop(struct tg3 *tp)
7393 {
7394         tp->dev->trans_start = jiffies; /* prevent tx timeout */
7395         tg3_napi_disable(tp);
7396         netif_carrier_off(tp->dev);
7397         netif_tx_disable(tp->dev);
7398 }
7399
7400 /* tp->lock must be held */
7401 static inline void tg3_netif_start(struct tg3 *tp)
7402 {
7403         tg3_ptp_resume(tp);
7404
7405         /* NOTE: unconditional netif_tx_wake_all_queues is only
7406          * appropriate so long as all callers are assured to
7407          * have free tx slots (such as after tg3_init_hw)
7408          */
7409         netif_tx_wake_all_queues(tp->dev);
7410
7411         if (tp->link_up)
7412                 netif_carrier_on(tp->dev);
7413
7414         tg3_napi_enable(tp);
7415         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7416         tg3_enable_ints(tp);
7417 }
7418
7419 static void tg3_irq_quiesce(struct tg3 *tp)
7420 {
7421         int i;
7422
7423         BUG_ON(tp->irq_sync);
7424
7425         tp->irq_sync = 1;
7426         smp_mb();
7427
7428         for (i = 0; i < tp->irq_cnt; i++)
7429                 synchronize_irq(tp->napi[i].irq_vec);
7430 }
7431
7432 /* Fully shut down all tg3 driver activity elsewhere in the system.
7433  * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
7434  * Most of the time this is not necessary except when
7435  * shutting down the device.
7436  */
7437 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7438 {
7439         spin_lock_bh(&tp->lock);
7440         if (irq_sync)
7441                 tg3_irq_quiesce(tp);
7442 }
7443
7444 static inline void tg3_full_unlock(struct tg3 *tp)
7445 {
7446         spin_unlock_bh(&tp->lock);
7447 }
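
/* A hedged sketch of the intended call pattern for the two helpers
 * above (illustrative, not a literal excerpt from this file):
 * irq_sync=1 is for paths that stop or reconfigure the chip, so the
 * IRQ handlers observe tp->irq_sync and back off first.
 *
 *      tg3_full_lock(tp, 1);   // take tp->lock and quiesce all IRQs
 *      // ... reset or reprogram the chip safely here ...
 *      tg3_full_unlock(tp);
 *
 *      tg3_full_lock(tp, 0);   // fast path: lock only, no IRQ sync
 *      // ... touch state guarded solely by tp->lock ...
 *      tg3_full_unlock(tp);
 */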
7448
7449 /* One-shot MSI handler - Chip automatically disables interrupt
7450  * after sending MSI so driver doesn't have to do it.
7451  */
7452 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7453 {
7454         struct tg3_napi *tnapi = dev_id;
7455         struct tg3 *tp = tnapi->tp;
7456
7457         prefetch(tnapi->hw_status);
7458         if (tnapi->rx_rcb)
7459                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7460
7461         if (likely(!tg3_irq_sync(tp)))
7462                 napi_schedule(&tnapi->napi);
7463
7464         return IRQ_HANDLED;
7465 }
7466
7467 /* MSI ISR - No need to check for interrupt sharing and no need to
7468  * flush status block and interrupt mailbox. PCI ordering rules
7469  * guarantee that MSI will arrive after the status block.
7470  */
7471 static irqreturn_t tg3_msi(int irq, void *dev_id)
7472 {
7473         struct tg3_napi *tnapi = dev_id;
7474         struct tg3 *tp = tnapi->tp;
7475
7476         prefetch(tnapi->hw_status);
7477         if (tnapi->rx_rcb)
7478                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7479         /*
7480          * Writing any value to intr-mbox-0 clears PCI INTA# and
7481          * chip-internal interrupt pending events.
7482          * Writing non-zero to intr-mbox-0 additionally tells the
7483          * NIC to stop sending us irqs, engaging "in-intr-handler"
7484          * event coalescing.
7485          */
7486         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7487         if (likely(!tg3_irq_sync(tp)))
7488                 napi_schedule(&tnapi->napi);
7489
7490         return IRQ_RETVAL(1);
7491 }
7492
7493 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7494 {
7495         struct tg3_napi *tnapi = dev_id;
7496         struct tg3 *tp = tnapi->tp;
7497         struct tg3_hw_status *sblk = tnapi->hw_status;
7498         unsigned int handled = 1;
7499
7500         /* In INTx mode, it is possible for the interrupt to arrive at
7501          * the CPU before the status block that was posted prior to the interrupt.
7502          * Reading the PCI State register will confirm whether the
7503          * interrupt is ours and will flush the status block.
7504          */
7505         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7506                 if (tg3_flag(tp, CHIP_RESETTING) ||
7507                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7508                         handled = 0;
7509                         goto out;
7510                 }
7511         }
7512
7513         /*
7514          * Writing any value to intr-mbox-0 clears PCI INTA# and
7515          * chip-internal interrupt pending events.
7516          * Writing non-zero to intr-mbox-0 additionally tells the
7517          * NIC to stop sending us irqs, engaging "in-intr-handler"
7518          * event coalescing.
7519          *
7520          * Flush the mailbox to de-assert the IRQ immediately to prevent
7521          * spurious interrupts.  The flush impacts performance but
7522          * excessive spurious interrupts can be worse in some cases.
7523          */
7524         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7525         if (tg3_irq_sync(tp))
7526                 goto out;
7527         sblk->status &= ~SD_STATUS_UPDATED;
7528         if (likely(tg3_has_work(tnapi))) {
7529                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7530                 napi_schedule(&tnapi->napi);
7531         } else {
7532                 /* No work, shared interrupt perhaps?  Re-enable
7533                  * interrupts, and flush that PCI write
7534                  */
7535                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7536                                0x00000000);
7537         }
7538 out:
7539         return IRQ_RETVAL(handled);
7540 }
7541
7542 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7543 {
7544         struct tg3_napi *tnapi = dev_id;
7545         struct tg3 *tp = tnapi->tp;
7546         struct tg3_hw_status *sblk = tnapi->hw_status;
7547         unsigned int handled = 1;
7548
7549         /* In INTx mode, it is possible for the interrupt to arrive at
7550          * the CPU before the status block posted prior to the interrupt.
7551          * Reading the PCI State register will confirm whether the
7552          * interrupt is ours and will flush the status block.
7553          */
7554         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7555                 if (tg3_flag(tp, CHIP_RESETTING) ||
7556                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7557                         handled = 0;
7558                         goto out;
7559                 }
7560         }
7561
7562         /*
7563          * Writing any value to intr-mbox-0 clears PCI INTA# and
7564          * chip-internal interrupt pending events.
7565          * Writing non-zero to intr-mbox-0 additionally tells the
7566          * NIC to stop sending us irqs, engaging "in-intr-handler"
7567          * event coalescing.
7568          *
7569          * Flush the mailbox to de-assert the IRQ immediately to prevent
7570          * spurious interrupts.  The flush impacts performance but
7571          * excessive spurious interrupts can be worse in some cases.
7572          */
7573         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7574
7575         /*
7576          * In a shared interrupt configuration, sometimes other devices'
7577          * interrupts will scream.  We record the current status tag here
7578          * so that the above check can report that the screaming interrupts
7579          * are unhandled.  Eventually they will be silenced.
7580          */
7581         tnapi->last_irq_tag = sblk->status_tag;
7582
7583         if (tg3_irq_sync(tp))
7584                 goto out;
7585
7586         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7587
7588         napi_schedule(&tnapi->napi);
7589
7590 out:
7591         return IRQ_RETVAL(handled);
7592 }
7593
7594 /* ISR for interrupt test */
7595 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7596 {
7597         struct tg3_napi *tnapi = dev_id;
7598         struct tg3 *tp = tnapi->tp;
7599         struct tg3_hw_status *sblk = tnapi->hw_status;
7600
7601         if ((sblk->status & SD_STATUS_UPDATED) ||
7602             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7603                 tg3_disable_ints(tp);
7604                 return IRQ_RETVAL(1);
7605         }
7606         return IRQ_RETVAL(0);
7607 }
7608
7609 #ifdef CONFIG_NET_POLL_CONTROLLER
7610 static void tg3_poll_controller(struct net_device *dev)
7611 {
7612         int i;
7613         struct tg3 *tp = netdev_priv(dev);
7614
7615         if (tg3_irq_sync(tp))
7616                 return;
7617
7618         for (i = 0; i < tp->irq_cnt; i++)
7619                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7620 }
7621 #endif
7622
7623 static void tg3_tx_timeout(struct net_device *dev)
7624 {
7625         struct tg3 *tp = netdev_priv(dev);
7626
7627         if (netif_msg_tx_err(tp)) {
7628                 netdev_err(dev, "transmit timed out, resetting\n");
7629                 tg3_dump_state(tp);
7630         }
7631
7632         tg3_reset_task_schedule(tp);
7633 }
7634
7635 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7636 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7637 {
7638         u32 base = (u32) mapping & 0xffffffff;
7639
7640         return base + len + 8 < base;
7641 }
7642
7643 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7644  * of any 4GB boundaries: 4G, 8G, etc
7645  */
7646 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7647                                            u32 len, u32 mss)
7648 {
7649         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7650                 u32 base = (u32) mapping & 0xffffffff;
7651
7652                 return ((base + len + (mss & 0x3fff)) < base);
7653         }
7654         return 0;
7655 }
7656
7657 /* Test for DMA addresses > 40-bit */
7658 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7659                                           int len)
7660 {
7661 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7662         if (tg3_flag(tp, 40BIT_DMA_BUG))
7663                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7664         return 0;
7665 #else
7666         return 0;
7667 #endif
7668 }
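
/* A minimal userspace sketch of the boundary tests above: a buffer
 * "crosses" a 4GB boundary when the low 32 bits of base + len + 8
 * wrap past zero.  Example addresses are illustrative.
 */
#include <stdio.h>

static int crosses_4g(unsigned long long mapping, unsigned int len)
{
        unsigned int base = (unsigned int)mapping;      /* low 32 bits */

        /* A 32-bit wrap means the buffer straddles a 4G multiple. */
        return base + len + 8 < base;
}

int main(void)
{
        printf("%d\n", crosses_4g(0xfffffe00ULL, 0x100)); /* 0: fits below 4G */
        printf("%d\n", crosses_4g(0xffffff00ULL, 0x100)); /* 1: straddles 4G */
        return 0;
}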
7669
7670 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7671                                  dma_addr_t mapping, u32 len, u32 flags,
7672                                  u32 mss, u32 vlan)
7673 {
7674         txbd->addr_hi = ((u64) mapping >> 32);
7675         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7676         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7677         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7678 }
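
/* A userspace sketch of the descriptor packing above: the 64-bit DMA
 * address is split into two 32-bit halves, and len/flags share one
 * word with the length in the upper 16 bits (TXD_LEN_SHIFT is 16 in
 * tg3.h).  Values are illustrative.
 */
#include <stdio.h>

int main(void)
{
        unsigned long long mapping = 0x123456789ULL;
        unsigned int len = 1514, flags = 0x0004;

        unsigned int addr_hi = mapping >> 32;
        unsigned int addr_lo = mapping & 0xffffffff;
        unsigned int len_flags = (len << 16) | (flags & 0xffff);

        printf("hi=%#x lo=%#x len_flags=%#x\n", addr_hi, addr_lo, len_flags);
        /* hi=0x1 lo=0x23456789 len_flags=0x5ea0004 */
        return 0;
}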
7679
7680 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7681                             dma_addr_t map, u32 len, u32 flags,
7682                             u32 mss, u32 vlan)
7683 {
7684         struct tg3 *tp = tnapi->tp;
7685         bool hwbug = false;
7686
7687         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7688                 hwbug = true;
7689
7690         if (tg3_4g_overflow_test(map, len))
7691                 hwbug = true;
7692
7693         if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7694                 hwbug = true;
7695
7696         if (tg3_40bit_overflow_test(tp, map, len))
7697                 hwbug = true;
7698
7699         if (tp->dma_limit) {
7700                 u32 prvidx = *entry;
7701                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7702                 while (len > tp->dma_limit && *budget) {
7703                         u32 frag_len = tp->dma_limit;
7704                         len -= tp->dma_limit;
7705
7706                         /* Avoid the 8-byte DMA problem */
7707                         if (len <= 8) {
7708                                 len += tp->dma_limit / 2;
7709                                 frag_len = tp->dma_limit / 2;
7710                         }
7711
7712                         tnapi->tx_buffers[*entry].fragmented = true;
7713
7714                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7715                                       frag_len, tmp_flag, mss, vlan);
7716                         *budget -= 1;
7717                         prvidx = *entry;
7718                         *entry = NEXT_TX(*entry);
7719
7720                         map += frag_len;
7721                 }
7722
7723                 if (len) {
7724                         if (*budget) {
7725                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7726                                               len, flags, mss, vlan);
7727                                 *budget -= 1;
7728                                 *entry = NEXT_TX(*entry);
7729                         } else {
7730                                 hwbug = true;
7731                                 tnapi->tx_buffers[prvidx].fragmented = false;
7732                         }
7733                 }
7734         } else {
7735                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7736                               len, flags, mss, vlan);
7737                 *entry = NEXT_TX(*entry);
7738         }
7739
7740         return hwbug;
7741 }
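
/* A userspace sketch of the dma_limit splitting above: peel off
 * dma_limit-sized chunks and, when the remainder would land in the
 * troublesome <= 8 byte range, split the last chunk in half instead.
 * Lengths are illustrative.
 */
#include <stdio.h>

int main(void)
{
        unsigned int len = 4100, dma_limit = 4096;

        while (len > dma_limit) {
                unsigned int frag_len = dma_limit;

                len -= dma_limit;
                if (len <= 8) {         /* avoid the 8-byte DMA problem */
                        len += dma_limit / 2;
                        frag_len = dma_limit / 2;
                }
                printf("frag %u\n", frag_len);
        }
        printf("tail %u\n", len);
        /* prints: frag 2048, tail 2052 (no tiny trailing fragment) */
        return 0;
}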
7742
7743 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7744 {
7745         int i;
7746         struct sk_buff *skb;
7747         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7748
7749         skb = txb->skb;
7750         txb->skb = NULL;
7751
7752         pci_unmap_single(tnapi->tp->pdev,
7753                          dma_unmap_addr(txb, mapping),
7754                          skb_headlen(skb),
7755                          PCI_DMA_TODEVICE);
7756
7757         while (txb->fragmented) {
7758                 txb->fragmented = false;
7759                 entry = NEXT_TX(entry);
7760                 txb = &tnapi->tx_buffers[entry];
7761         }
7762
7763         for (i = 0; i <= last; i++) {
7764                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7765
7766                 entry = NEXT_TX(entry);
7767                 txb = &tnapi->tx_buffers[entry];
7768
7769                 pci_unmap_page(tnapi->tp->pdev,
7770                                dma_unmap_addr(txb, mapping),
7771                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7772
7773                 while (txb->fragmented) {
7774                         txb->fragmented = false;
7775                         entry = NEXT_TX(entry);
7776                         txb = &tnapi->tx_buffers[entry];
7777                 }
7778         }
7779 }
7780
7781 /* Work around 4GB and 40-bit hardware DMA bugs. */
7782 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7783                                        struct sk_buff **pskb,
7784                                        u32 *entry, u32 *budget,
7785                                        u32 base_flags, u32 mss, u32 vlan)
7786 {
7787         struct tg3 *tp = tnapi->tp;
7788         struct sk_buff *new_skb, *skb = *pskb;
7789         dma_addr_t new_addr = 0;
7790         int ret = 0;
7791
7792         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7793                 new_skb = skb_copy(skb, GFP_ATOMIC);
7794         else {
7795                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7796
7797                 new_skb = skb_copy_expand(skb,
7798                                           skb_headroom(skb) + more_headroom,
7799                                           skb_tailroom(skb), GFP_ATOMIC);
7800         }
7801
7802         if (!new_skb) {
7803                 ret = -1;
7804         } else {
7805                 /* New SKB is guaranteed to be linear. */
7806                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7807                                           PCI_DMA_TODEVICE);
7808                 /* Make sure the mapping succeeded */
7809                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7810                         dev_kfree_skb(new_skb);
7811                         ret = -1;
7812                 } else {
7813                         u32 save_entry = *entry;
7814
7815                         base_flags |= TXD_FLAG_END;
7816
7817                         tnapi->tx_buffers[*entry].skb = new_skb;
7818                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7819                                            mapping, new_addr);
7820
7821                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7822                                             new_skb->len, base_flags,
7823                                             mss, vlan)) {
7824                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7825                                 dev_kfree_skb(new_skb);
7826                                 ret = -1;
7827                         }
7828                 }
7829         }
7830
7831         dev_kfree_skb(skb);
7832         *pskb = new_skb;
7833         return ret;
7834 }
7835
7836 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7837
7838 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7839  * TSO header is greater than 80 bytes.
7840  */
7841 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7842 {
7843         struct sk_buff *segs, *nskb;
7844         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7845
7846         /* Estimate the number of fragments in the worst case */
7847         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7848                 netif_stop_queue(tp->dev);
7849
7850                 /* netif_tx_stop_queue() must be done before checking
7851                  * the tx index in tg3_tx_avail() below, because in
7852                  * tg3_tx(), we update tx index before checking for
7853                  * netif_tx_queue_stopped().
7854                  */
7855                 smp_mb();
7856                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7857                         return NETDEV_TX_BUSY;
7858
7859                 netif_wake_queue(tp->dev);
7860         }
7861
7862         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7863         if (IS_ERR(segs))
7864                 goto tg3_tso_bug_end;
7865
7866         do {
7867                 nskb = segs;
7868                 segs = segs->next;
7869                 nskb->next = NULL;
7870                 tg3_start_xmit(nskb, tp->dev);
7871         } while (segs);
7872
7873 tg3_tso_bug_end:
7874         dev_kfree_skb(skb);
7875
7876         return NETDEV_TX_OK;
7877 }
7878
7879 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7880  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7881  */
7882 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7883 {
7884         struct tg3 *tp = netdev_priv(dev);
7885         u32 len, entry, base_flags, mss, vlan = 0;
7886         u32 budget;
7887         int i = -1, would_hit_hwbug;
7888         dma_addr_t mapping;
7889         struct tg3_napi *tnapi;
7890         struct netdev_queue *txq;
7891         unsigned int last;
7892
7893         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7894         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7895         if (tg3_flag(tp, ENABLE_TSS))
7896                 tnapi++;
7897
7898         budget = tg3_tx_avail(tnapi);
7899
7900         /* We are running in BH disabled context with netif_tx_lock
7901          * and TX reclaim runs via tp->napi.poll inside of a software
7902          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7903          * no IRQ context deadlocks to worry about either.  Rejoice!
7904          */
7905         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7906                 if (!netif_tx_queue_stopped(txq)) {
7907                         netif_tx_stop_queue(txq);
7908
7909                         /* This is a hard error, log it. */
7910                         netdev_err(dev,
7911                                    "BUG! Tx Ring full when queue awake!\n");
7912                 }
7913                 return NETDEV_TX_BUSY;
7914         }
7915
7916         entry = tnapi->tx_prod;
7917         base_flags = 0;
7918         if (skb->ip_summed == CHECKSUM_PARTIAL)
7919                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7920
7921         mss = skb_shinfo(skb)->gso_size;
7922         if (mss) {
7923                 struct iphdr *iph;
7924                 u32 tcp_opt_len, hdr_len;
7925
7926                 if (skb_header_cloned(skb) &&
7927                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7928                         goto drop;
7929
7930                 iph = ip_hdr(skb);
7931                 tcp_opt_len = tcp_optlen(skb);
7932
7933                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7934
7935                 if (!skb_is_gso_v6(skb)) {
7936                         iph->check = 0;
7937                         iph->tot_len = htons(mss + hdr_len);
7938                 }
7939
7940                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7941                     tg3_flag(tp, TSO_BUG))
7942                         return tg3_tso_bug(tp, skb);
7943
7944                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7945                                TXD_FLAG_CPU_POST_DMA);
7946
7947                 if (tg3_flag(tp, HW_TSO_1) ||
7948                     tg3_flag(tp, HW_TSO_2) ||
7949                     tg3_flag(tp, HW_TSO_3)) {
7950                         tcp_hdr(skb)->check = 0;
7951                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7952                 } else
7953                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7954                                                                  iph->daddr, 0,
7955                                                                  IPPROTO_TCP,
7956                                                                  0);
7957
7958                 if (tg3_flag(tp, HW_TSO_3)) {
7959                         mss |= (hdr_len & 0xc) << 12;
7960                         if (hdr_len & 0x10)
7961                                 base_flags |= 0x00000010;
7962                         base_flags |= (hdr_len & 0x3e0) << 5;
7963                 } else if (tg3_flag(tp, HW_TSO_2))
7964                         mss |= hdr_len << 9;
7965                 else if (tg3_flag(tp, HW_TSO_1) ||
7966                          tg3_asic_rev(tp) == ASIC_REV_5705) {
7967                         if (tcp_opt_len || iph->ihl > 5) {
7968                                 int tsflags;
7969
7970                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7971                                 mss |= (tsflags << 11);
7972                         }
7973                 } else {
7974                         if (tcp_opt_len || iph->ihl > 5) {
7975                                 int tsflags;
7976
7977                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7978                                 base_flags |= tsflags << 12;
7979                         }
7980                 }
7981         }
7982
7983         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7984             !mss && skb->len > VLAN_ETH_FRAME_LEN)
7985                 base_flags |= TXD_FLAG_JMB_PKT;
7986
7987         if (vlan_tx_tag_present(skb)) {
7988                 base_flags |= TXD_FLAG_VLAN;
7989                 vlan = vlan_tx_tag_get(skb);
7990         }
7991
7992         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7993             tg3_flag(tp, TX_TSTAMP_EN)) {
7994                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7995                 base_flags |= TXD_FLAG_HWTSTAMP;
7996         }
7997
7998         len = skb_headlen(skb);
7999
8000         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8001         if (pci_dma_mapping_error(tp->pdev, mapping))
8002                 goto drop;
8003
8004
8005         tnapi->tx_buffers[entry].skb = skb;
8006         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8007
8008         would_hit_hwbug = 0;
8009
8010         if (tg3_flag(tp, 5701_DMA_BUG))
8011                 would_hit_hwbug = 1;
8012
8013         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8014                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8015                             mss, vlan)) {
8016                 would_hit_hwbug = 1;
8017         } else if (skb_shinfo(skb)->nr_frags > 0) {
8018                 u32 tmp_mss = mss;
8019
8020                 if (!tg3_flag(tp, HW_TSO_1) &&
8021                     !tg3_flag(tp, HW_TSO_2) &&
8022                     !tg3_flag(tp, HW_TSO_3))
8023                         tmp_mss = 0;
8024
8025                 /* Now loop through additional data
8026                  * fragments, and queue them.
8027                  */
8028                 last = skb_shinfo(skb)->nr_frags - 1;
8029                 for (i = 0; i <= last; i++) {
8030                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8031
8032                         len = skb_frag_size(frag);
8033                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8034                                                    len, DMA_TO_DEVICE);
8035
8036                         tnapi->tx_buffers[entry].skb = NULL;
8037                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8038                                            mapping);
8039                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8040                                 goto dma_error;
8041
8042                         if (!budget ||
8043                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8044                                             len, base_flags |
8045                                             ((i == last) ? TXD_FLAG_END : 0),
8046                                             tmp_mss, vlan)) {
8047                                 would_hit_hwbug = 1;
8048                                 break;
8049                         }
8050                 }
8051         }
8052
8053         if (would_hit_hwbug) {
8054                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8055
8056                 /* If the workaround fails due to memory/mapping
8057                  * failure, silently drop this packet.
8058                  */
8059                 entry = tnapi->tx_prod;
8060                 budget = tg3_tx_avail(tnapi);
8061                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8062                                                 base_flags, mss, vlan))
8063                         goto drop_nofree;
8064         }
8065
8066         skb_tx_timestamp(skb);
8067         netdev_tx_sent_queue(txq, skb->len);
8068
8069         /* Sync BD data before updating mailbox */
8070         wmb();
8071
8072         /* Packets are ready, update Tx producer idx local and on card. */
8073         tw32_tx_mbox(tnapi->prodmbox, entry);
8074
8075         tnapi->tx_prod = entry;
8076         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8077                 netif_tx_stop_queue(txq);
8078
8079                 /* netif_tx_stop_queue() must be done before checking
8080                  * the tx index in tg3_tx_avail() below, because in
8081                  * tg3_tx(), we update tx index before checking for
8082                  * netif_tx_queue_stopped().
8083                  */
8084                 smp_mb();
8085                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8086                         netif_tx_wake_queue(txq);
8087         }
8088
8089         mmiowb();
8090         return NETDEV_TX_OK;
8091
8092 dma_error:
8093         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8094         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8095 drop:
8096         dev_kfree_skb(skb);
8097 drop_nofree:
8098         tp->tx_dropped++;
8099         return NETDEV_TX_OK;
8100 }
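
/* A userspace sketch of the HW_TSO_3 header-length encoding in
 * tg3_start_xmit() above: bits of hdr_len are scattered into the mss
 * word and base_flags exactly as the driver does it.  The hdr_len
 * value is illustrative.
 */
#include <stdio.h>

int main(void)
{
        unsigned int hdr_len = 0x56;    /* illustrative TCP/IP header bytes */
        unsigned int mss = 1448, base_flags = 0;

        mss |= (hdr_len & 0xc) << 12;           /* hdr_len[2:3] -> mss[14:15] */
        if (hdr_len & 0x10)
                base_flags |= 0x00000010;       /* hdr_len[4] -> flag bit 4 */
        base_flags |= (hdr_len & 0x3e0) << 5;   /* hdr_len[5:9] -> flags[10:14] */

        printf("mss=%#x flags=%#x\n", mss, base_flags);
        /* mss=0x45a8 flags=0x810 */
        return 0;
}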
8101
8102 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8103 {
8104         if (enable) {
8105                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8106                                   MAC_MODE_PORT_MODE_MASK);
8107
8108                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8109
8110                 if (!tg3_flag(tp, 5705_PLUS))
8111                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8112
8113                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8114                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8115                 else
8116                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8117         } else {
8118                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8119
8120                 if (tg3_flag(tp, 5705_PLUS) ||
8121                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8122                     tg3_asic_rev(tp) == ASIC_REV_5700)
8123                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8124         }
8125
8126         tw32(MAC_MODE, tp->mac_mode);
8127         udelay(40);
8128 }
8129
8130 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8131 {
8132         u32 val, bmcr, mac_mode, ptest = 0;
8133
8134         tg3_phy_toggle_apd(tp, false);
8135         tg3_phy_toggle_automdix(tp, false);
8136
8137         if (extlpbk && tg3_phy_set_extloopbk(tp))
8138                 return -EIO;
8139
8140         bmcr = BMCR_FULLDPLX;
8141         switch (speed) {
8142         case SPEED_10:
8143                 break;
8144         case SPEED_100:
8145                 bmcr |= BMCR_SPEED100;
8146                 break;
8147         case SPEED_1000:
8148         default:
8149                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8150                         speed = SPEED_100;
8151                         bmcr |= BMCR_SPEED100;
8152                 } else {
8153                         speed = SPEED_1000;
8154                         bmcr |= BMCR_SPEED1000;
8155                 }
8156         }
8157
8158         if (extlpbk) {
8159                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8160                         tg3_readphy(tp, MII_CTRL1000, &val);
8161                         val |= CTL1000_AS_MASTER |
8162                                CTL1000_ENABLE_MASTER;
8163                         tg3_writephy(tp, MII_CTRL1000, val);
8164                 } else {
8165                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8166                                 MII_TG3_FET_PTEST_TRIM_2;
8167                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8168                 }
8169         } else
8170                 bmcr |= BMCR_LOOPBACK;
8171
8172         tg3_writephy(tp, MII_BMCR, bmcr);
8173
8174         /* The write needs to be flushed for the FETs */
8175         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8176                 tg3_readphy(tp, MII_BMCR, &bmcr);
8177
8178         udelay(40);
8179
8180         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8181             tg3_asic_rev(tp) == ASIC_REV_5785) {
8182                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8183                              MII_TG3_FET_PTEST_FRC_TX_LINK |
8184                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
8185
8186                 /* The write needs to be flushed for the AC131 */
8187                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8188         }
8189
8190         /* Reset to prevent losing 1st rx packet intermittently */
8191         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8192             tg3_flag(tp, 5780_CLASS)) {
8193                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8194                 udelay(10);
8195                 tw32_f(MAC_RX_MODE, tp->rx_mode);
8196         }
8197
8198         mac_mode = tp->mac_mode &
8199                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8200         if (speed == SPEED_1000)
8201                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8202         else
8203                 mac_mode |= MAC_MODE_PORT_MODE_MII;
8204
8205         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8206                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8207
8208                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8209                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8210                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8211                         mac_mode |= MAC_MODE_LINK_POLARITY;
8212
8213                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8214                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8215         }
8216
8217         tw32(MAC_MODE, mac_mode);
8218         udelay(40);
8219
8220         return 0;
8221 }
8222
8223 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8224 {
8225         struct tg3 *tp = netdev_priv(dev);
8226
8227         if (features & NETIF_F_LOOPBACK) {
8228                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8229                         return;
8230
8231                 spin_lock_bh(&tp->lock);
8232                 tg3_mac_loopback(tp, true);
8233                 netif_carrier_on(tp->dev);
8234                 spin_unlock_bh(&tp->lock);
8235                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8236         } else {
8237                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8238                         return;
8239
8240                 spin_lock_bh(&tp->lock);
8241                 tg3_mac_loopback(tp, false);
8242                 /* Force link status check */
8243                 tg3_setup_phy(tp, true);
8244                 spin_unlock_bh(&tp->lock);
8245                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8246         }
8247 }
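
/* tg3_set_loopback() above is driven by the NETIF_F_LOOPBACK feature
 * bit, so internal MAC loopback can typically be toggled from
 * userspace via ethtool's feature interface (interface name is
 * illustrative; assumes the kernel exposes the bit as "loopback"):
 *
 *      ethtool -K eth0 loopback on
 *      ethtool -K eth0 loopback off
 */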
8248
8249 static netdev_features_t tg3_fix_features(struct net_device *dev,
8250         netdev_features_t features)
8251 {
8252         struct tg3 *tp = netdev_priv(dev);
8253
8254         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8255                 features &= ~NETIF_F_ALL_TSO;
8256
8257         return features;
8258 }
8259
8260 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8261 {
8262         netdev_features_t changed = dev->features ^ features;
8263
8264         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8265                 tg3_set_loopback(dev, features);
8266
8267         return 0;
8268 }
8269
8270 static void tg3_rx_prodring_free(struct tg3 *tp,
8271                                  struct tg3_rx_prodring_set *tpr)
8272 {
8273         int i;
8274
8275         if (tpr != &tp->napi[0].prodring) {
8276                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8277                      i = (i + 1) & tp->rx_std_ring_mask)
8278                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8279                                         tp->rx_pkt_map_sz);
8280
8281                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8282                         for (i = tpr->rx_jmb_cons_idx;
8283                              i != tpr->rx_jmb_prod_idx;
8284                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8285                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8286                                                 TG3_RX_JMB_MAP_SZ);
8287                         }
8288                 }
8289
8290                 return;
8291         }
8292
8293         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8294                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8295                                 tp->rx_pkt_map_sz);
8296
8297         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8298                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8299                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8300                                         TG3_RX_JMB_MAP_SZ);
8301         }
8302 }
8303
8304 /* Initialize rx rings for packet processing.
8305  *
8306  * The chip has been shut down and the driver detached from
8307  * the networking layer, so no interrupts or new tx packets will
8308  * end up in the driver.  tp->{tx,}lock are held and thus
8309  * we may not sleep.
8310  */
8311 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8312                                  struct tg3_rx_prodring_set *tpr)
8313 {
8314         u32 i, rx_pkt_dma_sz;
8315
8316         tpr->rx_std_cons_idx = 0;
8317         tpr->rx_std_prod_idx = 0;
8318         tpr->rx_jmb_cons_idx = 0;
8319         tpr->rx_jmb_prod_idx = 0;
8320
8321         if (tpr != &tp->napi[0].prodring) {
8322                 memset(&tpr->rx_std_buffers[0], 0,
8323                        TG3_RX_STD_BUFF_RING_SIZE(tp));
8324                 if (tpr->rx_jmb_buffers)
8325                         memset(&tpr->rx_jmb_buffers[0], 0,
8326                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
8327                 goto done;
8328         }
8329
8330         /* Zero out all descriptors. */
8331         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8332
8333         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8334         if (tg3_flag(tp, 5780_CLASS) &&
8335             tp->dev->mtu > ETH_DATA_LEN)
8336                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8337         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8338
8339         /* Initialize invariants of the rings; we only set this
8340          * stuff once.  This works because the card does not
8341          * write into the rx buffer posting rings.
8342          */
8343         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8344                 struct tg3_rx_buffer_desc *rxd;
8345
8346                 rxd = &tpr->rx_std[i];
8347                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8348                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8349                 rxd->opaque = (RXD_OPAQUE_RING_STD |
8350                                (i << RXD_OPAQUE_INDEX_SHIFT));
8351         }
8352
8353         /* Now allocate fresh SKBs for each rx ring. */
8354         for (i = 0; i < tp->rx_pending; i++) {
8355                 unsigned int frag_size;
8356
8357                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8358                                       &frag_size) < 0) {
8359                         netdev_warn(tp->dev,
8360                                     "Using a smaller RX standard ring. Only "
8361                                     "%d out of %d buffers were allocated "
8362                                     "successfully\n", i, tp->rx_pending);
8363                         if (i == 0)
8364                                 goto initfail;
8365                         tp->rx_pending = i;
8366                         break;
8367                 }
8368         }
8369
8370         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8371                 goto done;
8372
8373         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8374
8375         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8376                 goto done;
8377
8378         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8379                 struct tg3_rx_buffer_desc *rxd;
8380
8381                 rxd = &tpr->rx_jmb[i].std;
8382                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8383                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8384                                   RXD_FLAG_JUMBO;
8385                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8386                        (i << RXD_OPAQUE_INDEX_SHIFT));
8387         }
8388
8389         for (i = 0; i < tp->rx_jumbo_pending; i++) {
8390                 unsigned int frag_size;
8391
8392                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8393                                       &frag_size) < 0) {
8394                         netdev_warn(tp->dev,
8395                                     "Using a smaller RX jumbo ring. Only %d "
8396                                     "out of %d buffers were allocated "
8397                                     "successfully\n", i, tp->rx_jumbo_pending);
8398                         if (i == 0)
8399                                 goto initfail;
8400                         tp->rx_jumbo_pending = i;
8401                         break;
8402                 }
8403         }
8404
8405 done:
8406         return 0;
8407
8408 initfail:
8409         tg3_rx_prodring_free(tp, tpr);
8410         return -ENOMEM;
8411 }
8412
8413 static void tg3_rx_prodring_fini(struct tg3 *tp,
8414                                  struct tg3_rx_prodring_set *tpr)
8415 {
8416         kfree(tpr->rx_std_buffers);
8417         tpr->rx_std_buffers = NULL;
8418         kfree(tpr->rx_jmb_buffers);
8419         tpr->rx_jmb_buffers = NULL;
8420         if (tpr->rx_std) {
8421                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8422                                   tpr->rx_std, tpr->rx_std_mapping);
8423                 tpr->rx_std = NULL;
8424         }
8425         if (tpr->rx_jmb) {
8426                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8427                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8428                 tpr->rx_jmb = NULL;
8429         }
8430 }
8431
8432 static int tg3_rx_prodring_init(struct tg3 *tp,
8433                                 struct tg3_rx_prodring_set *tpr)
8434 {
8435         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8436                                       GFP_KERNEL);
8437         if (!tpr->rx_std_buffers)
8438                 return -ENOMEM;
8439
8440         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8441                                          TG3_RX_STD_RING_BYTES(tp),
8442                                          &tpr->rx_std_mapping,
8443                                          GFP_KERNEL);
8444         if (!tpr->rx_std)
8445                 goto err_out;
8446
8447         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8448                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8449                                               GFP_KERNEL);
8450                 if (!tpr->rx_jmb_buffers)
8451                         goto err_out;
8452
8453                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8454                                                  TG3_RX_JMB_RING_BYTES(tp),
8455                                                  &tpr->rx_jmb_mapping,
8456                                                  GFP_KERNEL);
8457                 if (!tpr->rx_jmb)
8458                         goto err_out;
8459         }
8460
8461         return 0;
8462
8463 err_out:
8464         tg3_rx_prodring_fini(tp, tpr);
8465         return -ENOMEM;
8466 }
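
/* The err_out exits above follow the classic C goto-cleanup idiom:
 * any failure jumps to one tail that releases whatever was already
 * acquired.  A tiny userspace sketch of the same shape (names are
 * illustrative):
 */
#include <stdlib.h>

struct demo_rings {
        void *std;
        void *jmb;
};

static int demo_rings_init(struct demo_rings *r)
{
        r->std = NULL;
        r->jmb = NULL;

        r->std = malloc(512);
        if (!r->std)
                goto err_out;

        r->jmb = malloc(256);
        if (!r->jmb)
                goto err_out;

        return 0;

err_out:
        /* free(NULL) is a no-op, so one tail covers every exit. */
        free(r->std);
        free(r->jmb);
        r->std = r->jmb = NULL;
        return -1;
}

int main(void)
{
        struct demo_rings r;
        int err = demo_rings_init(&r);

        free(r.std);
        free(r.jmb);
        return err ? 1 : 0;
}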
8467
8468 /* Free up pending packets in all rx/tx rings.
8469  *
8470  * The chip has been shut down and the driver detached from
8471  * the networking layer, so no interrupts or new tx packets will
8472  * end up in the driver.  tp->{tx,}lock is not held and we are not
8473  * in an interrupt context and thus may sleep.
8474  */
8475 static void tg3_free_rings(struct tg3 *tp)
8476 {
8477         int i, j;
8478
8479         for (j = 0; j < tp->irq_cnt; j++) {
8480                 struct tg3_napi *tnapi = &tp->napi[j];
8481
8482                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8483
8484                 if (!tnapi->tx_buffers)
8485                         continue;
8486
8487                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8488                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8489
8490                         if (!skb)
8491                                 continue;
8492
8493                         tg3_tx_skb_unmap(tnapi, i,
8494                                          skb_shinfo(skb)->nr_frags - 1);
8495
8496                         dev_kfree_skb_any(skb);
8497                 }
8498                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8499         }
8500 }
8501
8502 /* Initialize tx/rx rings for packet processing.
8503  *
8504  * The chip has been shut down and the driver detached from
8505  * the networking layer, so no interrupts or new tx packets will
8506  * end up in the driver.  tp->{tx,}lock are held and thus
8507  * we may not sleep.
8508  */
8509 static int tg3_init_rings(struct tg3 *tp)
8510 {
8511         int i;
8512
8513         /* Free up all the SKBs. */
8514         tg3_free_rings(tp);
8515
8516         for (i = 0; i < tp->irq_cnt; i++) {
8517                 struct tg3_napi *tnapi = &tp->napi[i];
8518
8519                 tnapi->last_tag = 0;
8520                 tnapi->last_irq_tag = 0;
8521                 tnapi->hw_status->status = 0;
8522                 tnapi->hw_status->status_tag = 0;
8523                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8524
8525                 tnapi->tx_prod = 0;
8526                 tnapi->tx_cons = 0;
8527                 if (tnapi->tx_ring)
8528                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8529
8530                 tnapi->rx_rcb_ptr = 0;
8531                 if (tnapi->rx_rcb)
8532                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8533
8534                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8535                         tg3_free_rings(tp);
8536                         return -ENOMEM;
8537                 }
8538         }
8539
8540         return 0;
8541 }
8542
8543 static void tg3_mem_tx_release(struct tg3 *tp)
8544 {
8545         int i;
8546
8547         for (i = 0; i < tp->irq_max; i++) {
8548                 struct tg3_napi *tnapi = &tp->napi[i];
8549
8550                 if (tnapi->tx_ring) {
8551                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8552                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8553                         tnapi->tx_ring = NULL;
8554                 }
8555
8556                 kfree(tnapi->tx_buffers);
8557                 tnapi->tx_buffers = NULL;
8558         }
8559 }
8560
8561 static int tg3_mem_tx_acquire(struct tg3 *tp)
8562 {
8563         int i;
8564         struct tg3_napi *tnapi = &tp->napi[0];
8565
8566         /* If multivector TSS is enabled, vector 0 does not handle
8567          * tx interrupts.  Don't allocate any resources for it.
8568          */
8569         if (tg3_flag(tp, ENABLE_TSS))
8570                 tnapi++;
8571
8572         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8573                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8574                                             TG3_TX_RING_SIZE, GFP_KERNEL);
8575                 if (!tnapi->tx_buffers)
8576                         goto err_out;
8577
8578                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8579                                                     TG3_TX_RING_BYTES,
8580                                                     &tnapi->tx_desc_mapping,
8581                                                     GFP_KERNEL);
8582                 if (!tnapi->tx_ring)
8583                         goto err_out;
8584         }
8585
8586         return 0;
8587
8588 err_out:
8589         tg3_mem_tx_release(tp);
8590         return -ENOMEM;
8591 }
8592
8593 static void tg3_mem_rx_release(struct tg3 *tp)
8594 {
8595         int i;
8596
8597         for (i = 0; i < tp->irq_max; i++) {
8598                 struct tg3_napi *tnapi = &tp->napi[i];
8599
8600                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8601
8602                 if (!tnapi->rx_rcb)
8603                         continue;
8604
8605                 dma_free_coherent(&tp->pdev->dev,
8606                                   TG3_RX_RCB_RING_BYTES(tp),
8607                                   tnapi->rx_rcb,
8608                                   tnapi->rx_rcb_mapping);
8609                 tnapi->rx_rcb = NULL;
8610         }
8611 }
8612
8613 static int tg3_mem_rx_acquire(struct tg3 *tp)
8614 {
8615         unsigned int i, limit;
8616
8617         limit = tp->rxq_cnt;
8618
8619         /* If RSS is enabled, we need a (dummy) producer ring
8620          * set on vector zero.  This is the true hw prodring.
8621          */
8622         if (tg3_flag(tp, ENABLE_RSS))
8623                 limit++;
8624
8625         for (i = 0; i < limit; i++) {
8626                 struct tg3_napi *tnapi = &tp->napi[i];
8627
8628                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8629                         goto err_out;
8630
8631                 /* If multivector RSS is enabled, vector 0
8632                  * does not handle rx or tx interrupts.
8633                  * Don't allocate any resources for it.
8634                  */
8635                 if (!i && tg3_flag(tp, ENABLE_RSS))
8636                         continue;
8637
8638                 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8639                                                     TG3_RX_RCB_RING_BYTES(tp),
8640                                                     &tnapi->rx_rcb_mapping,
8641                                                     GFP_KERNEL);
8642                 if (!tnapi->rx_rcb)
8643                         goto err_out;
8644         }
8645
8646         return 0;
8647
8648 err_out:
8649         tg3_mem_rx_release(tp);
8650         return -ENOMEM;
8651 }
8652
8653 /*
8654  * Must not be invoked with interrupt sources disabled and
8655  * the hardware shut down.
8656  */
8657 static void tg3_free_consistent(struct tg3 *tp)
8658 {
8659         int i;
8660
8661         for (i = 0; i < tp->irq_cnt; i++) {
8662                 struct tg3_napi *tnapi = &tp->napi[i];
8663
8664                 if (tnapi->hw_status) {
8665                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8666                                           tnapi->hw_status,
8667                                           tnapi->status_mapping);
8668                         tnapi->hw_status = NULL;
8669                 }
8670         }
8671
8672         tg3_mem_rx_release(tp);
8673         tg3_mem_tx_release(tp);
8674
8675         if (tp->hw_stats) {
8676                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8677                                   tp->hw_stats, tp->stats_mapping);
8678                 tp->hw_stats = NULL;
8679         }
8680 }
8681
8682 /*
8683  * Must not be invoked with interrupt sources disabled and
8684  * the hardware shut down.  Can sleep.
8685  */
8686 static int tg3_alloc_consistent(struct tg3 *tp)
8687 {
8688         int i;
8689
8690         tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8691                                            sizeof(struct tg3_hw_stats),
8692                                            &tp->stats_mapping, GFP_KERNEL);
8693         if (!tp->hw_stats)
8694                 goto err_out;
8695
8696         for (i = 0; i < tp->irq_cnt; i++) {
8697                 struct tg3_napi *tnapi = &tp->napi[i];
8698                 struct tg3_hw_status *sblk;
8699
8700                 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8701                                                        TG3_HW_STATUS_SIZE,
8702                                                        &tnapi->status_mapping,
8703                                                        GFP_KERNEL);
8704                 if (!tnapi->hw_status)
8705                         goto err_out;
8706
8707                 sblk = tnapi->hw_status;
8708
8709                 if (tg3_flag(tp, ENABLE_RSS)) {
8710                         u16 *prodptr = NULL;
8711
8712                         /*
8713                          * When RSS is enabled, the status block format changes
8714                          * slightly.  The "rx_jumbo_consumer", "reserved",
8715                          * and "rx_mini_consumer" members get mapped to the
8716                          * other three rx return ring producer indexes.
8717                          */
8718                         switch (i) {
8719                         case 1:
8720                                 prodptr = &sblk->idx[0].rx_producer;
8721                                 break;
8722                         case 2:
8723                                 prodptr = &sblk->rx_jumbo_consumer;
8724                                 break;
8725                         case 3:
8726                                 prodptr = &sblk->reserved;
8727                                 break;
8728                         case 4:
8729                                 prodptr = &sblk->rx_mini_consumer;
8730                                 break;
8731                         }
8732                         tnapi->rx_rcb_prod_idx = prodptr;
8733                 } else {
8734                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8735                 }
8736         }
8737
8738         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8739                 goto err_out;
8740
8741         return 0;
8742
8743 err_out:
8744         tg3_free_consistent(tp);
8745         return -ENOMEM;
8746 }
8747
8748 #define MAX_WAIT_CNT 1000
8749
8750 /* To stop a block, clear the enable bit and poll until it
8751  * clears.  tp->lock is held.
8752  */
8753 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8754 {
8755         unsigned int i;
8756         u32 val;
8757
8758         if (tg3_flag(tp, 5705_PLUS)) {
8759                 switch (ofs) {
8760                 case RCVLSC_MODE:
8761                 case DMAC_MODE:
8762                 case MBFREE_MODE:
8763                 case BUFMGR_MODE:
8764                 case MEMARB_MODE:
8765                         /* We can't enable/disable these bits on the
8766                          * 5705/5750, so just report success.
8767                          */
8768                         return 0;
8769
8770                 default:
8771                         break;
8772                 }
8773         }
8774
8775         val = tr32(ofs);
8776         val &= ~enable_bit;
8777         tw32_f(ofs, val);
8778
8779         for (i = 0; i < MAX_WAIT_CNT; i++) {
8780                 if (pci_channel_offline(tp->pdev)) {
8781                         dev_err(&tp->pdev->dev,
8782                                 "tg3_stop_block device offline, "
8783                                 "ofs=%lx enable_bit=%x\n",
8784                                 ofs, enable_bit);
8785                         return -ENODEV;
8786                 }
8787
8788                 udelay(100);
8789                 val = tr32(ofs);
8790                 if ((val & enable_bit) == 0)
8791                         break;
8792         }
8793
8794         if (i == MAX_WAIT_CNT && !silent) {
8795                 dev_err(&tp->pdev->dev,
8796                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8797                         ofs, enable_bit);
8798                 return -ENODEV;
8799         }
8800
8801         return 0;
8802 }
8803
8804 /* tp->lock is held. */
8805 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8806 {
8807         int i, err;
8808
8809         tg3_disable_ints(tp);
8810
8811         if (pci_channel_offline(tp->pdev)) {
8812                 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8813                 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8814                 err = -ENODEV;
8815                 goto err_no_dev;
8816         }
8817
8818         tp->rx_mode &= ~RX_MODE_ENABLE;
8819         tw32_f(MAC_RX_MODE, tp->rx_mode);
8820         udelay(10);
8821
8822         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8823         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8824         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8825         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8826         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8827         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8828
8829         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8830         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8831         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8832         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8833         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8834         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8835         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8836
8837         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8838         tw32_f(MAC_MODE, tp->mac_mode);
8839         udelay(40);
8840
8841         tp->tx_mode &= ~TX_MODE_ENABLE;
8842         tw32_f(MAC_TX_MODE, tp->tx_mode);
8843
8844         for (i = 0; i < MAX_WAIT_CNT; i++) {
8845                 udelay(100);
8846                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8847                         break;
8848         }
8849         if (i >= MAX_WAIT_CNT) {
8850                 dev_err(&tp->pdev->dev,
8851                         "%s timed out, TX_MODE_ENABLE will not clear "
8852                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8853                 err |= -ENODEV;
8854         }
8855
8856         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8857         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8858         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8859
8860         tw32(FTQ_RESET, 0xffffffff);
8861         tw32(FTQ_RESET, 0x00000000);
8862
8863         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8864         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8865
8866 err_no_dev:
8867         for (i = 0; i < tp->irq_cnt; i++) {
8868                 struct tg3_napi *tnapi = &tp->napi[i];
8869                 if (tnapi->hw_status)
8870                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8871         }
8872
8873         return err;
8874 }
8875
8876 /* Save PCI command register before chip reset */
8877 static void tg3_save_pci_state(struct tg3 *tp)
8878 {
8879         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8880 }
8881
8882 /* Restore PCI state after chip reset */
8883 static void tg3_restore_pci_state(struct tg3 *tp)
8884 {
8885         u32 val;
8886
8887         /* Re-enable indirect register accesses. */
8888         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8889                                tp->misc_host_ctrl);
8890
8891         /* Set MAX PCI retry to zero. */
8892         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8893         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8894             tg3_flag(tp, PCIX_MODE))
8895                 val |= PCISTATE_RETRY_SAME_DMA;
8896         /* Allow reads and writes to the APE register and memory space. */
8897         if (tg3_flag(tp, ENABLE_APE))
8898                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8899                        PCISTATE_ALLOW_APE_SHMEM_WR |
8900                        PCISTATE_ALLOW_APE_PSPACE_WR;
8901         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8902
8903         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8904
8905         if (!tg3_flag(tp, PCI_EXPRESS)) {
8906                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8907                                       tp->pci_cacheline_sz);
8908                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8909                                       tp->pci_lat_timer);
8910         }
8911
8912         /* Make sure PCI-X relaxed ordering bit is clear. */
8913         if (tg3_flag(tp, PCIX_MODE)) {
8914                 u16 pcix_cmd;
8915
8916                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8917                                      &pcix_cmd);
8918                 pcix_cmd &= ~PCI_X_CMD_ERO;
8919                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8920                                       pcix_cmd);
8921         }
8922
8923         if (tg3_flag(tp, 5780_CLASS)) {
8924
8925                 /* Chip reset on 5780 will reset MSI enable bit,
8926                  * so we need to restore it.
8927                  */
8928                 if (tg3_flag(tp, USING_MSI)) {
8929                         u16 ctrl;
8930
8931                         pci_read_config_word(tp->pdev,
8932                                              tp->msi_cap + PCI_MSI_FLAGS,
8933                                              &ctrl);
8934                         pci_write_config_word(tp->pdev,
8935                                               tp->msi_cap + PCI_MSI_FLAGS,
8936                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8937                         val = tr32(MSGINT_MODE);
8938                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8939                 }
8940         }
8941 }
8942
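     /* Temporarily force the CPMU's MAC clock override so the core
      * clock runs at full speed across a chip reset; undone again by
      * tg3_restore_clk().
      */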
8943 static void tg3_override_clk(struct tg3 *tp)
8944 {
8945         u32 val;
8946
8947         switch (tg3_asic_rev(tp)) {
8948         case ASIC_REV_5717:
8949                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
8950                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
8951                      TG3_CPMU_MAC_ORIDE_ENABLE);
8952                 break;
8953
8954         case ASIC_REV_5719:
8955         case ASIC_REV_5720:
8956                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8957                 break;
8958
8959         default:
8960                 return;
8961         }
8962 }
8963
8964 static void tg3_restore_clk(struct tg3 *tp)
8965 {
8966         u32 val;
8967
8968         switch (tg3_asic_rev(tp)) {
8969         case ASIC_REV_5717:
8970                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
8971                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
8972                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
8973                 break;
8974
8975         case ASIC_REV_5719:
8976         case ASIC_REV_5720:
8977                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8978                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8979                 break;
8980
8981         default:
8982                 return;
8983         }
8984 }
8985
8986 /* tp->lock is held. */
8987 static int tg3_chip_reset(struct tg3 *tp)
8988 {
8989         u32 val;
8990         void (*write_op)(struct tg3 *, u32, u32);
8991         int i, err;
8992
8993         if (!pci_device_is_present(tp->pdev))
8994                 return -ENODEV;
8995
8996         tg3_nvram_lock(tp);
8997
8998         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8999
9000         /* No matching tg3_nvram_unlock() after this because
9001          * chip reset below will undo the nvram lock.
9002          */
9003         tp->nvram_lock_cnt = 0;
9004
9005         /* GRC_MISC_CFG core clock reset will clear the memory
9006          * enable bit in PCI register 4 and the MSI enable bit
9007          * on some chips, so we save relevant registers here.
9008          */
9009         tg3_save_pci_state(tp);
9010
9011         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9012             tg3_flag(tp, 5755_PLUS))
9013                 tw32(GRC_FASTBOOT_PC, 0);
9014
9015         /*
9016          * We must avoid the readl() that normally takes place.
9017          * It locks up machines, causes machine checks, and does
9018          * other fun things.  So, temporarily disable the 5701
9019          * hardware workaround, while we do the reset.
9020          */
9021         write_op = tp->write32;
9022         if (write_op == tg3_write_flush_reg32)
9023                 tp->write32 = tg3_write32;
9024
9025         /* Prevent the irq handler from reading or writing PCI registers
9026          * during chip reset when the memory enable bit in the PCI command
9027          * register may be cleared.  The chip does not generate interrupt
9028          * at this time, but the irq handler may still be called due to irq
9029          * sharing or irqpoll.
9030          */
9031         tg3_flag_set(tp, CHIP_RESETTING);
9032         for (i = 0; i < tp->irq_cnt; i++) {
9033                 struct tg3_napi *tnapi = &tp->napi[i];
9034                 if (tnapi->hw_status) {
9035                         tnapi->hw_status->status = 0;
9036                         tnapi->hw_status->status_tag = 0;
9037                 }
9038                 tnapi->last_tag = 0;
9039                 tnapi->last_irq_tag = 0;
9040         }
9041         smp_mb();
9042
9043         for (i = 0; i < tp->irq_cnt; i++)
9044                 synchronize_irq(tp->napi[i].irq_vec);
9045
9046         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9047                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9048                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9049         }
9050
9051         /* do the reset */
9052         val = GRC_MISC_CFG_CORECLK_RESET;
9053
9054         if (tg3_flag(tp, PCI_EXPRESS)) {
9055                 /* Force PCIe 1.0a mode */
9056                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9057                     !tg3_flag(tp, 57765_PLUS) &&
9058                     tr32(TG3_PCIE_PHY_TSTCTL) ==
9059                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9060                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9061
9062                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9063                         tw32(GRC_MISC_CFG, (1 << 29));
9064                         val |= (1 << 29);
9065                 }
9066         }
9067
9068         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9069                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9070                 tw32(GRC_VCPU_EXT_CTRL,
9071                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9072         }
9073
9074         /* Set the clock to the highest frequency to avoid timeouts.  In
9075          * link-aware mode the clock can run slow and the bootcode does not
9076          * complete within the expected time. Override the clock to allow the
9077          * bootcode to finish sooner and then restore it.
9078          */
9079         tg3_override_clk(tp);
9080
9081         /* Manage GPHY power for all CPMU-absent PCIe devices. */
9082         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9083                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9084
9085         tw32(GRC_MISC_CFG, val);
9086
9087         /* restore 5701 hardware bug workaround write method */
9088         tp->write32 = write_op;
9089
9090         /* Unfortunately, we have to delay before the PCI read back.
9091          * Some 575X chips will not even respond to a PCI cfg access
9092          * when the reset command is given to the chip.
9093          *
9094          * How do these hardware designers expect things to work
9095          * properly if the PCI write is posted for a long period
9096          * of time?  It is always necessary to have some method by
9097          * which a register read back can occur to push the write
9098          * out which does the reset.
9099          *
9100          * For most tg3 variants the trick below has worked.
9101          * Ho hum...
9102          */
9103         udelay(120);
9104
9105         /* Flush PCI posted writes.  The normal MMIO registers
9106          * are inaccessible at this time so this is the only
9107          * way to do this reliably (actually, this is no longer
9108          * the case, see above).  I tried to use indirect
9109          * register read/write but this upset some 5701 variants.
9110          */
9111         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9112
9113         udelay(120);
9114
9115         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9116                 u16 val16;
9117
9118                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9119                         int j;
9120                         u32 cfg_val;
9121
9122                         /* Wait for link training to complete.  */
9123                         for (j = 0; j < 5000; j++)
9124                                 udelay(100);
9125
9126                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9127                         pci_write_config_dword(tp->pdev, 0xc4,
9128                                                cfg_val | (1 << 15));
9129                 }
9130
9131                 /* Clear the "no snoop" and "relaxed ordering" bits. */
9132                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9133                 /*
9134                  * Older PCIe devices only support the 128-byte
9135                  * MPS setting.  Enforce the restriction.
9136                  */
9137                 if (!tg3_flag(tp, CPMU_PRESENT))
9138                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9139                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9140
9141                 /* Clear error status */
9142                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9143                                       PCI_EXP_DEVSTA_CED |
9144                                       PCI_EXP_DEVSTA_NFED |
9145                                       PCI_EXP_DEVSTA_FED |
9146                                       PCI_EXP_DEVSTA_URD);
9147         }
9148
9149         tg3_restore_pci_state(tp);
9150
9151         tg3_flag_clear(tp, CHIP_RESETTING);
9152         tg3_flag_clear(tp, ERROR_PROCESSED);
9153
9154         val = 0;
9155         if (tg3_flag(tp, 5780_CLASS))
9156                 val = tr32(MEMARB_MODE);
9157         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9158
9159         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9160                 tg3_stop_fw(tp);
9161                 tw32(0x5000, 0x400);
9162         }
9163
9164         if (tg3_flag(tp, IS_SSB_CORE)) {
9165                 /*
9166                  * BCM4785: To avoid repercussions from using the
9167                  * potentially defective internal ROM, stop the Rx RISC CPU,
9168                  * which is not required for normal operation.
9169                  */
9170                 tg3_stop_fw(tp);
9171                 tg3_halt_cpu(tp, RX_CPU_BASE);
9172         }
9173
9174         err = tg3_poll_fw(tp);
9175         if (err)
9176                 return err;
9177
9178         tw32(GRC_MODE, tp->grc_mode);
9179
9180         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9181                 val = tr32(0xc4);
9182
9183                 tw32(0xc4, val | (1 << 15));
9184         }
9185
9186         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9187             tg3_asic_rev(tp) == ASIC_REV_5705) {
9188                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9189                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9190                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9191                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9192         }
9193
9194         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9195                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9196                 val = tp->mac_mode;
9197         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9198                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9199                 val = tp->mac_mode;
9200         } else
9201                 val = 0;
9202
9203         tw32_f(MAC_MODE, val);
9204         udelay(40);
9205
9206         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9207
9208         tg3_mdio_start(tp);
9209
9210         if (tg3_flag(tp, PCI_EXPRESS) &&
9211             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9212             tg3_asic_rev(tp) != ASIC_REV_5785 &&
9213             !tg3_flag(tp, 57765_PLUS)) {
9214                 val = tr32(0x7c00);
9215
9216                 tw32(0x7c00, val | (1 << 25));
9217         }
9218
9219         tg3_restore_clk(tp);
9220
9221         /* Reprobe ASF enable state.  */
9222         tg3_flag_clear(tp, ENABLE_ASF);
9223         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9224                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9225
9226         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9227         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9228         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9229                 u32 nic_cfg;
9230
9231                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9232                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9233                         tg3_flag_set(tp, ENABLE_ASF);
9234                         tp->last_event_jiffies = jiffies;
9235                         if (tg3_flag(tp, 5750_PLUS))
9236                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9237
9238                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9239                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9240                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9241                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9242                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9243                 }
9244         }
9245
9246         return 0;
9247 }
9248
9249 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9250 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9251 static void __tg3_set_rx_mode(struct net_device *);
9252
9253 /* tp->lock is held. */
9254 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9255 {
9256         int err;
9257
9258         tg3_stop_fw(tp);
9259
9260         tg3_write_sig_pre_reset(tp, kind);
9261
9262         tg3_abort_hw(tp, silent);
9263         err = tg3_chip_reset(tp);
9264
9265         __tg3_set_mac_addr(tp, false);
9266
9267         tg3_write_sig_legacy(tp, kind);
9268         tg3_write_sig_post_reset(tp, kind);
9269
9270         if (tp->hw_stats) {
9271                 /* Save the stats across chip resets... */
9272                 tg3_get_nstats(tp, &tp->net_stats_prev);
9273                 tg3_get_estats(tp, &tp->estats_prev);
9274
9275                 /* And make sure the next sample is new data */
9276                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9277         }
9278
9279         return err;
9280 }
9281
9282 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9283 {
9284         struct tg3 *tp = netdev_priv(dev);
9285         struct sockaddr *addr = p;
9286         int err = 0;
9287         bool skip_mac_1 = false;
9288
9289         if (!is_valid_ether_addr(addr->sa_data))
9290                 return -EADDRNOTAVAIL;
9291
9292         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9293
9294         if (!netif_running(dev))
9295                 return 0;
9296
9297         if (tg3_flag(tp, ENABLE_ASF)) {
9298                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9299
9300                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9301                 addr0_low = tr32(MAC_ADDR_0_LOW);
9302                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9303                 addr1_low = tr32(MAC_ADDR_1_LOW);
9304
9305                 /* Skip MAC addr 1 if ASF is using it. */
9306                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9307                     !(addr1_high == 0 && addr1_low == 0))
9308                         skip_mac_1 = true;
9309         }
9310         spin_lock_bh(&tp->lock);
9311         __tg3_set_mac_addr(tp, skip_mac_1);
9312         __tg3_set_rx_mode(dev);
9313         spin_unlock_bh(&tp->lock);
9314
9315         return err;
9316 }
9317
9318 /* tp->lock is held. */
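     /* Program one NIC SRAM buffer descriptor info (BDINFO) block: the
      * 64-bit host DMA address as two 32-bit halves, the maxlen/flags
      * word and, on pre-5705 parts only, the NIC-local ring address.
      */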
9319 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9320                            dma_addr_t mapping, u32 maxlen_flags,
9321                            u32 nic_addr)
9322 {
9323         tg3_write_mem(tp,
9324                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9325                       ((u64) mapping >> 32));
9326         tg3_write_mem(tp,
9327                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9328                       ((u64) mapping & 0xffffffff));
9329         tg3_write_mem(tp,
9330                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9331                        maxlen_flags);
9332
9333         if (!tg3_flag(tp, 5705_PLUS))
9334                 tg3_write_mem(tp,
9335                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9336                               nic_addr);
9337 }
9338
9339
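     /* Each interrupt vector past vector 0 has its own set of host
      * coalescing registers, spaced 0x18 bytes apart starting at the
      * *_VEC1 offsets; registers for unused vectors are zeroed.
      */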
9340 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9341 {
9342         int i = 0;
9343
9344         if (!tg3_flag(tp, ENABLE_TSS)) {
9345                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9346                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9347                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9348         } else {
9349                 tw32(HOSTCC_TXCOL_TICKS, 0);
9350                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9351                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9352
9353                 for (; i < tp->txq_cnt; i++) {
9354                         u32 reg;
9355
9356                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9357                         tw32(reg, ec->tx_coalesce_usecs);
9358                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9359                         tw32(reg, ec->tx_max_coalesced_frames);
9360                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9361                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9362                 }
9363         }
9364
9365         for (; i < tp->irq_max - 1; i++) {
9366                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9367                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9368                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9369         }
9370 }
9371
9372 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9373 {
9374         int i = 0;
9375         u32 limit = tp->rxq_cnt;
9376
9377         if (!tg3_flag(tp, ENABLE_RSS)) {
9378                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9379                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9380                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9381                 limit--;
9382         } else {
9383                 tw32(HOSTCC_RXCOL_TICKS, 0);
9384                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9385                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9386         }
9387
9388         for (; i < limit; i++) {
9389                 u32 reg;
9390
9391                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9392                 tw32(reg, ec->rx_coalesce_usecs);
9393                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9394                 tw32(reg, ec->rx_max_coalesced_frames);
9395                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9396                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9397         }
9398
9399         for (; i < tp->irq_max - 1; i++) {
9400                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9401                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9402                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9403         }
9404 }
9405
9406 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9407 {
9408         tg3_coal_tx_init(tp, ec);
9409         tg3_coal_rx_init(tp, ec);
9410
9411         if (!tg3_flag(tp, 5705_PLUS)) {
9412                 u32 val = ec->stats_block_coalesce_usecs;
9413
9414                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9415                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9416
9417                 if (!tp->link_up)
9418                         val = 0;
9419
9420                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9421         }
9422 }
9423
9424 /* tp->lock is held. */
9425 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9426 {
9427         u32 txrcb, limit;
9428
9429         /* Disable all transmit rings but the first. */
9430         if (!tg3_flag(tp, 5705_PLUS))
9431                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9432         else if (tg3_flag(tp, 5717_PLUS))
9433                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9434         else if (tg3_flag(tp, 57765_CLASS) ||
9435                  tg3_asic_rev(tp) == ASIC_REV_5762)
9436                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9437         else
9438                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9439
9440         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9441              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9442                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9443                               BDINFO_FLAGS_DISABLED);
9444 }
9445
9446 /* tp->lock is held. */
9447 static void tg3_tx_rcbs_init(struct tg3 *tp)
9448 {
9449         int i = 0;
9450         u32 txrcb = NIC_SRAM_SEND_RCB;
9451
9452         if (tg3_flag(tp, ENABLE_TSS))
9453                 i++;
9454
9455         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9456                 struct tg3_napi *tnapi = &tp->napi[i];
9457
9458                 if (!tnapi->tx_ring)
9459                         continue;
9460
9461                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9462                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9463                                NIC_SRAM_TX_BUFFER_DESC);
9464         }
9465 }
9466
9467 /* tp->lock is held. */
9468 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9469 {
9470         u32 rxrcb, limit;
9471
9472         /* Disable all receive return rings but the first. */
9473         if (tg3_flag(tp, 5717_PLUS))
9474                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9475         else if (!tg3_flag(tp, 5705_PLUS))
9476                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9477         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9478                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9479                  tg3_flag(tp, 57765_CLASS))
9480                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9481         else
9482                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9483
9484         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9485              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9486                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9487                               BDINFO_FLAGS_DISABLED);
9488 }
9489
9490 /* tp->lock is held. */
9491 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9492 {
9493         int i = 0;
9494         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9495
9496         if (tg3_flag(tp, ENABLE_RSS))
9497                 i++;
9498
9499         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9500                 struct tg3_napi *tnapi = &tp->napi[i];
9501
9502                 if (!tnapi->rx_rcb)
9503                         continue;
9504
9505                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9506                                (tp->rx_ret_ring_mask + 1) <<
9507                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9508         }
9509 }
9510
9511 /* tp->lock is held. */
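     /* Quiesce and reinitialize the rings: disable the TX and RX return
      * RCBs, reset every vector's mailboxes, clear the status blocks and
      * reprogram their DMA addresses, then rewrite the RCBs for the
      * rings actually in use.
      */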
9512 static void tg3_rings_reset(struct tg3 *tp)
9513 {
9514         int i;
9515         u32 stblk;
9516         struct tg3_napi *tnapi = &tp->napi[0];
9517
9518         tg3_tx_rcbs_disable(tp);
9519
9520         tg3_rx_ret_rcbs_disable(tp);
9521
9522         /* Disable interrupts */
9523         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9524         tp->napi[0].chk_msi_cnt = 0;
9525         tp->napi[0].last_rx_cons = 0;
9526         tp->napi[0].last_tx_cons = 0;
9527
9528         /* Zero mailbox registers. */
9529         if (tg3_flag(tp, SUPPORT_MSIX)) {
9530                 for (i = 1; i < tp->irq_max; i++) {
9531                         tp->napi[i].tx_prod = 0;
9532                         tp->napi[i].tx_cons = 0;
9533                         if (tg3_flag(tp, ENABLE_TSS))
9534                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
9535                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
9536                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9537                         tp->napi[i].chk_msi_cnt = 0;
9538                         tp->napi[i].last_rx_cons = 0;
9539                         tp->napi[i].last_tx_cons = 0;
9540                 }
9541                 if (!tg3_flag(tp, ENABLE_TSS))
9542                         tw32_mailbox(tp->napi[0].prodmbox, 0);
9543         } else {
9544                 tp->napi[0].tx_prod = 0;
9545                 tp->napi[0].tx_cons = 0;
9546                 tw32_mailbox(tp->napi[0].prodmbox, 0);
9547                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9548         }
9549
9550         /* Make sure the NIC-based send BD rings are disabled. */
9551         if (!tg3_flag(tp, 5705_PLUS)) {
9552                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9553                 for (i = 0; i < 16; i++)
9554                         tw32_tx_mbox(mbox + i * 8, 0);
9555         }
9556
9557         /* Clear the status block in RAM. */
9558         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9559
9560         /* Set status block DMA address */
9561         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9562              ((u64) tnapi->status_mapping >> 32));
9563         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9564              ((u64) tnapi->status_mapping & 0xffffffff));
9565
9566         stblk = HOSTCC_STATBLCK_RING1;
9567
9568         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9569                 u64 mapping = (u64)tnapi->status_mapping;
9570                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9571                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9572                 stblk += 8;
9573
9574                 /* Clear the status block in RAM. */
9575                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9576         }
9577
9578         tg3_tx_rcbs_init(tp);
9579         tg3_rx_ret_rcbs_init(tp);
9580 }
9581
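     /* Program the RX buffer descriptor replenish thresholds: the lesser
      * of half the chip's BD cache (capped at the standard ring's max
      * post count) and 1/8 of the host ring size, with a floor of one.
      */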
9582 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9583 {
9584         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9585
9586         if (!tg3_flag(tp, 5750_PLUS) ||
9587             tg3_flag(tp, 5780_CLASS) ||
9588             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9589             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9590             tg3_flag(tp, 57765_PLUS))
9591                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9592         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9593                  tg3_asic_rev(tp) == ASIC_REV_5787)
9594                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9595         else
9596                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9597
9598         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9599         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9600
9601         val = min(nic_rep_thresh, host_rep_thresh);
9602         tw32(RCVBDI_STD_THRESH, val);
9603
9604         if (tg3_flag(tp, 57765_PLUS))
9605                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9606
9607         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9608                 return;
9609
9610         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9611
9612         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9613
9614         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9615         tw32(RCVBDI_JUMBO_THRESH, val);
9616
9617         if (tg3_flag(tp, 57765_PLUS))
9618                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9619 }
9620
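     /* Bit-serial, bit-reflected CRC-32 (Ethernet polynomial 0xedb88320),
      * used below to hash multicast addresses into the 128-bit MAC hash
      * filter.
      */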
9621 static inline u32 calc_crc(unsigned char *buf, int len)
9622 {
9623         u32 reg;
9624         u32 tmp;
9625         int j, k;
9626
9627         reg = 0xffffffff;
9628
9629         for (j = 0; j < len; j++) {
9630                 reg ^= buf[j];
9631
9632                 for (k = 0; k < 8; k++) {
9633                         tmp = reg & 0x01;
9634
9635                         reg >>= 1;
9636
9637                         if (tmp)
9638                                 reg ^= 0xedb88320;
9639                 }
9640         }
9641
9642         return ~reg;
9643 }
9644
9645 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9646 {
9647         /* accept or reject all multicast frames */
9648         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9649         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9650         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9651         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9652 }
9653
9654 static void __tg3_set_rx_mode(struct net_device *dev)
9655 {
9656         struct tg3 *tp = netdev_priv(dev);
9657         u32 rx_mode;
9658
9659         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9660                                   RX_MODE_KEEP_VLAN_TAG);
9661
9662 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9663         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9664          * flag clear.
9665          */
9666         if (!tg3_flag(tp, ENABLE_ASF))
9667                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9668 #endif
9669
9670         if (dev->flags & IFF_PROMISC) {
9671                 /* Promiscuous mode. */
9672                 rx_mode |= RX_MODE_PROMISC;
9673         } else if (dev->flags & IFF_ALLMULTI) {
9674                 /* Accept all multicast. */
9675                 tg3_set_multi(tp, 1);
9676         } else if (netdev_mc_empty(dev)) {
9677                 /* Reject all multicast. */
9678                 tg3_set_multi(tp, 0);
9679         } else {
9680                 /* Accept one or more multicast addresses. */
9681                 struct netdev_hw_addr *ha;
9682                 u32 mc_filter[4] = { 0, };
9683                 u32 regidx;
9684                 u32 bit;
9685                 u32 crc;
9686
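                     /* Hash each address with CRC-32; the low 7 bits of
                      * the inverted CRC select one of 128 filter bits:
                      * bits 6:5 pick the hash register, bits 4:0 the bit
                      * within it.
                      */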
9687                 netdev_for_each_mc_addr(ha, dev) {
9688                         crc = calc_crc(ha->addr, ETH_ALEN);
9689                         bit = ~crc & 0x7f;
9690                         regidx = (bit & 0x60) >> 5;
9691                         bit &= 0x1f;
9692                         mc_filter[regidx] |= (1 << bit);
9693                 }
9694
9695                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9696                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9697                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9698                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9699         }
9700
9701         if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9702                 rx_mode |= RX_MODE_PROMISC;
9703         } else if (!(dev->flags & IFF_PROMISC)) {
9704                 /* Add all entries to the MAC address filter list */
9705                 int i = 0;
9706                 struct netdev_hw_addr *ha;
9707
9708                 netdev_for_each_uc_addr(ha, dev) {
9709                         __tg3_set_one_mac_addr(tp, ha->addr,
9710                                                i + TG3_UCAST_ADDR_IDX(tp));
9711                         i++;
9712                 }
9713         }
9714
9715         if (rx_mode != tp->rx_mode) {
9716                 tp->rx_mode = rx_mode;
9717                 tw32_f(MAC_RX_MODE, rx_mode);
9718                 udelay(10);
9719         }
9720 }
9721
9722 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9723 {
9724         int i;
9725
9726         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9727                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9728 }
9729
9730 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9731 {
9732         int i;
9733
9734         if (!tg3_flag(tp, SUPPORT_MSIX))
9735                 return;
9736
9737         if (tp->rxq_cnt == 1) {
9738                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9739                 return;
9740         }
9741
9742         /* Validate table against current IRQ count */
9743         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9744                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9745                         break;
9746         }
9747
9748         if (i != TG3_RSS_INDIR_TBL_SIZE)
9749                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9750 }
9751
9752 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9753 {
9754         int i = 0;
9755         u32 reg = MAC_RSS_INDIR_TBL_0;
9756
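             /* Pack eight 4-bit indirection entries into each 32-bit
              * register, first entry in the most significant nibble.
              */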
9757         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9758                 u32 val = tp->rss_ind_tbl[i];
9759                 i++;
9760                 for (; i % 8; i++) {
9761                         val <<= 4;
9762                         val |= tp->rss_ind_tbl[i];
9763                 }
9764                 tw32(reg, val);
9765                 reg += 4;
9766         }
9767 }
9768
9769 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9770 {
9771         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9772                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9773         else
9774                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9775 }
9776
9777 /* tp->lock is held. */
9778 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9779 {
9780         u32 val, rdmac_mode;
9781         int i, err, limit;
9782         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9783
9784         tg3_disable_ints(tp);
9785
9786         tg3_stop_fw(tp);
9787
9788         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9789
9790         if (tg3_flag(tp, INIT_COMPLETE))
9791                 tg3_abort_hw(tp, true);
9792
9793         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9794             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9795                 tg3_phy_pull_config(tp);
9796                 tg3_eee_pull_config(tp, NULL);
9797                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9798         }
9799
9800         /* Enable MAC control of LPI */
9801         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9802                 tg3_setup_eee(tp);
9803
9804         if (reset_phy)
9805                 tg3_phy_reset(tp);
9806
9807         err = tg3_chip_reset(tp);
9808         if (err)
9809                 return err;
9810
9811         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9812
9813         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9814                 val = tr32(TG3_CPMU_CTRL);
9815                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9816                 tw32(TG3_CPMU_CTRL, val);
9817
9818                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9819                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9820                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9821                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9822
9823                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9824                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9825                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9826                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9827
9828                 val = tr32(TG3_CPMU_HST_ACC);
9829                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9830                 val |= CPMU_HST_ACC_MACCLK_6_25;
9831                 tw32(TG3_CPMU_HST_ACC, val);
9832         }
9833
9834         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9835                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9836                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9837                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9838                 tw32(PCIE_PWR_MGMT_THRESH, val);
9839
9840                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9841                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9842
9843                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9844
9845                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9846                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9847         }
9848
9849         if (tg3_flag(tp, L1PLLPD_EN)) {
9850                 u32 grc_mode = tr32(GRC_MODE);
9851
9852                 /* Access the lower 1K of PL PCIE block registers. */
9853                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9854                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9855
9856                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9857                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9858                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9859
9860                 tw32(GRC_MODE, grc_mode);
9861         }
9862
9863         if (tg3_flag(tp, 57765_CLASS)) {
9864                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9865                         u32 grc_mode = tr32(GRC_MODE);
9866
9867                         /* Access the lower 1K of PL PCIE block registers. */
9868                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9869                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9870
9871                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9872                                    TG3_PCIE_PL_LO_PHYCTL5);
9873                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9874                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9875
9876                         tw32(GRC_MODE, grc_mode);
9877                 }
9878
9879                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9880                         u32 grc_mode;
9881
9882                         /* Fix transmit hangs */
9883                         val = tr32(TG3_CPMU_PADRNG_CTL);
9884                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9885                         tw32(TG3_CPMU_PADRNG_CTL, val);
9886
9887                         grc_mode = tr32(GRC_MODE);
9888
9889                         /* Access the lower 1K of DL PCIE block registers. */
9890                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9891                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9892
9893                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9894                                    TG3_PCIE_DL_LO_FTSMAX);
9895                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9896                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9897                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9898
9899                         tw32(GRC_MODE, grc_mode);
9900                 }
9901
9902                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9903                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9904                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9905                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9906         }
9907
9908         /* This works around an issue with Athlon chipsets on
9909          * B3 tigon3 silicon.  This bit has no effect on any
9910          * other revision.  But do not set this on PCI Express
9911          * chips and don't even touch the clocks if the CPMU is present.
9912          */
9913         if (!tg3_flag(tp, CPMU_PRESENT)) {
9914                 if (!tg3_flag(tp, PCI_EXPRESS))
9915                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9916                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9917         }
9918
9919         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9920             tg3_flag(tp, PCIX_MODE)) {
9921                 val = tr32(TG3PCI_PCISTATE);
9922                 val |= PCISTATE_RETRY_SAME_DMA;
9923                 tw32(TG3PCI_PCISTATE, val);
9924         }
9925
9926         if (tg3_flag(tp, ENABLE_APE)) {
9927                 /* Allow reads and writes to the
9928                  * APE register and memory space.
9929                  */
9930                 val = tr32(TG3PCI_PCISTATE);
9931                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9932                        PCISTATE_ALLOW_APE_SHMEM_WR |
9933                        PCISTATE_ALLOW_APE_PSPACE_WR;
9934                 tw32(TG3PCI_PCISTATE, val);
9935         }
9936
9937         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9938                 /* Enable some hw fixes.  */
9939                 val = tr32(TG3PCI_MSI_DATA);
9940                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9941                 tw32(TG3PCI_MSI_DATA, val);
9942         }
9943
9944         /* Descriptor ring init may make accesses to the
9945          * NIC SRAM area to set up the TX descriptors, so we
9946          * can only do this after the hardware has been
9947          * successfully reset.
9948          */
9949         err = tg3_init_rings(tp);
9950         if (err)
9951                 return err;
9952
9953         if (tg3_flag(tp, 57765_PLUS)) {
9954                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9955                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9956                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9957                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9958                 if (!tg3_flag(tp, 57765_CLASS) &&
9959                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
9960                     tg3_asic_rev(tp) != ASIC_REV_5762)
9961                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9962                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9963         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9964                    tg3_asic_rev(tp) != ASIC_REV_5761) {
9965                 /* This value is determined during the probe time DMA
9966                  * engine test, tg3_test_dma.
9967                  */
9968                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9969         }
9970
9971         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9972                           GRC_MODE_4X_NIC_SEND_RINGS |
9973                           GRC_MODE_NO_TX_PHDR_CSUM |
9974                           GRC_MODE_NO_RX_PHDR_CSUM);
9975         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9976
9977         /* Pseudo-header checksum is done by hardware logic and not
9978          * the offload processors, so make the chip do the pseudo-
9979          * header checksums on receive.  For transmit it is more
9980          * convenient to do the pseudo-header checksum in software
9981          * as Linux does that on transmit for us in all cases.
9982          */
9983         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9984
9985         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9986         if (tp->rxptpctl)
9987                 tw32(TG3_RX_PTP_CTL,
9988                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9989
9990         if (tg3_flag(tp, PTP_CAPABLE))
9991                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9992
9993         tw32(GRC_MODE, tp->grc_mode | val);
9994
9995         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
9996         val = tr32(GRC_MISC_CFG);
9997         val &= ~0xff;
9998         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9999         tw32(GRC_MISC_CFG, val);
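        /* A divider value of 65 presumably yields a 1 MHz timer reference
         * (66 MHz / (65 + 1)), i.e. a divide-by-(N + 1) prescaler; this is
         * an inference from the value written here, not from a datasheet.
         */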
10000
10001         /* Initialize MBUF/DESC pool. */
10002         if (tg3_flag(tp, 5750_PLUS)) {
10003                 /* Do nothing.  */
10004         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10005                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10006                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10007                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10008                 else
10009                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10010                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10011                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10012         } else if (tg3_flag(tp, TSO_CAPABLE)) {
10013                 int fw_len;
10014
10015                 fw_len = tp->fw_len;
10016                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10017                 tw32(BUFMGR_MB_POOL_ADDR,
10018                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10019                 tw32(BUFMGR_MB_POOL_SIZE,
10020                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10021         }
10022
10023         if (tp->dev->mtu <= ETH_DATA_LEN) {
10024                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10025                      tp->bufmgr_config.mbuf_read_dma_low_water);
10026                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10027                      tp->bufmgr_config.mbuf_mac_rx_low_water);
10028                 tw32(BUFMGR_MB_HIGH_WATER,
10029                      tp->bufmgr_config.mbuf_high_water);
10030         } else {
10031                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10032                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10033                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10034                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10035                 tw32(BUFMGR_MB_HIGH_WATER,
10036                      tp->bufmgr_config.mbuf_high_water_jumbo);
10037         }
10038         tw32(BUFMGR_DMA_LOW_WATER,
10039              tp->bufmgr_config.dma_low_water);
10040         tw32(BUFMGR_DMA_HIGH_WATER,
10041              tp->bufmgr_config.dma_high_water);
10042
10043         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10044         if (tg3_asic_rev(tp) == ASIC_REV_5719)
10045                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10046         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10047             tg3_asic_rev(tp) == ASIC_REV_5762 ||
10048             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10049             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10050                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10051         tw32(BUFMGR_MODE, val);
10052         for (i = 0; i < 2000; i++) {
10053                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10054                         break;
10055                 udelay(10);
10056         }
10057         if (i >= 2000) {
10058                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10059                 return -ENODEV;
10060         }
10061
10062         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10063                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10064
10065         tg3_setup_rxbd_thresholds(tp);
10066
10067         /* Initialize TG3_BDINFO's at:
10068          *  RCVDBDI_STD_BD:     standard eth size rx ring
10069          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10070          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10071          *
10072          * like so:
10073          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10074          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10075          *                              ring attribute flags
10076          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10077          *
10078          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10079          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10080          *
10081          * The size of each ring is fixed in the firmware, but the location is
10082          * configurable.
10083          */
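        /* Illustrative sketch only: one TG3_BDINFO control block, viewed
         * as a struct (the driver programs it through the TG3_BDINFO_*
         * offsets above, not through any such type):
         *
         *      struct tg3_bdinfo {
         *              u32 host_addr_hi;   upper 32 bits of ring DMA address
         *              u32 host_addr_lo;   lower 32 bits of ring DMA address
         *              u32 maxlen_flags;   (rx max buffer size << 16) | flags
         *              u32 nic_addr;       descriptor location in NIC SRAM
         *      };
         */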
10084         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10085              ((u64) tpr->rx_std_mapping >> 32));
10086         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10087              ((u64) tpr->rx_std_mapping & 0xffffffff));
10088         if (!tg3_flag(tp, 5717_PLUS))
10089                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10090                      NIC_SRAM_RX_BUFFER_DESC);
10091
10092         /* Disable the mini ring */
10093         if (!tg3_flag(tp, 5705_PLUS))
10094                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10095                      BDINFO_FLAGS_DISABLED);
10096
10097         /* Program the jumbo buffer descriptor ring control
10098          * blocks on those devices that have them.
10099          */
10100         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10101             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10102
10103                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10104                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10105                              ((u64) tpr->rx_jmb_mapping >> 32));
10106                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10107                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10108                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10109                               BDINFO_FLAGS_MAXLEN_SHIFT;
10110                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10111                              val | BDINFO_FLAGS_USE_EXT_RECV);
10112                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10113                             tg3_flag(tp, 57765_CLASS) ||
10114                             tg3_asic_rev(tp) == ASIC_REV_5762)
10115                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10116                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10117                 } else {
10118                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10119                              BDINFO_FLAGS_DISABLED);
10120                 }
10121
10122                 if (tg3_flag(tp, 57765_PLUS)) {
10123                         val = TG3_RX_STD_RING_SIZE(tp);
10124                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10125                         val |= (TG3_RX_STD_DMA_SZ << 2);
10126                 } else
10127                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10128         } else
10129                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10130
10131         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10132
10133         tpr->rx_std_prod_idx = tp->rx_pending;
10134         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10135
10136         tpr->rx_jmb_prod_idx =
10137                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10138         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10139
10140         tg3_rings_reset(tp);
10141
10142         /* Initialize MAC address and backoff seed. */
10143         __tg3_set_mac_addr(tp, false);
10144
10145         /* MTU + ethernet header + FCS + optional VLAN tag */
10146         tw32(MAC_RX_MTU_SIZE,
10147              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10148
10149         /* The slot time is changed by tg3_setup_phy if we
10150          * run at gigabit with half duplex.
10151          */
10152         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10153               (6 << TX_LENGTHS_IPG_SHIFT) |
10154               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10155
10156         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10157             tg3_asic_rev(tp) == ASIC_REV_5762)
10158                 val |= tr32(MAC_TX_LENGTHS) &
10159                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10160                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10161
10162         tw32(MAC_TX_LENGTHS, val);
10163
10164         /* Receive rules. */
10165         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10166         tw32(RCVLPC_CONFIG, 0x0181);
10167
10168         /* Calculate the RDMAC_MODE setting early; we need it to determine
10169          * the RCVLPC_STATE_ENABLE mask.
10170          */
10171         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10172                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10173                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10174                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10175                       RDMAC_MODE_LNGREAD_ENAB);
10176
10177         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10178                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10179
10180         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10181             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10182             tg3_asic_rev(tp) == ASIC_REV_57780)
10183                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10184                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10185                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10186
10187         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10188             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10189                 if (tg3_flag(tp, TSO_CAPABLE) &&
10190                     tg3_asic_rev(tp) == ASIC_REV_5705) {
10191                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10192                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10193                            !tg3_flag(tp, IS_5788)) {
10194                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10195                 }
10196         }
10197
10198         if (tg3_flag(tp, PCI_EXPRESS))
10199                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10200
10201         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10202                 tp->dma_limit = 0;
10203                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10204                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10205                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10206                 }
10207         }
10208
10209         if (tg3_flag(tp, HW_TSO_1) ||
10210             tg3_flag(tp, HW_TSO_2) ||
10211             tg3_flag(tp, HW_TSO_3))
10212                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10213
10214         if (tg3_flag(tp, 57765_PLUS) ||
10215             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10216             tg3_asic_rev(tp) == ASIC_REV_57780)
10217                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10218
10219         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10220             tg3_asic_rev(tp) == ASIC_REV_5762)
10221                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10222
10223         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10224             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10225             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10226             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10227             tg3_flag(tp, 57765_PLUS)) {
10228                 u32 tgtreg;
10229
10230                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10231                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10232                 else
10233                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10234
10235                 val = tr32(tgtreg);
10236                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10237                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10238                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10239                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10240                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10241                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10242                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10243                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10244                 }
10245                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10246         }
10247
10248         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10249             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10250             tg3_asic_rev(tp) == ASIC_REV_5762) {
10251                 u32 tgtreg;
10252
10253                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10254                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10255                 else
10256                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10257
10258                 val = tr32(tgtreg);
10259                 tw32(tgtreg, val |
10260                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10261                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10262         }
10263
10264         /* Receive/send statistics. */
10265         if (tg3_flag(tp, 5750_PLUS)) {
10266                 val = tr32(RCVLPC_STATS_ENABLE);
10267                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10268                 tw32(RCVLPC_STATS_ENABLE, val);
10269         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10270                    tg3_flag(tp, TSO_CAPABLE)) {
10271                 val = tr32(RCVLPC_STATS_ENABLE);
10272                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10273                 tw32(RCVLPC_STATS_ENABLE, val);
10274         } else {
10275                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10276         }
10277         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10278         tw32(SNDDATAI_STATSENAB, 0xffffff);
10279         tw32(SNDDATAI_STATSCTRL,
10280              (SNDDATAI_SCTRL_ENABLE |
10281               SNDDATAI_SCTRL_FASTUPD));
10282
10283         /* Setup host coalescing engine. */
10284         tw32(HOSTCC_MODE, 0);
10285         for (i = 0; i < 2000; i++) {
10286                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10287                         break;
10288                 udelay(10);
10289         }
10290
10291         __tg3_set_coalesce(tp, &tp->coal);
10292
10293         if (!tg3_flag(tp, 5705_PLUS)) {
10294                 /* Status/statistics block address.  See tg3_timer,
10295                  * the tg3_periodic_fetch_stats call there, and
10296                  * tg3_get_stats to see how this works for 5705/5750 chips.
10297                  */
10298                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10299                      ((u64) tp->stats_mapping >> 32));
10300                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10301                      ((u64) tp->stats_mapping & 0xffffffff));
10302                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10303
10304                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10305
10306                 /* Clear statistics and status block memory areas */
10307                 for (i = NIC_SRAM_STATS_BLK;
10308                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10309                      i += sizeof(u32)) {
10310                         tg3_write_mem(tp, i, 0);
10311                         udelay(40);
10312                 }
10313         }
10314
10315         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10316
10317         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10318         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10319         if (!tg3_flag(tp, 5705_PLUS))
10320                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10321
10322         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10323                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10324                 /* Reset to prevent intermittently losing the first rx packet */
10325                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10326                 udelay(10);
10327         }
10328
10329         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10330                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10331                         MAC_MODE_FHDE_ENABLE;
10332         if (tg3_flag(tp, ENABLE_APE))
10333                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10334         if (!tg3_flag(tp, 5705_PLUS) &&
10335             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10336             tg3_asic_rev(tp) != ASIC_REV_5700)
10337                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10338         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10339         udelay(40);
10340
10341         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10342          * If TG3_FLAG_IS_NIC is zero, we should read the
10343          * register to preserve the GPIO settings for LOMs. The GPIOs,
10344          * whether used as inputs or outputs, are set by boot code after
10345          * reset.
10346          */
10347         if (!tg3_flag(tp, IS_NIC)) {
10348                 u32 gpio_mask;
10349
10350                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10351                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10352                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10353
10354                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10355                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10356                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10357
10358                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10359                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10360
10361                 tp->grc_local_ctrl &= ~gpio_mask;
10362                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10363
10364                 /* GPIO1 must be driven high for EEPROM write protect */
10365                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10366                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10367                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10368         }
10369         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10370         udelay(100);
10371
10372         if (tg3_flag(tp, USING_MSIX)) {
10373                 val = tr32(MSGINT_MODE);
10374                 val |= MSGINT_MODE_ENABLE;
10375                 if (tp->irq_cnt > 1)
10376                         val |= MSGINT_MODE_MULTIVEC_EN;
10377                 if (!tg3_flag(tp, 1SHOT_MSI))
10378                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10379                 tw32(MSGINT_MODE, val);
10380         }
10381
10382         if (!tg3_flag(tp, 5705_PLUS)) {
10383                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10384                 udelay(40);
10385         }
10386
10387         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10388                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10389                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10390                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10391                WDMAC_MODE_LNGREAD_ENAB);
10392
10393         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10394             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10395                 if (tg3_flag(tp, TSO_CAPABLE) &&
10396                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10397                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10398                         /* nothing */
10399                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10400                            !tg3_flag(tp, IS_5788)) {
10401                         val |= WDMAC_MODE_RX_ACCEL;
10402                 }
10403         }
10404
10405         /* Enable host coalescing bug fix */
10406         if (tg3_flag(tp, 5755_PLUS))
10407                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10408
10409         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10410                 val |= WDMAC_MODE_BURST_ALL_DATA;
10411
10412         tw32_f(WDMAC_MODE, val);
10413         udelay(40);
10414
10415         if (tg3_flag(tp, PCIX_MODE)) {
10416                 u16 pcix_cmd;
10417
10418                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10419                                      &pcix_cmd);
10420                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10421                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10422                         pcix_cmd |= PCI_X_CMD_READ_2K;
10423                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10424                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10425                         pcix_cmd |= PCI_X_CMD_READ_2K;
10426                 }
10427                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10428                                       pcix_cmd);
10429         }
10430
10431         tw32_f(RDMAC_MODE, rdmac_mode);
10432         udelay(40);
10433
10434         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10435             tg3_asic_rev(tp) == ASIC_REV_5720) {
10436                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10437                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10438                                 break;
10439                 }
10440                 if (i < TG3_NUM_RDMA_CHANNELS) {
10441                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10442                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10443                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10444                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10445                 }
10446         }
10447
10448         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10449         if (!tg3_flag(tp, 5705_PLUS))
10450                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10451
10452         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10453                 tw32(SNDDATAC_MODE,
10454                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10455         else
10456                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10457
10458         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10459         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10460         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10461         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10462                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10463         tw32(RCVDBDI_MODE, val);
10464         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10465         if (tg3_flag(tp, HW_TSO_1) ||
10466             tg3_flag(tp, HW_TSO_2) ||
10467             tg3_flag(tp, HW_TSO_3))
10468                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10469         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10470         if (tg3_flag(tp, ENABLE_TSS))
10471                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10472         tw32(SNDBDI_MODE, val);
10473         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10474
10475         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10476                 err = tg3_load_5701_a0_firmware_fix(tp);
10477                 if (err)
10478                         return err;
10479         }
10480
10481         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10482                 /* Ignore any errors for the firmware download. If download
10483                  * fails, the device will operate with EEE disabled.
10484                  */
10485                 tg3_load_57766_firmware(tp);
10486         }
10487
10488         if (tg3_flag(tp, TSO_CAPABLE)) {
10489                 err = tg3_load_tso_firmware(tp);
10490                 if (err)
10491                         return err;
10492         }
10493
10494         tp->tx_mode = TX_MODE_ENABLE;
10495
10496         if (tg3_flag(tp, 5755_PLUS) ||
10497             tg3_asic_rev(tp) == ASIC_REV_5906)
10498                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10499
10500         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10501             tg3_asic_rev(tp) == ASIC_REV_5762) {
10502                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10503                 tp->tx_mode &= ~val;
10504                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10505         }
10506
10507         tw32_f(MAC_TX_MODE, tp->tx_mode);
10508         udelay(100);
10509
10510         if (tg3_flag(tp, ENABLE_RSS)) {
10511                 tg3_rss_write_indir_tbl(tp);
10512
10513                 /* Set up the "secret" hash key. */
10514                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10515                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10516                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10517                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10518                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10519                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10520                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10521                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10522                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10523                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
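                /* Note: a fixed key makes the RSS hash predictable across
                 * all tg3 devices; newer kernels instead seed this from a
                 * random key (e.g. netdev_rss_key_fill()).
                 */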
10524         }
10525
10526         tp->rx_mode = RX_MODE_ENABLE;
10527         if (tg3_flag(tp, 5755_PLUS))
10528                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10529
10530         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10531                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10532
10533         if (tg3_flag(tp, ENABLE_RSS))
10534                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10535                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10536                                RX_MODE_RSS_IPV6_HASH_EN |
10537                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10538                                RX_MODE_RSS_IPV4_HASH_EN |
10539                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10540
10541         tw32_f(MAC_RX_MODE, tp->rx_mode);
10542         udelay(10);
10543
10544         tw32(MAC_LED_CTRL, tp->led_ctrl);
10545
10546         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10547         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10548                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10549                 udelay(10);
10550         }
10551         tw32_f(MAC_RX_MODE, tp->rx_mode);
10552         udelay(10);
10553
10554         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10555                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10556                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10557                         /* Set drive transmission level to 1.2V, but only
10558                          * if the signal pre-emphasis bit is not set. */
10559                         val = tr32(MAC_SERDES_CFG);
10560                         val &= 0xfffff000;
10561                         val |= 0x880;
10562                         tw32(MAC_SERDES_CFG, val);
10563                 }
10564                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10565                         tw32(MAC_SERDES_CFG, 0x616000);
10566         }
10567
10568         /* Prevent chip from dropping frames when flow control
10569          * is enabled.
10570          */
10571         if (tg3_flag(tp, 57765_CLASS))
10572                 val = 1;
10573         else
10574                 val = 2;
10575         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10576
10577         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10578             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10579                 /* Use hardware link auto-negotiation */
10580                 tg3_flag_set(tp, HW_AUTONEG);
10581         }
10582
10583         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10584             tg3_asic_rev(tp) == ASIC_REV_5714) {
10585                 u32 tmp;
10586
10587                 tmp = tr32(SERDES_RX_CTRL);
10588                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10589                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10590                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10591                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10592         }
10593
10594         if (!tg3_flag(tp, USE_PHYLIB)) {
10595                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10596                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10597
10598                 err = tg3_setup_phy(tp, false);
10599                 if (err)
10600                         return err;
10601
10602                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10603                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10604                         u32 tmp;
10605
10606                         /* Clear CRC stats. */
10607                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10608                                 tg3_writephy(tp, MII_TG3_TEST1,
10609                                              tmp | MII_TG3_TEST1_CRC_EN);
10610                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10611                         }
10612                 }
10613         }
10614
10615         __tg3_set_rx_mode(tp->dev);
10616
10617         /* Initialize receive rules. */
10618         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10619         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10620         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10621         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10622
10623         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10624                 limit = 8;
10625         else
10626                 limit = 16;
10627         if (tg3_flag(tp, ENABLE_ASF))
10628                 limit -= 4;
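        /* Each case below intentionally falls through, so entering at
         * 'limit' clears every unused rule from slot (limit - 1) down to
         * slot 4.  With ASF enabled the top four slots are left to the
         * firmware; rules 0 and 1 were programmed above.
         */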
10629         switch (limit) {
10630         case 16:
10631                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10632         case 15:
10633                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10634         case 14:
10635                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10636         case 13:
10637                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10638         case 12:
10639                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10640         case 11:
10641                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10642         case 10:
10643                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10644         case 9:
10645                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10646         case 8:
10647                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10648         case 7:
10649                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10650         case 6:
10651                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10652         case 5:
10653                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10654         case 4:
10655                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10656         case 3:
10657                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10658         case 2:
10659         case 1:
10660
10661         default:
10662                 break;
10663         }
10664
10665         if (tg3_flag(tp, ENABLE_APE))
10666                 /* Write our heartbeat update interval (disabled) to the APE. */
10667                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10668                                 APE_HOST_HEARTBEAT_INT_DISABLE);
10669
10670         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10671
10672         return 0;
10673 }
10674
10675 /* Called at device open time to get the chip ready for
10676  * packet processing.  Invoked with tp->lock held.
10677  */
10678 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10679 {
10680         /* Chip may have been just powered on. If so, the boot code may still
10681          * be running initialization. Wait for it to finish to avoid races in
10682          * accessing the hardware.
10683          */
10684         tg3_enable_register_access(tp);
10685         tg3_poll_fw(tp);
10686
10687         tg3_switch_clocks(tp);
10688
10689         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10690
10691         return tg3_reset_hw(tp, reset_phy);
10692 }
10693
10694 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10695 {
10696         int i;
10697
10698         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10699                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10700
10701                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10702                 off += len;
10703
10704                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10705                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10706                         memset(ocir, 0, TG3_OCIR_LEN);
10707         }
10708 }
10709
10710 /* sysfs attributes for hwmon */
10711 static ssize_t tg3_show_temp(struct device *dev,
10712                              struct device_attribute *devattr, char *buf)
10713 {
10714         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10715         struct tg3 *tp = dev_get_drvdata(dev);
10716         u32 temperature;
10717
10718         spin_lock_bh(&tp->lock);
10719         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10720                                 sizeof(temperature));
10721         spin_unlock_bh(&tp->lock);
10722         return sprintf(buf, "%u\n", temperature * 1000); /* hwmon expects millidegrees */
10723 }
10724
10725
10726 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10727                           TG3_TEMP_SENSOR_OFFSET);
10728 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10729                           TG3_TEMP_CAUTION_OFFSET);
10730 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10731                           TG3_TEMP_MAX_OFFSET);
10732
10733 static struct attribute *tg3_attrs[] = {
10734         &sensor_dev_attr_temp1_input.dev_attr.attr,
10735         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10736         &sensor_dev_attr_temp1_max.dev_attr.attr,
10737         NULL
10738 };
10739 ATTRIBUTE_GROUPS(tg3);
10740
10741 static void tg3_hwmon_close(struct tg3 *tp)
10742 {
10743         if (tp->hwmon_dev) {
10744                 hwmon_device_unregister(tp->hwmon_dev);
10745                 tp->hwmon_dev = NULL;
10746         }
10747 }
10748
10749 static void tg3_hwmon_open(struct tg3 *tp)
10750 {
10751         int i;
10752         u32 size = 0;
10753         struct pci_dev *pdev = tp->pdev;
10754         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10755
10756         tg3_sd_scan_scratchpad(tp, ocirs);
10757
10758         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10759                 if (!ocirs[i].src_data_length)
10760                         continue;
10761
10762                 size += ocirs[i].src_hdr_length;
10763                 size += ocirs[i].src_data_length;
10764         }
10765
10766         if (!size)
10767                 return;
10768
10769         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10770                                                           tp, tg3_groups);
10771         if (IS_ERR(tp->hwmon_dev)) {
10772                 tp->hwmon_dev = NULL;
10773                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10774         }
10775 }
10776
10777
10778 #define TG3_STAT_ADD32(PSTAT, REG) \
10779 do {    u32 __val = tr32(REG); \
10780         (PSTAT)->low += __val; \
10781         if ((PSTAT)->low < __val) \
10782                 (PSTAT)->high += 1; \
10783 } while (0)
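/* Sketch (not compiled in): TG3_STAT_ADD32 widens a wrapping 32-bit
 * hardware counter into a 64-bit {high, low} software counter.  Unsigned
 * overflow of the low half signals the carry.  Function form of the same
 * logic, with hypothetical u32 high/low halves:
 */
#if 0
static void tg3_stat_add32_example(u32 *high, u32 *low, u32 hw_val)
{
        *low += hw_val;         /* u32 addition wraps modulo 2^32 */
        if (*low < hw_val)      /* sum wrapped, so a carry occurred */
                (*high)++;      /* propagate it into the high half */
}
#endif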
10784
10785 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10786 {
10787         struct tg3_hw_stats *sp = tp->hw_stats;
10788
10789         if (!tp->link_up)
10790                 return;
10791
10792         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10793         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10794         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10795         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10796         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10797         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10798         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10799         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10800         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10801         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10802         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10803         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10804         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10805         if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10806                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10807                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10808                 u32 val;
10809
10810                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10811                 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10812                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10813                 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10814         }
10815
10816         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10817         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10818         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10819         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10820         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10821         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10822         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10823         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10824         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10825         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10826         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10827         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10828         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10829         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10830
10831         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10832         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10833             tg3_asic_rev(tp) != ASIC_REV_5762 &&
10834             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10835             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10836                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10837         } else {
10838                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10839                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10840                 if (val) {
10841                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10842                         sp->rx_discards.low += val;
10843                         if (sp->rx_discards.low < val)
10844                                 sp->rx_discards.high += 1;
10845                 }
10846                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10847         }
10848         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10849 }
10850
10851 static void tg3_chk_missed_msi(struct tg3 *tp)
10852 {
10853         u32 i;
10854
10855         for (i = 0; i < tp->irq_cnt; i++) {
10856                 struct tg3_napi *tnapi = &tp->napi[i];
10857
10858                 if (tg3_has_work(tnapi)) {
10859                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10860                             tnapi->last_tx_cons == tnapi->tx_cons) {
10861                                 if (tnapi->chk_msi_cnt < 1) {
10862                                         tnapi->chk_msi_cnt++;
10863                                         return;
10864                                 }
10865                                 tg3_msi(0, tnapi);
10866                         }
10867                 }
10868                 tnapi->chk_msi_cnt = 0;
10869                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10870                 tnapi->last_tx_cons = tnapi->tx_cons;
10871         }
10872 }
10873
10874 static void tg3_timer(unsigned long __opaque)
10875 {
10876         struct tg3 *tp = (struct tg3 *) __opaque;
10877
10878         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10879                 goto restart_timer;
10880
10881         spin_lock(&tp->lock);
10882
10883         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10884             tg3_flag(tp, 57765_CLASS))
10885                 tg3_chk_missed_msi(tp);
10886
10887         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10888                 /* BCM4785: Flush posted writes from GbE to host memory. */
10889                 tr32(HOSTCC_MODE);
10890         }
10891
10892         if (!tg3_flag(tp, TAGGED_STATUS)) {
10893                 /* All of this is needed because, when using non-tagged
10894                  * IRQ status, the mailbox/status_block protocol the chip
10895                  * uses with the CPU is race prone.
10896                  */
10897                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10898                         tw32(GRC_LOCAL_CTRL,
10899                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10900                 } else {
10901                         tw32(HOSTCC_MODE, tp->coalesce_mode |
10902                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10903                 }
10904
10905                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10906                         spin_unlock(&tp->lock);
10907                         tg3_reset_task_schedule(tp);
10908                         goto restart_timer;
10909                 }
10910         }
10911
10912         /* This part only runs once per second. */
10913         if (!--tp->timer_counter) {
10914                 if (tg3_flag(tp, 5705_PLUS))
10915                         tg3_periodic_fetch_stats(tp);
10916
10917                 if (tp->setlpicnt && !--tp->setlpicnt)
10918                         tg3_phy_eee_enable(tp);
10919
10920                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10921                         u32 mac_stat;
10922                         int phy_event;
10923
10924                         mac_stat = tr32(MAC_STATUS);
10925
10926                         phy_event = 0;
10927                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10928                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10929                                         phy_event = 1;
10930                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10931                                 phy_event = 1;
10932
10933                         if (phy_event)
10934                                 tg3_setup_phy(tp, false);
10935                 } else if (tg3_flag(tp, POLL_SERDES)) {
10936                         u32 mac_stat = tr32(MAC_STATUS);
10937                         int need_setup = 0;
10938
10939                         if (tp->link_up &&
10940                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10941                                 need_setup = 1;
10942                         }
10943                         if (!tp->link_up &&
10944                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
10945                                          MAC_STATUS_SIGNAL_DET))) {
10946                                 need_setup = 1;
10947                         }
10948                         if (need_setup) {
10949                                 if (!tp->serdes_counter) {
10950                                         tw32_f(MAC_MODE,
10951                                              (tp->mac_mode &
10952                                               ~MAC_MODE_PORT_MODE_MASK));
10953                                         udelay(40);
10954                                         tw32_f(MAC_MODE, tp->mac_mode);
10955                                         udelay(40);
10956                                 }
10957                                 tg3_setup_phy(tp, false);
10958                         }
10959                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10960                            tg3_flag(tp, 5780_CLASS)) {
10961                         tg3_serdes_parallel_detect(tp);
10962                 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
10963                         u32 cpmu = tr32(TG3_CPMU_STATUS);
10964                         bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
10965                                          TG3_CPMU_STATUS_LINK_MASK);
10966
10967                         if (link_up != tp->link_up)
10968                                 tg3_setup_phy(tp, false);
10969                 }
10970
10971                 tp->timer_counter = tp->timer_multiplier;
10972         }
10973
10974         /* Heartbeat is only sent once every 2 seconds.
10975          *
10976          * The heartbeat is to tell the ASF firmware that the host
10977          * driver is still alive.  In the event that the OS crashes,
10978          * ASF needs to reset the hardware to free up the FIFO space
10979          * that may be filled with rx packets destined for the host.
10980          * If the FIFO is full, ASF will no longer function properly.
10981          *
10982          * Unintended resets have been reported on real-time kernels
10983          * where the timer doesn't run on time.  Netpoll will have the
10984          * same problem.
10985          *
10986          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10987          * to check the ring condition when the heartbeat is expiring
10988          * before doing the reset.  This will prevent most unintended
10989          * resets.
10990          */
10991         if (!--tp->asf_counter) {
10992                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10993                         tg3_wait_for_event_ack(tp);
10994
10995                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10996                                       FWCMD_NICDRV_ALIVE3);
10997                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10998                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10999                                       TG3_FW_UPDATE_TIMEOUT_SEC);
11000
11001                         tg3_generate_fw_event(tp);
11002                 }
11003                 tp->asf_counter = tp->asf_multiplier;
11004         }
11005
11006         spin_unlock(&tp->lock);
11007
11008 restart_timer:
11009         tp->timer.expires = jiffies + tp->timer_offset;
11010         add_timer(&tp->timer);
11011 }
11012
11013 static void tg3_timer_init(struct tg3 *tp)
11014 {
11015         if (tg3_flag(tp, TAGGED_STATUS) &&
11016             tg3_asic_rev(tp) != ASIC_REV_5717 &&
11017             !tg3_flag(tp, 57765_CLASS))
11018                 tp->timer_offset = HZ;
11019         else
11020                 tp->timer_offset = HZ / 10;
11021
11022         BUG_ON(tp->timer_offset > HZ);
11023
11024         tp->timer_multiplier = (HZ / tp->timer_offset);
11025         tp->asf_multiplier = (HZ / tp->timer_offset) *
11026                              TG3_FW_UPDATE_FREQ_SEC;
11027
11028         init_timer(&tp->timer);
11029         tp->timer.data = (unsigned long) tp;
11030         tp->timer.function = tg3_timer;
11031 }
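/* Worked example of the arithmetic above (assuming HZ = 1000): with
 * untagged status, timer_offset = HZ / 10 = 100 jiffies, so tg3_timer()
 * fires every 100 ms; timer_multiplier = 10 then gates its once-per-second
 * block to every 10th run, and asf_multiplier = 10 * TG3_FW_UPDATE_FREQ_SEC
 * spaces the ASF heartbeats the same way.
 */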
11032
11033 static void tg3_timer_start(struct tg3 *tp)
11034 {
11035         tp->asf_counter   = tp->asf_multiplier;
11036         tp->timer_counter = tp->timer_multiplier;
11037
11038         tp->timer.expires = jiffies + tp->timer_offset;
11039         add_timer(&tp->timer);
11040 }
11041
11042 static void tg3_timer_stop(struct tg3 *tp)
11043 {
11044         del_timer_sync(&tp->timer);
11045 }
11046
11047 /* Restart hardware after configuration changes, self-test, etc.
11048  * Invoked with tp->lock held.
11049  */
11050 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11051         __releases(tp->lock)
11052         __acquires(tp->lock)
11053 {
11054         int err;
11055
11056         err = tg3_init_hw(tp, reset_phy);
11057         if (err) {
11058                 netdev_err(tp->dev,
11059                            "Failed to re-initialize device, aborting\n");
11060                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11061                 tg3_full_unlock(tp);
11062                 tg3_timer_stop(tp);
11063                 tp->irq_sync = 0;
11064                 tg3_napi_enable(tp);
11065                 dev_close(tp->dev);
11066                 tg3_full_lock(tp, 0);
11067         }
11068         return err;
11069 }
11070
11071 static void tg3_reset_task(struct work_struct *work)
11072 {
11073         struct tg3 *tp = container_of(work, struct tg3, reset_task);
11074         int err;
11075
11076         tg3_full_lock(tp, 0);
11077
11078         if (!netif_running(tp->dev)) {
11079                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11080                 tg3_full_unlock(tp);
11081                 return;
11082         }
11083
11084         tg3_full_unlock(tp);
11085
11086         tg3_phy_stop(tp);
11087
11088         tg3_netif_stop(tp);
11089
11090         tg3_full_lock(tp, 1);
11091
11092         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11093                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11094                 tp->write32_rx_mbox = tg3_write_flush_reg32;
11095                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11096                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11097         }
11098
11099         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11100         err = tg3_init_hw(tp, true);
11101         if (err)
11102                 goto out;
11103
11104         tg3_netif_start(tp);
11105
11106 out:
11107         tg3_full_unlock(tp);
11108
11109         if (!err)
11110                 tg3_phy_start(tp);
11111
11112         tg3_flag_clear(tp, RESET_TASK_PENDING);
11113 }
11114
11115 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11116 {
11117         irq_handler_t fn;
11118         unsigned long flags;
11119         char *name;
11120         struct tg3_napi *tnapi = &tp->napi[irq_num];
11121
11122         if (tp->irq_cnt == 1)
11123                 name = tp->dev->name;
11124         else {
11125                 name = &tnapi->irq_lbl[0];
11126                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11127                         snprintf(name, IFNAMSIZ,
11128                                  "%s-txrx-%d", tp->dev->name, irq_num);
11129                 else if (tnapi->tx_buffers)
11130                         snprintf(name, IFNAMSIZ,
11131                                  "%s-tx-%d", tp->dev->name, irq_num);
11132                 else if (tnapi->rx_rcb)
11133                         snprintf(name, IFNAMSIZ,
11134                                  "%s-rx-%d", tp->dev->name, irq_num);
11135                 else
11136                         snprintf(name, IFNAMSIZ,
11137                                  "%s-%d", tp->dev->name, irq_num);
11138                 name[IFNAMSIZ-1] = 0;
11139         }
11140
11141         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11142                 fn = tg3_msi;
11143                 if (tg3_flag(tp, 1SHOT_MSI))
11144                         fn = tg3_msi_1shot;
11145                 flags = 0;
11146         } else {
11147                 fn = tg3_interrupt;
11148                 if (tg3_flag(tp, TAGGED_STATUS))
11149                         fn = tg3_interrupt_tagged;
11150                 flags = IRQF_SHARED;
11151         }
11152
11153         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11154 }
11155
11156 static int tg3_test_interrupt(struct tg3 *tp)
11157 {
11158         struct tg3_napi *tnapi = &tp->napi[0];
11159         struct net_device *dev = tp->dev;
11160         int err, i, intr_ok = 0;
11161         u32 val;
11162
11163         if (!netif_running(dev))
11164                 return -ENODEV;
11165
11166         tg3_disable_ints(tp);
11167
11168         free_irq(tnapi->irq_vec, tnapi);
11169
11170         /*
11171          * Turn off MSI one-shot mode.  Otherwise this test has no
11172          * way to observe whether the interrupt was delivered.
11173          */
11174         if (tg3_flag(tp, 57765_PLUS)) {
11175                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11176                 tw32(MSGINT_MODE, val);
11177         }
11178
11179         err = request_irq(tnapi->irq_vec, tg3_test_isr,
11180                           IRQF_SHARED, dev->name, tnapi);
11181         if (err)
11182                 return err;
11183
11184         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11185         tg3_enable_ints(tp);
11186
11187         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11188                tnapi->coal_now);
11189
11190         for (i = 0; i < 5; i++) {
11191                 u32 int_mbox, misc_host_ctrl;
11192
11193                 int_mbox = tr32_mailbox(tnapi->int_mbox);
11194                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11195
11196                 if ((int_mbox != 0) ||
11197                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11198                         intr_ok = 1;
11199                         break;
11200                 }
11201
11202                 if (tg3_flag(tp, 57765_PLUS) &&
11203                     tnapi->hw_status->status_tag != tnapi->last_tag)
11204                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11205
11206                 msleep(10);
11207         }
11208
11209         tg3_disable_ints(tp);
11210
11211         free_irq(tnapi->irq_vec, tnapi);
11212
11213         err = tg3_request_irq(tp, 0);
11214
11215         if (err)
11216                 return err;
11217
11218         if (intr_ok) {
11219                 /* Reenable MSI one shot mode. */
11220                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11221                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11222                         tw32(MSGINT_MODE, val);
11223                 }
11224                 return 0;
11225         }
11226
11227         return -EIO;
11228 }
11229
11230 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
11231  * successfully restored.
11232  */
11233 static int tg3_test_msi(struct tg3 *tp)
11234 {
11235         int err;
11236         u16 pci_cmd;
11237
11238         if (!tg3_flag(tp, USING_MSI))
11239                 return 0;
11240
11241         /* Turn off SERR reporting in case MSI terminates with Master
11242          * Abort.
11243          */
11244         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11245         pci_write_config_word(tp->pdev, PCI_COMMAND,
11246                               pci_cmd & ~PCI_COMMAND_SERR);
11247
11248         err = tg3_test_interrupt(tp);
11249
11250         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11251
11252         if (!err)
11253                 return 0;
11254
11255         /* other failures */
11256         if (err != -EIO)
11257                 return err;
11258
11259         /* MSI test failed, go back to INTx mode */
11260         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11261                     "to INTx mode. Please report this failure to the PCI "
11262                     "maintainer and include system chipset information\n");
11263
11264         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11265
11266         pci_disable_msi(tp->pdev);
11267
11268         tg3_flag_clear(tp, USING_MSI);
11269         tp->napi[0].irq_vec = tp->pdev->irq;
11270
11271         err = tg3_request_irq(tp, 0);
11272         if (err)
11273                 return err;
11274
11275         /* Need to reset the chip because the MSI cycle may have terminated
11276          * with Master Abort.
11277          */
11278         tg3_full_lock(tp, 1);
11279
11280         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11281         err = tg3_init_hw(tp, true);
11282
11283         tg3_full_unlock(tp);
11284
11285         if (err)
11286                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11287
11288         return err;
11289 }
11290
11291 static int tg3_request_firmware(struct tg3 *tp)
11292 {
11293         const struct tg3_firmware_hdr *fw_hdr;
11294
11295         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11296                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11297                            tp->fw_needed);
11298                 return -ENOENT;
11299         }
11300
11301         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11302
11303         /* Firmware blob starts with version numbers, followed by
11304          * the start address and the _full_ length including BSS sections
11305          * (which must be at least as long as the actual data, of course).
11306          */
11307
11308         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11309         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11310                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11311                            tp->fw_len, tp->fw_needed);
11312                 release_firmware(tp->fw);
11313                 tp->fw = NULL;
11314                 return -EINVAL;
11315         }
11316
11317         /* We no longer need firmware; we have it. */
11318         tp->fw_needed = NULL;
11319         return 0;
11320 }
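
/* Editor's sketch of the header implied by the comment above.  The real
 * definition lives in tg3.h as struct tg3_firmware_hdr; the field names
 * shown here are illustrative:
 *
 *      struct tg3_firmware_hdr {
 *              __be32 version;         // packed major/minor
 *              __be32 base_addr;       // on-chip load address
 *              __be32 len;             // full image length, BSS included
 *      };
 *
 * Hence tp->fw->size is TG3_FW_HDR_LEN plus the data bytes, while
 * fw_hdr->len may exceed the data bytes by the size of the BSS.
 */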
11321
11322 static u32 tg3_irq_count(struct tg3 *tp)
11323 {
11324         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11325
11326         if (irq_cnt > 1) {
11327                 /* We want as many rx rings enabled as there are cpus.
11328                  * In multiqueue MSI-X mode, the first MSI-X vector
11329                  * only deals with link interrupts, etc., so we add
11330                  * one to the number of vectors we are requesting.
11331                  */
11332                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11333         }
11334
11335         return irq_cnt;
11336 }
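
/* Worked example (editor's note): with rxq_cnt = 4 and txq_cnt = 1,
 * irq_cnt starts as max(4, 1) = 4; since that is greater than 1 we
 * request min(4 + 1, tp->irq_max) vectors, the extra one reserving
 * vector 0 for link and other miscellaneous interrupts.
 */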
11337
11338 static bool tg3_enable_msix(struct tg3 *tp)
11339 {
11340         int i, rc;
11341         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11342
11343         tp->txq_cnt = tp->txq_req;
11344         tp->rxq_cnt = tp->rxq_req;
11345         if (!tp->rxq_cnt)
11346                 tp->rxq_cnt = netif_get_num_default_rss_queues();
11347         if (tp->rxq_cnt > tp->rxq_max)
11348                 tp->rxq_cnt = tp->rxq_max;
11349
11350         /* Disable multiple TX rings by default.  Simple round-robin hardware
11351          * scheduling of the TX rings can cause starvation of rings with
11352          * small packets when other rings have TSO or jumbo packets.
11353          */
11354         if (!tp->txq_req)
11355                 tp->txq_cnt = 1;
11356
11357         tp->irq_cnt = tg3_irq_count(tp);
11358
11359         for (i = 0; i < tp->irq_max; i++) {
11360                 msix_ent[i].entry  = i;
11361                 msix_ent[i].vector = 0;
11362         }
11363
11364         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11365         if (rc < 0) {
11366                 return false;
11367         } else if (rc != 0) {
11368                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
11369                         return false;
11370                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11371                               tp->irq_cnt, rc);
11372                 tp->irq_cnt = rc;
11373                 tp->rxq_cnt = max(rc - 1, 1);
11374                 if (tp->txq_cnt)
11375                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11376         }
11377
11378         for (i = 0; i < tp->irq_max; i++)
11379                 tp->napi[i].irq_vec = msix_ent[i].vector;
11380
11381         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11382                 pci_disable_msix(tp->pdev);
11383                 return false;
11384         }
11385
11386         if (tp->irq_cnt == 1)
11387                 return true;
11388
11389         tg3_flag_set(tp, ENABLE_RSS);
11390
11391         if (tp->txq_cnt > 1)
11392                 tg3_flag_set(tp, ENABLE_TSS);
11393
11394         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11395
11396         return true;
11397 }
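
/* Editor's note on the retry above: the tri-state pci_enable_msix()
 * used here returns 0 on success, a negative errno on failure, or a
 * positive count of the vectors actually available.  The second call
 * retries with that smaller count, and rxq_cnt shrinks to rc - 1 (but
 * at least 1) because vector 0 stays dedicated to link interrupts.
 */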
11398
11399 static void tg3_ints_init(struct tg3 *tp)
11400 {
11401         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11402             !tg3_flag(tp, TAGGED_STATUS)) {
11403                 /* All MSI-supporting chips should support tagged
11404                  * status; warn and fall back to INTx if not.
11405                  */
11406                 netdev_warn(tp->dev,
11407                             "MSI without TAGGED_STATUS? Not using MSI\n");
11408                 goto defcfg;
11409         }
11410
11411         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11412                 tg3_flag_set(tp, USING_MSIX);
11413         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11414                 tg3_flag_set(tp, USING_MSI);
11415
11416         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11417                 u32 msi_mode = tr32(MSGINT_MODE);
11418                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11419                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11420                 if (!tg3_flag(tp, 1SHOT_MSI))
11421                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11422                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11423         }
11424 defcfg:
11425         if (!tg3_flag(tp, USING_MSIX)) {
11426                 tp->irq_cnt = 1;
11427                 tp->napi[0].irq_vec = tp->pdev->irq;
11428         }
11429
11430         if (tp->irq_cnt == 1) {
11431                 tp->txq_cnt = 1;
11432                 tp->rxq_cnt = 1;
11433                 netif_set_real_num_tx_queues(tp->dev, 1);
11434                 netif_set_real_num_rx_queues(tp->dev, 1);
11435         }
11436 }
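
/* Editor's summary of the selection above: MSI-X when supported and
 * tg3_enable_msix() succeeds, else MSI when supported and granted by
 * the PCI core, else legacy INTx on tp->pdev->irq.  Anything short of
 * multi-vector MSI-X collapses to a single rx/tx queue pair.
 */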
11437
11438 static void tg3_ints_fini(struct tg3 *tp)
11439 {
11440         if (tg3_flag(tp, USING_MSIX))
11441                 pci_disable_msix(tp->pdev);
11442         else if (tg3_flag(tp, USING_MSI))
11443                 pci_disable_msi(tp->pdev);
11444         tg3_flag_clear(tp, USING_MSI);
11445         tg3_flag_clear(tp, USING_MSIX);
11446         tg3_flag_clear(tp, ENABLE_RSS);
11447         tg3_flag_clear(tp, ENABLE_TSS);
11448 }
11449
11450 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11451                      bool init)
11452 {
11453         struct net_device *dev = tp->dev;
11454         int i, err;
11455
11456         /*
11457          * Set up interrupts first so we know how
11458          * many NAPI resources to allocate.
11459          */
11460         tg3_ints_init(tp);
11461
11462         tg3_rss_check_indir_tbl(tp);
11463
11464         /* The placement of this call is tied
11465          * to the setup and use of Host TX descriptors.
11466          */
11467         err = tg3_alloc_consistent(tp);
11468         if (err)
11469                 goto out_ints_fini;
11470
11471         tg3_napi_init(tp);
11472
11473         tg3_napi_enable(tp);
11474
11475         for (i = 0; i < tp->irq_cnt; i++) {
11476                 struct tg3_napi *tnapi = &tp->napi[i];
11477                 err = tg3_request_irq(tp, i);
11478                 if (err) {
11479                         for (i--; i >= 0; i--) {
11480                                 tnapi = &tp->napi[i];
11481                                 free_irq(tnapi->irq_vec, tnapi);
11482                         }
11483                         goto out_napi_fini;
11484                 }
11485         }
11486
11487         tg3_full_lock(tp, 0);
11488
11489         if (init)
11490                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11491
11492         err = tg3_init_hw(tp, reset_phy);
11493         if (err) {
11494                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11495                 tg3_free_rings(tp);
11496         }
11497
11498         tg3_full_unlock(tp);
11499
11500         if (err)
11501                 goto out_free_irq;
11502
11503         if (test_irq && tg3_flag(tp, USING_MSI)) {
11504                 err = tg3_test_msi(tp);
11505
11506                 if (err) {
11507                         tg3_full_lock(tp, 0);
11508                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11509                         tg3_free_rings(tp);
11510                         tg3_full_unlock(tp);
11511
11512                         goto out_napi_fini;
11513                 }
11514
11515                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11516                         u32 val = tr32(PCIE_TRANSACTION_CFG);
11517
11518                         tw32(PCIE_TRANSACTION_CFG,
11519                              val | PCIE_TRANS_CFG_1SHOT_MSI);
11520                 }
11521         }
11522
11523         tg3_phy_start(tp);
11524
11525         tg3_hwmon_open(tp);
11526
11527         tg3_full_lock(tp, 0);
11528
11529         tg3_timer_start(tp);
11530         tg3_flag_set(tp, INIT_COMPLETE);
11531         tg3_enable_ints(tp);
11532
11533         if (init)
11534                 tg3_ptp_init(tp);
11535         else
11536                 tg3_ptp_resume(tp);
11537
11538
11539         tg3_full_unlock(tp);
11540
11541         netif_tx_start_all_queues(dev);
11542
11543         /*
11544          * Reset the loopback feature if it was turned on while the
11545          * device was down; make sure it is reinstated properly now.
11546          */
11547         if (dev->features & NETIF_F_LOOPBACK)
11548                 tg3_set_loopback(dev, dev->features);
11549
11550         return 0;
11551
11552 out_free_irq:
11553         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11554                 struct tg3_napi *tnapi = &tp->napi[i];
11555                 free_irq(tnapi->irq_vec, tnapi);
11556         }
11557
11558 out_napi_fini:
11559         tg3_napi_disable(tp);
11560         tg3_napi_fini(tp);
11561         tg3_free_consistent(tp);
11562
11563 out_ints_fini:
11564         tg3_ints_fini(tp);
11565
11566         return err;
11567 }
11568
11569 static void tg3_stop(struct tg3 *tp)
11570 {
11571         int i;
11572
11573         tg3_reset_task_cancel(tp);
11574         tg3_netif_stop(tp);
11575
11576         tg3_timer_stop(tp);
11577
11578         tg3_hwmon_close(tp);
11579
11580         tg3_phy_stop(tp);
11581
11582         tg3_full_lock(tp, 1);
11583
11584         tg3_disable_ints(tp);
11585
11586         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11587         tg3_free_rings(tp);
11588         tg3_flag_clear(tp, INIT_COMPLETE);
11589
11590         tg3_full_unlock(tp);
11591
11592         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11593                 struct tg3_napi *tnapi = &tp->napi[i];
11594                 free_irq(tnapi->irq_vec, tnapi);
11595         }
11596
11597         tg3_ints_fini(tp);
11598
11599         tg3_napi_fini(tp);
11600
11601         tg3_free_consistent(tp);
11602 }
11603
11604 static int tg3_open(struct net_device *dev)
11605 {
11606         struct tg3 *tp = netdev_priv(dev);
11607         int err;
11608
11609         if (tp->fw_needed) {
11610                 err = tg3_request_firmware(tp);
11611                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11612                         if (err) {
11613                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11614                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11615                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11616                                 netdev_warn(tp->dev, "EEE capability restored\n");
11617                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11618                         }
11619                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11620                         if (err)
11621                                 return err;
11622                 } else if (err) {
11623                         netdev_warn(tp->dev, "TSO capability disabled\n");
11624                         tg3_flag_clear(tp, TSO_CAPABLE);
11625                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11626                         netdev_notice(tp->dev, "TSO capability restored\n");
11627                         tg3_flag_set(tp, TSO_CAPABLE);
11628                 }
11629         }
11630
11631         tg3_carrier_off(tp);
11632
11633         err = tg3_power_up(tp);
11634         if (err)
11635                 return err;
11636
11637         tg3_full_lock(tp, 0);
11638
11639         tg3_disable_ints(tp);
11640         tg3_flag_clear(tp, INIT_COMPLETE);
11641
11642         tg3_full_unlock(tp);
11643
11644         err = tg3_start(tp,
11645                         !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11646                         true, true);
11647         if (err) {
11648                 tg3_frob_aux_power(tp, false);
11649                 pci_set_power_state(tp->pdev, PCI_D3hot);
11650         }
11651
11652         if (tg3_flag(tp, PTP_CAPABLE)) {
11653                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11654                                                    &tp->pdev->dev);
11655                 if (IS_ERR(tp->ptp_clock))
11656                         tp->ptp_clock = NULL;
11657         }
11658
11659         return err;
11660 }
11661
11662 static int tg3_close(struct net_device *dev)
11663 {
11664         struct tg3 *tp = netdev_priv(dev);
11665
11666         tg3_ptp_fini(tp);
11667
11668         tg3_stop(tp);
11669
11670         /* Clear stats across close / open calls */
11671         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11672         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11673
11674         if (pci_device_is_present(tp->pdev)) {
11675                 tg3_power_down_prepare(tp);
11676
11677                 tg3_carrier_off(tp);
11678         }
11679         return 0;
11680 }
11681
11682 static inline u64 get_stat64(tg3_stat64_t *val)
11683 {
11684         return ((u64)val->high << 32) | ((u64)val->low);
11685 }
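
/* Worked example (editor's note): high = 0x00000001, low = 0x00000002
 * combine to 0x0000000100000002.  The hardware exports each 64-bit
 * statistic as two 32-bit halves, which tg3_stat64_t mirrors.
 */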
11686
11687 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11688 {
11689         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11690
11691         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11692             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11693              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11694                 u32 val;
11695
11696                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11697                         tg3_writephy(tp, MII_TG3_TEST1,
11698                                      val | MII_TG3_TEST1_CRC_EN);
11699                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11700                 } else
11701                         val = 0;
11702
11703                 tp->phy_crc_errors += val;
11704
11705                 return tp->phy_crc_errors;
11706         }
11707
11708         return get_stat64(&hw_stats->rx_fcs_errors);
11709 }
11710
11711 #define ESTAT_ADD(member) \
11712         estats->member =        old_estats->member + \
11713                                 get_stat64(&hw_stats->member)
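
/* Editor's note -- ESTAT_ADD(rx_octets) expands to
 *
 *      estats->rx_octets = old_estats->rx_octets +
 *                          get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each ethtool statistic is the saved pre-reset total plus the
 * live hardware counter, which restarts from zero on a chip reset.
 */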
11714
11715 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11716 {
11717         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11718         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11719
11720         ESTAT_ADD(rx_octets);
11721         ESTAT_ADD(rx_fragments);
11722         ESTAT_ADD(rx_ucast_packets);
11723         ESTAT_ADD(rx_mcast_packets);
11724         ESTAT_ADD(rx_bcast_packets);
11725         ESTAT_ADD(rx_fcs_errors);
11726         ESTAT_ADD(rx_align_errors);
11727         ESTAT_ADD(rx_xon_pause_rcvd);
11728         ESTAT_ADD(rx_xoff_pause_rcvd);
11729         ESTAT_ADD(rx_mac_ctrl_rcvd);
11730         ESTAT_ADD(rx_xoff_entered);
11731         ESTAT_ADD(rx_frame_too_long_errors);
11732         ESTAT_ADD(rx_jabbers);
11733         ESTAT_ADD(rx_undersize_packets);
11734         ESTAT_ADD(rx_in_length_errors);
11735         ESTAT_ADD(rx_out_length_errors);
11736         ESTAT_ADD(rx_64_or_less_octet_packets);
11737         ESTAT_ADD(rx_65_to_127_octet_packets);
11738         ESTAT_ADD(rx_128_to_255_octet_packets);
11739         ESTAT_ADD(rx_256_to_511_octet_packets);
11740         ESTAT_ADD(rx_512_to_1023_octet_packets);
11741         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11742         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11743         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11744         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11745         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11746
11747         ESTAT_ADD(tx_octets);
11748         ESTAT_ADD(tx_collisions);
11749         ESTAT_ADD(tx_xon_sent);
11750         ESTAT_ADD(tx_xoff_sent);
11751         ESTAT_ADD(tx_flow_control);
11752         ESTAT_ADD(tx_mac_errors);
11753         ESTAT_ADD(tx_single_collisions);
11754         ESTAT_ADD(tx_mult_collisions);
11755         ESTAT_ADD(tx_deferred);
11756         ESTAT_ADD(tx_excessive_collisions);
11757         ESTAT_ADD(tx_late_collisions);
11758         ESTAT_ADD(tx_collide_2times);
11759         ESTAT_ADD(tx_collide_3times);
11760         ESTAT_ADD(tx_collide_4times);
11761         ESTAT_ADD(tx_collide_5times);
11762         ESTAT_ADD(tx_collide_6times);
11763         ESTAT_ADD(tx_collide_7times);
11764         ESTAT_ADD(tx_collide_8times);
11765         ESTAT_ADD(tx_collide_9times);
11766         ESTAT_ADD(tx_collide_10times);
11767         ESTAT_ADD(tx_collide_11times);
11768         ESTAT_ADD(tx_collide_12times);
11769         ESTAT_ADD(tx_collide_13times);
11770         ESTAT_ADD(tx_collide_14times);
11771         ESTAT_ADD(tx_collide_15times);
11772         ESTAT_ADD(tx_ucast_packets);
11773         ESTAT_ADD(tx_mcast_packets);
11774         ESTAT_ADD(tx_bcast_packets);
11775         ESTAT_ADD(tx_carrier_sense_errors);
11776         ESTAT_ADD(tx_discards);
11777         ESTAT_ADD(tx_errors);
11778
11779         ESTAT_ADD(dma_writeq_full);
11780         ESTAT_ADD(dma_write_prioq_full);
11781         ESTAT_ADD(rxbds_empty);
11782         ESTAT_ADD(rx_discards);
11783         ESTAT_ADD(rx_errors);
11784         ESTAT_ADD(rx_threshold_hit);
11785
11786         ESTAT_ADD(dma_readq_full);
11787         ESTAT_ADD(dma_read_prioq_full);
11788         ESTAT_ADD(tx_comp_queue_full);
11789
11790         ESTAT_ADD(ring_set_send_prod_index);
11791         ESTAT_ADD(ring_status_update);
11792         ESTAT_ADD(nic_irqs);
11793         ESTAT_ADD(nic_avoided_irqs);
11794         ESTAT_ADD(nic_tx_threshold_hit);
11795
11796         ESTAT_ADD(mbuf_lwm_thresh_hit);
11797 }
11798
11799 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11800 {
11801         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11802         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11803
11804         stats->rx_packets = old_stats->rx_packets +
11805                 get_stat64(&hw_stats->rx_ucast_packets) +
11806                 get_stat64(&hw_stats->rx_mcast_packets) +
11807                 get_stat64(&hw_stats->rx_bcast_packets);
11808
11809         stats->tx_packets = old_stats->tx_packets +
11810                 get_stat64(&hw_stats->tx_ucast_packets) +
11811                 get_stat64(&hw_stats->tx_mcast_packets) +
11812                 get_stat64(&hw_stats->tx_bcast_packets);
11813
11814         stats->rx_bytes = old_stats->rx_bytes +
11815                 get_stat64(&hw_stats->rx_octets);
11816         stats->tx_bytes = old_stats->tx_bytes +
11817                 get_stat64(&hw_stats->tx_octets);
11818
11819         stats->rx_errors = old_stats->rx_errors +
11820                 get_stat64(&hw_stats->rx_errors);
11821         stats->tx_errors = old_stats->tx_errors +
11822                 get_stat64(&hw_stats->tx_errors) +
11823                 get_stat64(&hw_stats->tx_mac_errors) +
11824                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11825                 get_stat64(&hw_stats->tx_discards);
11826
11827         stats->multicast = old_stats->multicast +
11828                 get_stat64(&hw_stats->rx_mcast_packets);
11829         stats->collisions = old_stats->collisions +
11830                 get_stat64(&hw_stats->tx_collisions);
11831
11832         stats->rx_length_errors = old_stats->rx_length_errors +
11833                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11834                 get_stat64(&hw_stats->rx_undersize_packets);
11835
11836         stats->rx_frame_errors = old_stats->rx_frame_errors +
11837                 get_stat64(&hw_stats->rx_align_errors);
11838         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11839                 get_stat64(&hw_stats->tx_discards);
11840         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11841                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11842
11843         stats->rx_crc_errors = old_stats->rx_crc_errors +
11844                 tg3_calc_crc_errors(tp);
11845
11846         stats->rx_missed_errors = old_stats->rx_missed_errors +
11847                 get_stat64(&hw_stats->rx_discards);
11848
11849         stats->rx_dropped = tp->rx_dropped;
11850         stats->tx_dropped = tp->tx_dropped;
11851 }
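
/* Editor's note: rx_dropped and tx_dropped above are software counters
 * kept by the driver itself, so they are copied directly instead of
 * being accumulated on top of net_stats_prev like the hardware stats.
 */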
11852
11853 static int tg3_get_regs_len(struct net_device *dev)
11854 {
11855         return TG3_REG_BLK_SIZE;
11856 }
11857
11858 static void tg3_get_regs(struct net_device *dev,
11859                 struct ethtool_regs *regs, void *_p)
11860 {
11861         struct tg3 *tp = netdev_priv(dev);
11862
11863         regs->version = 0;
11864
11865         memset(_p, 0, TG3_REG_BLK_SIZE);
11866
11867         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11868                 return;
11869
11870         tg3_full_lock(tp, 0);
11871
11872         tg3_dump_legacy_regs(tp, (u32 *)_p);
11873
11874         tg3_full_unlock(tp);
11875 }
11876
11877 static int tg3_get_eeprom_len(struct net_device *dev)
11878 {
11879         struct tg3 *tp = netdev_priv(dev);
11880
11881         return tp->nvram_size;
11882 }
11883
11884 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11885 {
11886         struct tg3 *tp = netdev_priv(dev);
11887         int ret;
11888         u8  *pd;
11889         u32 i, offset, len, b_offset, b_count;
11890         __be32 val;
11891
11892         if (tg3_flag(tp, NO_NVRAM))
11893                 return -EINVAL;
11894
11895         offset = eeprom->offset;
11896         len = eeprom->len;
11897         eeprom->len = 0;
11898
11899         eeprom->magic = TG3_EEPROM_MAGIC;
11900
11901         if (offset & 3) {
11902                 /* adjustments to start on required 4 byte boundary */
11903                 b_offset = offset & 3;
11904                 b_count = 4 - b_offset;
11905                 if (b_count > len) {
11906                         /* i.e. offset=1 len=2 */
11907                         b_count = len;
11908                 }
11909                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11910                 if (ret)
11911                         return ret;
11912                 memcpy(data, ((char *)&val) + b_offset, b_count);
11913                 len -= b_count;
11914                 offset += b_count;
11915                 eeprom->len += b_count;
11916         }
11917
11918         /* read bytes up to the last 4 byte boundary */
11919         pd = &data[eeprom->len];
11920         for (i = 0; i < (len - (len & 3)); i += 4) {
11921                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11922                 if (ret) {
11923                         eeprom->len += i;
11924                         return ret;
11925                 }
11926                 memcpy(pd + i, &val, 4);
11927         }
11928         eeprom->len += i;
11929
11930         if (len & 3) {
11931                 /* read last bytes not ending on 4 byte boundary */
11932                 pd = &data[eeprom->len];
11933                 b_count = len & 3;
11934                 b_offset = offset + len - b_count;
11935                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11936                 if (ret)
11937                         return ret;
11938                 memcpy(pd, &val, b_count);
11939                 eeprom->len += b_count;
11940         }
11941         return 0;
11942 }
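
/* Worked example (editor's note): a request with offset = 5, len = 9
 * (bytes 5..13) is served as three NVRAM word reads:
 *   1) word at 4,  copy bytes 5..7   (b_offset = 1, b_count = 3)
 *   2) word at 8,  copy bytes 8..11  (the aligned middle loop)
 *   3) word at 12, copy bytes 12..13 (trailing len & 3 = 2 bytes)
 * The staging through 'val' is needed because NVRAM reads are 32-bit.
 */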
11943
11944 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11945 {
11946         struct tg3 *tp = netdev_priv(dev);
11947         int ret;
11948         u32 offset, len, b_offset, odd_len;
11949         u8 *buf;
11950         __be32 start, end;
11951
11952         if (tg3_flag(tp, NO_NVRAM) ||
11953             eeprom->magic != TG3_EEPROM_MAGIC)
11954                 return -EINVAL;
11955
11956         offset = eeprom->offset;
11957         len = eeprom->len;
11958
11959         if ((b_offset = (offset & 3))) {
11960                 /* adjustments to start on required 4 byte boundary */
11961                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11962                 if (ret)
11963                         return ret;
11964                 len += b_offset;
11965                 offset &= ~3;
11966                 if (len < 4)
11967                         len = 4;
11968         }
11969
11970         odd_len = 0;
11971         if (len & 3) {
11972                 /* adjustments to end on required 4 byte boundary */
11973                 odd_len = 1;
11974                 len = (len + 3) & ~3;
11975                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11976                 if (ret)
11977                         return ret;
11978         }
11979
11980         buf = data;
11981         if (b_offset || odd_len) {
11982                 buf = kmalloc(len, GFP_KERNEL);
11983                 if (!buf)
11984                         return -ENOMEM;
11985                 if (b_offset)
11986                         memcpy(buf, &start, 4);
11987                 if (odd_len)
11988                         memcpy(buf+len-4, &end, 4);
11989                 memcpy(buf + b_offset, data, eeprom->len);
11990         }
11991
11992         ret = tg3_nvram_write_block(tp, offset, len, buf);
11993
11994         if (buf != data)
11995                 kfree(buf);
11996
11997         return ret;
11998 }
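
/* Worked example (editor's note): writing 3 bytes at offset 6 becomes
 * a read-modify-write of the enclosing words: 'start' holds the word
 * at 4 (preserving bytes 4..5), 'end' holds the word at 8 (preserving
 * bytes 9..11), the caller's bytes land at 6..8 in the bounce buffer,
 * and the whole 8-byte block is written back aligned at offset 4.
 */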
11999
12000 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
12001 {
12002         struct tg3 *tp = netdev_priv(dev);
12003
12004         if (tg3_flag(tp, USE_PHYLIB)) {
12005                 struct phy_device *phydev;
12006                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12007                         return -EAGAIN;
12008                 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12009                 return phy_ethtool_gset(phydev, cmd);
12010         }
12011
12012         cmd->supported = (SUPPORTED_Autoneg);
12013
12014         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12015                 cmd->supported |= (SUPPORTED_1000baseT_Half |
12016                                    SUPPORTED_1000baseT_Full);
12017
12018         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12019                 cmd->supported |= (SUPPORTED_100baseT_Half |
12020                                   SUPPORTED_100baseT_Full |
12021                                   SUPPORTED_10baseT_Half |
12022                                   SUPPORTED_10baseT_Full |
12023                                   SUPPORTED_TP);
12024                 cmd->port = PORT_TP;
12025         } else {
12026                 cmd->supported |= SUPPORTED_FIBRE;
12027                 cmd->port = PORT_FIBRE;
12028         }
12029
12030         cmd->advertising = tp->link_config.advertising;
12031         if (tg3_flag(tp, PAUSE_AUTONEG)) {
12032                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12033                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12034                                 cmd->advertising |= ADVERTISED_Pause;
12035                         } else {
12036                                 cmd->advertising |= ADVERTISED_Pause |
12037                                                     ADVERTISED_Asym_Pause;
12038                         }
12039                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12040                         cmd->advertising |= ADVERTISED_Asym_Pause;
12041                 }
12042         }
12043         if (netif_running(dev) && tp->link_up) {
12044                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
12045                 cmd->duplex = tp->link_config.active_duplex;
12046                 cmd->lp_advertising = tp->link_config.rmt_adv;
12047                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12048                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12049                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
12050                         else
12051                                 cmd->eth_tp_mdix = ETH_TP_MDI;
12052                 }
12053         } else {
12054                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
12055                 cmd->duplex = DUPLEX_UNKNOWN;
12056                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
12057         }
12058         cmd->phy_address = tp->phy_addr;
12059         cmd->transceiver = XCVR_INTERNAL;
12060         cmd->autoneg = tp->link_config.autoneg;
12061         cmd->maxtxpkt = 0;
12062         cmd->maxrxpkt = 0;
12063         return 0;
12064 }
12065
12066 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
12067 {
12068         struct tg3 *tp = netdev_priv(dev);
12069         u32 speed = ethtool_cmd_speed(cmd);
12070
12071         if (tg3_flag(tp, USE_PHYLIB)) {
12072                 struct phy_device *phydev;
12073                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12074                         return -EAGAIN;
12075                 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12076                 return phy_ethtool_sset(phydev, cmd);
12077         }
12078
12079         if (cmd->autoneg != AUTONEG_ENABLE &&
12080             cmd->autoneg != AUTONEG_DISABLE)
12081                 return -EINVAL;
12082
12083         if (cmd->autoneg == AUTONEG_DISABLE &&
12084             cmd->duplex != DUPLEX_FULL &&
12085             cmd->duplex != DUPLEX_HALF)
12086                 return -EINVAL;
12087
12088         if (cmd->autoneg == AUTONEG_ENABLE) {
12089                 u32 mask = ADVERTISED_Autoneg |
12090                            ADVERTISED_Pause |
12091                            ADVERTISED_Asym_Pause;
12092
12093                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12094                         mask |= ADVERTISED_1000baseT_Half |
12095                                 ADVERTISED_1000baseT_Full;
12096
12097                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12098                         mask |= ADVERTISED_100baseT_Half |
12099                                 ADVERTISED_100baseT_Full |
12100                                 ADVERTISED_10baseT_Half |
12101                                 ADVERTISED_10baseT_Full |
12102                                 ADVERTISED_TP;
12103                 else
12104                         mask |= ADVERTISED_FIBRE;
12105
12106                 if (cmd->advertising & ~mask)
12107                         return -EINVAL;
12108
12109                 mask &= (ADVERTISED_1000baseT_Half |
12110                          ADVERTISED_1000baseT_Full |
12111                          ADVERTISED_100baseT_Half |
12112                          ADVERTISED_100baseT_Full |
12113                          ADVERTISED_10baseT_Half |
12114                          ADVERTISED_10baseT_Full);
12115
12116                 cmd->advertising &= mask;
12117         } else {
12118                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12119                         if (speed != SPEED_1000)
12120                                 return -EINVAL;
12121
12122                         if (cmd->duplex != DUPLEX_FULL)
12123                                 return -EINVAL;
12124                 } else {
12125                         if (speed != SPEED_100 &&
12126                             speed != SPEED_10)
12127                                 return -EINVAL;
12128                 }
12129         }
12130
12131         tg3_full_lock(tp, 0);
12132
12133         tp->link_config.autoneg = cmd->autoneg;
12134         if (cmd->autoneg == AUTONEG_ENABLE) {
12135                 tp->link_config.advertising = (cmd->advertising |
12136                                               ADVERTISED_Autoneg);
12137                 tp->link_config.speed = SPEED_UNKNOWN;
12138                 tp->link_config.duplex = DUPLEX_UNKNOWN;
12139         } else {
12140                 tp->link_config.advertising = 0;
12141                 tp->link_config.speed = speed;
12142                 tp->link_config.duplex = cmd->duplex;
12143         }
12144
12145         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12146
12147         tg3_warn_mgmt_link_flap(tp);
12148
12149         if (netif_running(dev))
12150                 tg3_setup_phy(tp, true);
12151
12152         tg3_full_unlock(tp);
12153
12154         return 0;
12155 }
12156
12157 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12158 {
12159         struct tg3 *tp = netdev_priv(dev);
12160
12161         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12162         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12163         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12164         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12165 }
12166
12167 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12168 {
12169         struct tg3 *tp = netdev_priv(dev);
12170
12171         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12172                 wol->supported = WAKE_MAGIC;
12173         else
12174                 wol->supported = 0;
12175         wol->wolopts = 0;
12176         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12177                 wol->wolopts = WAKE_MAGIC;
12178         memset(&wol->sopass, 0, sizeof(wol->sopass));
12179 }
12180
12181 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12182 {
12183         struct tg3 *tp = netdev_priv(dev);
12184         struct device *dp = &tp->pdev->dev;
12185
12186         if (wol->wolopts & ~WAKE_MAGIC)
12187                 return -EINVAL;
12188         if ((wol->wolopts & WAKE_MAGIC) &&
12189             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12190                 return -EINVAL;
12191
12192         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12193
12194         if (device_may_wakeup(dp))
12195                 tg3_flag_set(tp, WOL_ENABLE);
12196         else
12197                 tg3_flag_clear(tp, WOL_ENABLE);
12198
12199         return 0;
12200 }
12201
12202 static u32 tg3_get_msglevel(struct net_device *dev)
12203 {
12204         struct tg3 *tp = netdev_priv(dev);
12205         return tp->msg_enable;
12206 }
12207
12208 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12209 {
12210         struct tg3 *tp = netdev_priv(dev);
12211         tp->msg_enable = value;
12212 }
12213
12214 static int tg3_nway_reset(struct net_device *dev)
12215 {
12216         struct tg3 *tp = netdev_priv(dev);
12217         int r;
12218
12219         if (!netif_running(dev))
12220                 return -EAGAIN;
12221
12222         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12223                 return -EINVAL;
12224
12225         tg3_warn_mgmt_link_flap(tp);
12226
12227         if (tg3_flag(tp, USE_PHYLIB)) {
12228                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12229                         return -EAGAIN;
12230                 r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
12231         } else {
12232                 u32 bmcr;
12233
12234                 spin_lock_bh(&tp->lock);
12235                 r = -EINVAL;
12236                 tg3_readphy(tp, MII_BMCR, &bmcr);       /* first read is discarded */
12237                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12238                     ((bmcr & BMCR_ANENABLE) ||
12239                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12240                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12241                                                    BMCR_ANENABLE);
12242                         r = 0;
12243                 }
12244                 spin_unlock_bh(&tp->lock);
12245         }
12246
12247         return r;
12248 }
12249
12250 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12251 {
12252         struct tg3 *tp = netdev_priv(dev);
12253
12254         ering->rx_max_pending = tp->rx_std_ring_mask;
12255         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12256                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12257         else
12258                 ering->rx_jumbo_max_pending = 0;
12259
12260         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12261
12262         ering->rx_pending = tp->rx_pending;
12263         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12264                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12265         else
12266                 ering->rx_jumbo_pending = 0;
12267
12268         ering->tx_pending = tp->napi[0].tx_pending;
12269 }
12270
12271 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12272 {
12273         struct tg3 *tp = netdev_priv(dev);
12274         int i, irq_sync = 0, err = 0;
12275
12276         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12277             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12278             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12279             (ering->tx_pending <= MAX_SKB_FRAGS) ||
12280             (tg3_flag(tp, TSO_BUG) &&
12281              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12282                 return -EINVAL;
12283
12284         if (netif_running(dev)) {
12285                 tg3_phy_stop(tp);
12286                 tg3_netif_stop(tp);
12287                 irq_sync = 1;
12288         }
12289
12290         tg3_full_lock(tp, irq_sync);
12291
12292         tp->rx_pending = ering->rx_pending;
12293
12294         if (tg3_flag(tp, MAX_RXPEND_64) &&
12295             tp->rx_pending > 63)
12296                 tp->rx_pending = 63;
12297         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12298
12299         for (i = 0; i < tp->irq_max; i++)
12300                 tp->napi[i].tx_pending = ering->tx_pending;
12301
12302         if (netif_running(dev)) {
12303                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12304                 err = tg3_restart_hw(tp, false);
12305                 if (!err)
12306                         tg3_netif_start(tp);
12307         }
12308
12309         tg3_full_unlock(tp);
12310
12311         if (irq_sync && !err)
12312                 tg3_phy_start(tp);
12313
12314         return err;
12315 }
12316
12317 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12318 {
12319         struct tg3 *tp = netdev_priv(dev);
12320
12321         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12322
12323         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12324                 epause->rx_pause = 1;
12325         else
12326                 epause->rx_pause = 0;
12327
12328         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12329                 epause->tx_pause = 1;
12330         else
12331                 epause->tx_pause = 0;
12332 }
12333
12334 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12335 {
12336         struct tg3 *tp = netdev_priv(dev);
12337         int err = 0;
12338
12339         if (tp->link_config.autoneg == AUTONEG_ENABLE)
12340                 tg3_warn_mgmt_link_flap(tp);
12341
12342         if (tg3_flag(tp, USE_PHYLIB)) {
12343                 u32 newadv;
12344                 struct phy_device *phydev;
12345
12346                 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12347
12348                 if (!(phydev->supported & SUPPORTED_Pause) ||
12349                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12350                      (epause->rx_pause != epause->tx_pause)))
12351                         return -EINVAL;
12352
12353                 tp->link_config.flowctrl = 0;
12354                 if (epause->rx_pause) {
12355                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12356
12357                         if (epause->tx_pause) {
12358                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12359                                 newadv = ADVERTISED_Pause;
12360                         } else
12361                                 newadv = ADVERTISED_Pause |
12362                                          ADVERTISED_Asym_Pause;
12363                 } else if (epause->tx_pause) {
12364                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12365                         newadv = ADVERTISED_Asym_Pause;
12366                 } else
12367                         newadv = 0;
12368
12369                 if (epause->autoneg)
12370                         tg3_flag_set(tp, PAUSE_AUTONEG);
12371                 else
12372                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12373
12374                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12375                         u32 oldadv = phydev->advertising &
12376                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12377                         if (oldadv != newadv) {
12378                                 phydev->advertising &=
12379                                         ~(ADVERTISED_Pause |
12380                                           ADVERTISED_Asym_Pause);
12381                                 phydev->advertising |= newadv;
12382                                 if (phydev->autoneg) {
12383                                         /*
12384                                          * Always renegotiate the link to
12385                                          * inform our link partner of our
12386                                          * flow control settings, even if the
12387                                          * flow control is forced.  Let
12388                                          * tg3_adjust_link() do the final
12389                                          * flow control setup.
12390                                          */
12391                                         return phy_start_aneg(phydev);
12392                                 }
12393                         }
12394
12395                         if (!epause->autoneg)
12396                                 tg3_setup_flow_control(tp, 0, 0);
12397                 } else {
12398                         tp->link_config.advertising &=
12399                                         ~(ADVERTISED_Pause |
12400                                           ADVERTISED_Asym_Pause);
12401                         tp->link_config.advertising |= newadv;
12402                 }
12403         } else {
12404                 int irq_sync = 0;
12405
12406                 if (netif_running(dev)) {
12407                         tg3_netif_stop(tp);
12408                         irq_sync = 1;
12409                 }
12410
12411                 tg3_full_lock(tp, irq_sync);
12412
12413                 if (epause->autoneg)
12414                         tg3_flag_set(tp, PAUSE_AUTONEG);
12415                 else
12416                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12417                 if (epause->rx_pause)
12418                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12419                 else
12420                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12421                 if (epause->tx_pause)
12422                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12423                 else
12424                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12425
12426                 if (netif_running(dev)) {
12427                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12428                         err = tg3_restart_hw(tp, false);
12429                         if (!err)
12430                                 tg3_netif_start(tp);
12431                 }
12432
12433                 tg3_full_unlock(tp);
12434         }
12435
12436         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12437
12438         return err;
12439 }
12440
12441 static int tg3_get_sset_count(struct net_device *dev, int sset)
12442 {
12443         switch (sset) {
12444         case ETH_SS_TEST:
12445                 return TG3_NUM_TEST;
12446         case ETH_SS_STATS:
12447                 return TG3_NUM_STATS;
12448         default:
12449                 return -EOPNOTSUPP;
12450         }
12451 }
12452
12453 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12454                          u32 *rules __always_unused)
12455 {
12456         struct tg3 *tp = netdev_priv(dev);
12457
12458         if (!tg3_flag(tp, SUPPORT_MSIX))
12459                 return -EOPNOTSUPP;
12460
12461         switch (info->cmd) {
12462         case ETHTOOL_GRXRINGS:
12463                 if (netif_running(tp->dev))
12464                         info->data = tp->rxq_cnt;
12465                 else {
12466                         info->data = num_online_cpus();
12467                         if (info->data > TG3_RSS_MAX_NUM_QS)
12468                                 info->data = TG3_RSS_MAX_NUM_QS;
12469                 }
12470
12471                 /* The first interrupt vector only
12472                  * handles link interrupts.
12473                  */
12474                 info->data -= 1;
12475                 return 0;
12476
12477         default:
12478                 return -EOPNOTSUPP;
12479         }
12480 }
12481
12482 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12483 {
12484         u32 size = 0;
12485         struct tg3 *tp = netdev_priv(dev);
12486
12487         if (tg3_flag(tp, SUPPORT_MSIX))
12488                 size = TG3_RSS_INDIR_TBL_SIZE;
12489
12490         return size;
12491 }
12492
12493 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12494 {
12495         struct tg3 *tp = netdev_priv(dev);
12496         int i;
12497
12498         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12499                 indir[i] = tp->rss_ind_tbl[i];
12500
12501         return 0;
12502 }
12503
12504 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12505 {
12506         struct tg3 *tp = netdev_priv(dev);
12507         size_t i;
12508
12509         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12510                 tp->rss_ind_tbl[i] = indir[i];
12511
12512         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12513                 return 0;
12514
12515         /* It is legal to write the indirection
12516          * table while the device is running.
12517          */
12518         tg3_full_lock(tp, 0);
12519         tg3_rss_write_indir_tbl(tp);
12520         tg3_full_unlock(tp);
12521
12522         return 0;
12523 }
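
/* Editor's note -- from userspace these hooks back ethtool's RSS
 * indirection commands, e.g. (illustrative):
 *
 *      ethtool -x eth0             # dump the current table
 *      ethtool -X eth0 equal 4     # spread hash buckets over 4 rx rings
 *
 * Each of the TG3_RSS_INDIR_TBL_SIZE entries maps one hash bucket to an
 * rx queue index.
 */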
12524
12525 static void tg3_get_channels(struct net_device *dev,
12526                              struct ethtool_channels *channel)
12527 {
12528         struct tg3 *tp = netdev_priv(dev);
12529         u32 deflt_qs = netif_get_num_default_rss_queues();
12530
12531         channel->max_rx = tp->rxq_max;
12532         channel->max_tx = tp->txq_max;
12533
12534         if (netif_running(dev)) {
12535                 channel->rx_count = tp->rxq_cnt;
12536                 channel->tx_count = tp->txq_cnt;
12537         } else {
12538                 if (tp->rxq_req)
12539                         channel->rx_count = tp->rxq_req;
12540                 else
12541                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12542
12543                 if (tp->txq_req)
12544                         channel->tx_count = tp->txq_req;
12545                 else
12546                         channel->tx_count = min(deflt_qs, tp->txq_max);
12547         }
12548 }
12549
12550 static int tg3_set_channels(struct net_device *dev,
12551                             struct ethtool_channels *channel)
12552 {
12553         struct tg3 *tp = netdev_priv(dev);
12554
12555         if (!tg3_flag(tp, SUPPORT_MSIX))
12556                 return -EOPNOTSUPP;
12557
12558         if (channel->rx_count > tp->rxq_max ||
12559             channel->tx_count > tp->txq_max)
12560                 return -EINVAL;
12561
12562         tp->rxq_req = channel->rx_count;
12563         tp->txq_req = channel->tx_count;
12564
12565         if (!netif_running(dev))
12566                 return 0;
12567
12568         tg3_stop(tp);
12569
12570         tg3_carrier_off(tp);
12571
12572         tg3_start(tp, true, false, false);
12573
12574         return 0;
12575 }
12576
12577 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12578 {
12579         switch (stringset) {
12580         case ETH_SS_STATS:
12581                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12582                 break;
12583         case ETH_SS_TEST:
12584                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12585                 break;
12586         default:
12587                 WARN_ON(1);     /* we need a WARN() */
12588                 break;
12589         }
12590 }
12591
12592 static int tg3_set_phys_id(struct net_device *dev,
12593                             enum ethtool_phys_id_state state)
12594 {
12595         struct tg3 *tp = netdev_priv(dev);
12596
12597         if (!netif_running(tp->dev))
12598                 return -EAGAIN;
12599
12600         switch (state) {
12601         case ETHTOOL_ID_ACTIVE:
12602                 return 1;       /* cycle on/off once per second */
12603
12604         case ETHTOOL_ID_ON:
12605                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12606                      LED_CTRL_1000MBPS_ON |
12607                      LED_CTRL_100MBPS_ON |
12608                      LED_CTRL_10MBPS_ON |
12609                      LED_CTRL_TRAFFIC_OVERRIDE |
12610                      LED_CTRL_TRAFFIC_BLINK |
12611                      LED_CTRL_TRAFFIC_LED);
12612                 break;
12613
12614         case ETHTOOL_ID_OFF:
12615                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12616                      LED_CTRL_TRAFFIC_OVERRIDE);
12617                 break;
12618
12619         case ETHTOOL_ID_INACTIVE:
12620                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12621                 break;
12622         }
12623
12624         return 0;
12625 }
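
/* Editor's note -- this implements "ethtool -p eth0 <seconds>": the
 * ethtool core calls ETHTOOL_ID_ACTIVE first (returning 1 requests one
 * on/off cycle per second), then alternates ETHTOOL_ID_ON/_OFF until
 * the timeout, and finally ETHTOOL_ID_INACTIVE restores tp->led_ctrl.
 */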
12626
12627 static void tg3_get_ethtool_stats(struct net_device *dev,
12628                                    struct ethtool_stats *estats, u64 *tmp_stats)
12629 {
12630         struct tg3 *tp = netdev_priv(dev);
12631
12632         if (tp->hw_stats)
12633                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12634         else
12635                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12636 }
12637
12638 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12639 {
12640         int i;
12641         __be32 *buf;
12642         u32 offset = 0, len = 0;
12643         u32 magic, val;
12644
12645         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12646                 return NULL;
12647
12648         if (magic == TG3_EEPROM_MAGIC) {
12649                 for (offset = TG3_NVM_DIR_START;
12650                      offset < TG3_NVM_DIR_END;
12651                      offset += TG3_NVM_DIRENT_SIZE) {
12652                         if (tg3_nvram_read(tp, offset, &val))
12653                                 return NULL;
12654
12655                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12656                             TG3_NVM_DIRTYPE_EXTVPD)
12657                                 break;
12658                 }
12659
12660                 if (offset != TG3_NVM_DIR_END) {
12661                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12662                         if (tg3_nvram_read(tp, offset + 4, &offset))
12663                                 return NULL;
12664
12665                         offset = tg3_nvram_logical_addr(tp, offset);
12666                 }
12667         }
12668
12669         if (!offset || !len) {
12670                 offset = TG3_NVM_VPD_OFF;
12671                 len = TG3_NVM_VPD_LEN;
12672         }
12673
12674         buf = kmalloc(len, GFP_KERNEL);
12675         if (buf == NULL)
12676                 return NULL;
12677
12678         if (magic == TG3_EEPROM_MAGIC) {
12679                 for (i = 0; i < len; i += 4) {
12680                         /* The data is in little-endian format in NVRAM.
12681                          * Use the big-endian read routines to preserve
12682                          * the byte order as it exists in NVRAM.
12683                          */
12684                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12685                                 goto error;
12686                 }
12687         } else {
12688                 u8 *ptr;
12689                 ssize_t cnt;
12690                 unsigned int pos = 0;
12691
12692                 ptr = (u8 *)&buf[0];
12693                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12694                         cnt = pci_read_vpd(tp->pdev, pos,
12695                                            len - pos, ptr);
12696                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
12697                                 cnt = 0;
12698                         else if (cnt < 0)
12699                                 goto error;
12700                 }
12701                 if (pos != len)
12702                         goto error;
12703         }
12704
12705         *vpdlen = len;
12706
12707         return buf;
12708
12709 error:
12710         kfree(buf);
12711         return NULL;
12712 }
12713
12714 #define NVRAM_TEST_SIZE 0x100
12715 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12716 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12717 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12718 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12719 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12720 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12721 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12722 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12723
12724 static int tg3_test_nvram(struct tg3 *tp)
12725 {
12726         u32 csum, magic, len;
12727         __be32 *buf;
12728         int i, j, k, err = 0, size;
12729
12730         if (tg3_flag(tp, NO_NVRAM))
12731                 return 0;
12732
12733         if (tg3_nvram_read(tp, 0, &magic) != 0)
12734                 return -EIO;
12735
12736         if (magic == TG3_EEPROM_MAGIC)
12737                 size = NVRAM_TEST_SIZE;
12738         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12739                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12740                     TG3_EEPROM_SB_FORMAT_1) {
12741                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12742                         case TG3_EEPROM_SB_REVISION_0:
12743                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12744                                 break;
12745                         case TG3_EEPROM_SB_REVISION_2:
12746                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12747                                 break;
12748                         case TG3_EEPROM_SB_REVISION_3:
12749                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12750                                 break;
12751                         case TG3_EEPROM_SB_REVISION_4:
12752                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12753                                 break;
12754                         case TG3_EEPROM_SB_REVISION_5:
12755                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12756                                 break;
12757                         case TG3_EEPROM_SB_REVISION_6:
12758                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12759                                 break;
12760                         default:
12761                                 return -EIO;
12762                         }
12763                 } else
12764                         return 0;
12765         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12766                 size = NVRAM_SELFBOOT_HW_SIZE;
12767         else
12768                 return -EIO;
12769
12770         buf = kmalloc(size, GFP_KERNEL);
12771         if (buf == NULL)
12772                 return -ENOMEM;
12773
12774         err = -EIO;
12775         for (i = 0, j = 0; i < size; i += 4, j++) {
12776                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12777                 if (err)
12778                         break;
12779         }
12780         if (i < size)
12781                 goto out;
12782
12783         /* Selfboot format */
12784         magic = be32_to_cpu(buf[0]);
12785         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12786             TG3_EEPROM_MAGIC_FW) {
12787                 u8 *buf8 = (u8 *) buf, csum8 = 0;
12788
12789                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12790                     TG3_EEPROM_SB_REVISION_2) {
12791                         /* For rev 2, the csum doesn't include the MBA. */
12792                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12793                                 csum8 += buf8[i];
12794                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12795                                 csum8 += buf8[i];
12796                 } else {
12797                         for (i = 0; i < size; i++)
12798                                 csum8 += buf8[i];
12799                 }
12800
12801                 if (csum8 == 0) {
12802                         err = 0;
12803                         goto out;
12804                 }
12805
12806                 err = -EIO;
12807                 goto out;
12808         }
12809
12810         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12811             TG3_EEPROM_MAGIC_HW) {
12812                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12813                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12814                 u8 *buf8 = (u8 *) buf;
12815
12816                 /* Separate the parity bits and the data bytes.  */
12817                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12818                         if ((i == 0) || (i == 8)) {
12819                                 int l;
12820                                 u8 msk;
12821
12822                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12823                                         parity[k++] = buf8[i] & msk;
12824                                 i++;
12825                         } else if (i == 16) {
12826                                 int l;
12827                                 u8 msk;
12828
12829                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12830                                         parity[k++] = buf8[i] & msk;
12831                                 i++;
12832
12833                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12834                                         parity[k++] = buf8[i] & msk;
12835                                 i++;
12836                         }
12837                         data[j++] = buf8[i];
12838                 }
12839
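                /* Sanity check for odd parity: the stored parity bit must
                 * be set exactly when the data byte has an even popcount,
                 * so data plus parity always carry an odd number of set
                 * bits.  Both branches below reject the even-total cases.
                 */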
12840                 err = -EIO;
12841                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12842                         u8 hw8 = hweight8(data[i]);
12843
12844                         if ((hw8 & 0x1) && parity[i])
12845                                 goto out;
12846                         else if (!(hw8 & 0x1) && !parity[i])
12847                                 goto out;
12848                 }
12849                 err = 0;
12850                 goto out;
12851         }
12852
12853         err = -EIO;
12854
12855         /* Bootstrap checksum at offset 0x10 */
12856         csum = calc_crc((unsigned char *) buf, 0x10);
12857         if (csum != le32_to_cpu(buf[0x10/4]))
12858                 goto out;
12859
12860         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12861         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12862         if (csum != le32_to_cpu(buf[0xfc/4]))
12863                 goto out;
12864
12865         kfree(buf);
12866
12867         buf = tg3_vpd_readblock(tp, &len);
12868         if (!buf)
12869                 return -ENOMEM;
12870
12871         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12872         if (i > 0) {
12873                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12874                 if (j < 0)
12875                         goto out;
12876
12877                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12878                         goto out;
12879
12880                 i += PCI_VPD_LRDT_TAG_SIZE;
12881                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12882                                               PCI_VPD_RO_KEYWORD_CHKSUM);
12883                 if (j > 0) {
12884                         u8 csum8 = 0;
12885
12886                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
12887
12888                         for (i = 0; i <= j; i++)
12889                                 csum8 += ((u8 *)buf)[i];
12890
12891                         if (csum8)
12892                                 goto out;
12893                 }
12894         }
12895
12896         err = 0;
12897
12898 out:
12899         kfree(buf);
12900         return err;
12901 }
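
/* Three integrity schemes meet in tg3_test_nvram(): legacy EEPROM images
 * carry CRCs (bootstrap block checksum at 0x10, manufacturing block at
 * 0xfc), firmware self-boot images use an 8-bit byte sum that must wrap
 * to zero, and hardware self-boot images protect each byte with the odd
 * parity bits separated out above.  A standalone sketch of the
 * sum-to-zero rule follows; the image bytes are made up for illustration.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t csum8(const uint8_t *p, size_t len)
{
        uint8_t sum = 0;

        while (len--)
                sum += *p++;
        return sum;
}

int main(void)
{
        uint8_t img[16] = { 0xa5, 0x36, 0x12, 0x7f };

        /* Store the two's complement so the whole image sums to zero. */
        img[15] = (uint8_t)(0u - csum8(img, 15));

        printf("image %s\n", csum8(img, sizeof(img)) == 0 ? "ok" : "corrupt");
        return 0;
}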
12902
12903 #define TG3_SERDES_TIMEOUT_SEC  2
12904 #define TG3_COPPER_TIMEOUT_SEC  6
12905
12906 static int tg3_test_link(struct tg3 *tp)
12907 {
12908         int i, max;
12909
12910         if (!netif_running(tp->dev))
12911                 return -ENODEV;
12912
12913         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12914                 max = TG3_SERDES_TIMEOUT_SEC;
12915         else
12916                 max = TG3_COPPER_TIMEOUT_SEC;
12917
12918         for (i = 0; i < max; i++) {
12919                 if (tp->link_up)
12920                         return 0;
12921
12922                 if (msleep_interruptible(1000))
12923                         break;
12924         }
12925
12926         return -EIO;
12927 }
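
/* The asymmetric budgets above presumably reflect the media: a SerDes
 * link either syncs within a couple of seconds or not at all, while
 * copper autonegotiation can legitimately take several seconds to
 * complete, hence the longer six-second allowance.
 */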
12928
12929 /* Only test the commonly used registers */
12930 static int tg3_test_registers(struct tg3 *tp)
12931 {
12932         int i, is_5705, is_5750;
12933         u32 offset, read_mask, write_mask, val, save_val, read_val;
12934         static struct {
12935                 u16 offset;
12936                 u16 flags;
12937 #define TG3_FL_5705     0x1
12938 #define TG3_FL_NOT_5705 0x2
12939 #define TG3_FL_NOT_5788 0x4
12940 #define TG3_FL_NOT_5750 0x8
12941                 u32 read_mask;
12942                 u32 write_mask;
12943         } reg_tbl[] = {
12944                 /* MAC Control Registers */
12945                 { MAC_MODE, TG3_FL_NOT_5705,
12946                         0x00000000, 0x00ef6f8c },
12947                 { MAC_MODE, TG3_FL_5705,
12948                         0x00000000, 0x01ef6b8c },
12949                 { MAC_STATUS, TG3_FL_NOT_5705,
12950                         0x03800107, 0x00000000 },
12951                 { MAC_STATUS, TG3_FL_5705,
12952                         0x03800100, 0x00000000 },
12953                 { MAC_ADDR_0_HIGH, 0x0000,
12954                         0x00000000, 0x0000ffff },
12955                 { MAC_ADDR_0_LOW, 0x0000,
12956                         0x00000000, 0xffffffff },
12957                 { MAC_RX_MTU_SIZE, 0x0000,
12958                         0x00000000, 0x0000ffff },
12959                 { MAC_TX_MODE, 0x0000,
12960                         0x00000000, 0x00000070 },
12961                 { MAC_TX_LENGTHS, 0x0000,
12962                         0x00000000, 0x00003fff },
12963                 { MAC_RX_MODE, TG3_FL_NOT_5705,
12964                         0x00000000, 0x000007fc },
12965                 { MAC_RX_MODE, TG3_FL_5705,
12966                         0x00000000, 0x000007dc },
12967                 { MAC_HASH_REG_0, 0x0000,
12968                         0x00000000, 0xffffffff },
12969                 { MAC_HASH_REG_1, 0x0000,
12970                         0x00000000, 0xffffffff },
12971                 { MAC_HASH_REG_2, 0x0000,
12972                         0x00000000, 0xffffffff },
12973                 { MAC_HASH_REG_3, 0x0000,
12974                         0x00000000, 0xffffffff },
12975
12976                 /* Receive Data and Receive BD Initiator Control Registers. */
12977                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12978                         0x00000000, 0xffffffff },
12979                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12980                         0x00000000, 0xffffffff },
12981                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12982                         0x00000000, 0x00000003 },
12983                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12984                         0x00000000, 0xffffffff },
12985                 { RCVDBDI_STD_BD+0, 0x0000,
12986                         0x00000000, 0xffffffff },
12987                 { RCVDBDI_STD_BD+4, 0x0000,
12988                         0x00000000, 0xffffffff },
12989                 { RCVDBDI_STD_BD+8, 0x0000,
12990                         0x00000000, 0xffff0002 },
12991                 { RCVDBDI_STD_BD+0xc, 0x0000,
12992                         0x00000000, 0xffffffff },
12993
12994                 /* Receive BD Initiator Control Registers. */
12995                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12996                         0x00000000, 0xffffffff },
12997                 { RCVBDI_STD_THRESH, TG3_FL_5705,
12998                         0x00000000, 0x000003ff },
12999                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13000                         0x00000000, 0xffffffff },
13001
13002                 /* Host Coalescing Control Registers. */
13003                 { HOSTCC_MODE, TG3_FL_NOT_5705,
13004                         0x00000000, 0x00000004 },
13005                 { HOSTCC_MODE, TG3_FL_5705,
13006                         0x00000000, 0x000000f6 },
13007                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13008                         0x00000000, 0xffffffff },
13009                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13010                         0x00000000, 0x000003ff },
13011                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13012                         0x00000000, 0xffffffff },
13013                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13014                         0x00000000, 0x000003ff },
13015                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13016                         0x00000000, 0xffffffff },
13017                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13018                         0x00000000, 0x000000ff },
13019                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13020                         0x00000000, 0xffffffff },
13021                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13022                         0x00000000, 0x000000ff },
13023                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13024                         0x00000000, 0xffffffff },
13025                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13026                         0x00000000, 0xffffffff },
13027                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13028                         0x00000000, 0xffffffff },
13029                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13030                         0x00000000, 0x000000ff },
13031                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13032                         0x00000000, 0xffffffff },
13033                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13034                         0x00000000, 0x000000ff },
13035                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13036                         0x00000000, 0xffffffff },
13037                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13038                         0x00000000, 0xffffffff },
13039                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13040                         0x00000000, 0xffffffff },
13041                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13042                         0x00000000, 0xffffffff },
13043                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13044                         0x00000000, 0xffffffff },
13045                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13046                         0xffffffff, 0x00000000 },
13047                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13048                         0xffffffff, 0x00000000 },
13049
13050                 /* Buffer Manager Control Registers. */
13051                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13052                         0x00000000, 0x007fff80 },
13053                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13054                         0x00000000, 0x007fffff },
13055                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13056                         0x00000000, 0x0000003f },
13057                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13058                         0x00000000, 0x000001ff },
13059                 { BUFMGR_MB_HIGH_WATER, 0x0000,
13060                         0x00000000, 0x000001ff },
13061                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13062                         0xffffffff, 0x00000000 },
13063                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13064                         0xffffffff, 0x00000000 },
13065
13066                 /* Mailbox Registers */
13067                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13068                         0x00000000, 0x000001ff },
13069                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13070                         0x00000000, 0x000001ff },
13071                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13072                         0x00000000, 0x000007ff },
13073                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13074                         0x00000000, 0x000001ff },
13075
13076                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13077         };
13078
13079         is_5705 = is_5750 = 0;
13080         if (tg3_flag(tp, 5705_PLUS)) {
13081                 is_5705 = 1;
13082                 if (tg3_flag(tp, 5750_PLUS))
13083                         is_5750 = 1;
13084         }
13085
13086         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13087                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13088                         continue;
13089
13090                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13091                         continue;
13092
13093                 if (tg3_flag(tp, IS_5788) &&
13094                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
13095                         continue;
13096
13097                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13098                         continue;
13099
13100                 offset = (u32) reg_tbl[i].offset;
13101                 read_mask = reg_tbl[i].read_mask;
13102                 write_mask = reg_tbl[i].write_mask;
13103
13104                 /* Save the original register content */
13105                 save_val = tr32(offset);
13106
13107                 /* Determine the read-only value. */
13108                 read_val = save_val & read_mask;
13109
13110                 /* Write zero to the register, then make sure the read-only bits
13111                  * are not changed and the read/write bits are all zeros.
13112                  */
13113                 tw32(offset, 0);
13114
13115                 val = tr32(offset);
13116
13117                 /* Test the read-only and read/write bits. */
13118                 if (((val & read_mask) != read_val) || (val & write_mask))
13119                         goto out;
13120
13121                 /* Write ones to all the bits defined by RdMask and WrMask, then
13122                  * make sure the read-only bits are not changed and the
13123                  * read/write bits are all ones.
13124                  */
13125                 tw32(offset, read_mask | write_mask);
13126
13127                 val = tr32(offset);
13128
13129                 /* Test the read-only bits. */
13130                 if ((val & read_mask) != read_val)
13131                         goto out;
13132
13133                 /* Test the read/write bits. */
13134                 if ((val & write_mask) != write_mask)
13135                         goto out;
13136
13137                 tw32(offset, save_val);
13138         }
13139
13140         return 0;
13141
13142 out:
13143         if (netif_msg_hw(tp))
13144                 netdev_err(tp->dev,
13145                            "Register test failed at offset %x\n", offset);
13146         tw32(offset, save_val);
13147         return -EIO;
13148 }
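
/* The table-driven probe above exercises each register twice: write all
 * zeros, then all ones under (read_mask | write_mask), checking after
 * each write that the read-only field never moves and the read/write
 * field follows the written value, before restoring the original
 * contents.  A self-contained model of that check against a simulated
 * register; the mask values here are arbitrary, not taken from real tg3
 * hardware.
 */
#include <stdint.h>
#include <stdio.h>

#define RO_MASK 0x03800100u             /* bits the device owns */
#define RW_MASK 0x000007fcu             /* bits software may set */

static uint32_t sim_reg = 0x03800100;   /* simulated hardware register */

static void wr(uint32_t v) { sim_reg = (sim_reg & RO_MASK) | (v & RW_MASK); }
static uint32_t rd(void)   { return sim_reg; }

static int probe(void)
{
        uint32_t save = rd(), ro = save & RO_MASK, v;

        wr(0);
        v = rd();
        if ((v & RO_MASK) != ro || (v & RW_MASK))
                return -1;

        wr(RO_MASK | RW_MASK);
        v = rd();
        if ((v & RO_MASK) != ro || (v & RW_MASK) != RW_MASK)
                return -1;

        wr(save);
        return 0;
}

int main(void)
{
        printf("register probe %s\n", probe() ? "failed" : "passed");
        return 0;
}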
13149
13150 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13151 {
13152         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13153         int i;
13154         u32 j;
13155
13156         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13157                 for (j = 0; j < len; j += 4) {
13158                         u32 val;
13159
13160                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13161                         tg3_read_mem(tp, offset + j, &val);
13162                         if (val != test_pattern[i])
13163                                 return -EIO;
13164                 }
13165         }
13166         return 0;
13167 }
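
/* The three patterns cover the classic failure modes: all zeros and all
 * ones catch stuck-at bits, while 0xaa55a55a alternates neighbours to
 * catch coupling faults.  A standalone sketch of the same write-then-
 * read-back sweep, with ordinary memory standing in for the NIC's
 * internal SRAM (volatile keeps the compiler from eliding the reads):
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static const uint32_t patterns[] = { 0x00000000, 0xffffffff, 0xaa55a55a };

static int mem_test(volatile uint32_t *mem, size_t words)
{
        size_t p, i;

        for (p = 0; p < sizeof(patterns) / sizeof(patterns[0]); p++)
                for (i = 0; i < words; i++) {
                        mem[i] = patterns[p];
                        if (mem[i] != patterns[p])
                                return -1;      /* stuck or coupled bit */
                }
        return 0;
}

int main(void)
{
        static uint32_t sram[1024];

        printf("memory test %s\n",
               mem_test(sram, sizeof(sram) / sizeof(sram[0])) ?
               "failed" : "passed");
        return 0;
}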
13168
13169 static int tg3_test_memory(struct tg3 *tp)
13170 {
13171         static struct mem_entry {
13172                 u32 offset;
13173                 u32 len;
13174         } mem_tbl_570x[] = {
13175                 { 0x00000000, 0x00b50},
13176                 { 0x00002000, 0x1c000},
13177                 { 0xffffffff, 0x00000}
13178         }, mem_tbl_5705[] = {
13179                 { 0x00000100, 0x0000c},
13180                 { 0x00000200, 0x00008},
13181                 { 0x00004000, 0x00800},
13182                 { 0x00006000, 0x01000},
13183                 { 0x00008000, 0x02000},
13184                 { 0x00010000, 0x0e000},
13185                 { 0xffffffff, 0x00000}
13186         }, mem_tbl_5755[] = {
13187                 { 0x00000200, 0x00008},
13188                 { 0x00004000, 0x00800},
13189                 { 0x00006000, 0x00800},
13190                 { 0x00008000, 0x02000},
13191                 { 0x00010000, 0x0c000},
13192                 { 0xffffffff, 0x00000}
13193         }, mem_tbl_5906[] = {
13194                 { 0x00000200, 0x00008},
13195                 { 0x00004000, 0x00400},
13196                 { 0x00006000, 0x00400},
13197                 { 0x00008000, 0x01000},
13198                 { 0x00010000, 0x01000},
13199                 { 0xffffffff, 0x00000}
13200         }, mem_tbl_5717[] = {
13201                 { 0x00000200, 0x00008},
13202                 { 0x00010000, 0x0a000},
13203                 { 0x00020000, 0x13c00},
13204                 { 0xffffffff, 0x00000}
13205         }, mem_tbl_57765[] = {
13206                 { 0x00000200, 0x00008},
13207                 { 0x00004000, 0x00800},
13208                 { 0x00006000, 0x09800},
13209                 { 0x00010000, 0x0a000},
13210                 { 0xffffffff, 0x00000}
13211         };
13212         struct mem_entry *mem_tbl;
13213         int err = 0;
13214         int i;
13215
13216         if (tg3_flag(tp, 5717_PLUS))
13217                 mem_tbl = mem_tbl_5717;
13218         else if (tg3_flag(tp, 57765_CLASS) ||
13219                  tg3_asic_rev(tp) == ASIC_REV_5762)
13220                 mem_tbl = mem_tbl_57765;
13221         else if (tg3_flag(tp, 5755_PLUS))
13222                 mem_tbl = mem_tbl_5755;
13223         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13224                 mem_tbl = mem_tbl_5906;
13225         else if (tg3_flag(tp, 5705_PLUS))
13226                 mem_tbl = mem_tbl_5705;
13227         else
13228                 mem_tbl = mem_tbl_570x;
13229
13230         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13231                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13232                 if (err)
13233                         break;
13234         }
13235
13236         return err;
13237 }
13238
13239 #define TG3_TSO_MSS             500
13240
13241 #define TG3_TSO_IP_HDR_LEN      20
13242 #define TG3_TSO_TCP_HDR_LEN     20
13243 #define TG3_TSO_TCP_OPT_LEN     12
13244
13245 static const u8 tg3_tso_header[] = {
13246 0x08, 0x00,
13247 0x45, 0x00, 0x00, 0x00,
13248 0x00, 0x00, 0x40, 0x00,
13249 0x40, 0x06, 0x00, 0x00,
13250 0x0a, 0x00, 0x00, 0x01,
13251 0x0a, 0x00, 0x00, 0x02,
13252 0x0d, 0x00, 0xe0, 0x00,
13253 0x00, 0x00, 0x01, 0x00,
13254 0x00, 0x00, 0x02, 0x00,
13255 0x80, 0x10, 0x10, 0x00,
13256 0x14, 0x09, 0x00, 0x00,
13257 0x01, 0x01, 0x08, 0x0a,
13258 0x11, 0x11, 0x11, 0x11,
13259 0x11, 0x11, 0x11, 0x11,
13260 };
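
/* A field-by-field reading of the canned header above: bytes 0-1 are the
 * IPv4 EtherType (0x0800); the next 20 bytes form an IPv4 header
 * (version 4, IHL 5, DF set, TTL 64, protocol 6/TCP, 10.0.0.1 ->
 * 10.0.0.2) whose total-length and checksum fields get filled in later;
 * the remainder is a TCP header with a data offset of 8 words, i.e.
 * 20 bytes plus the 12 option bytes (two NOPs and a timestamp option)
 * that TG3_TSO_TCP_OPT_LEN accounts for.
 */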
13261
13262 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13263 {
13264         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13265         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13266         u32 budget;
13267         struct sk_buff *skb;
13268         u8 *tx_data, *rx_data;
13269         dma_addr_t map;
13270         int num_pkts, tx_len, rx_len, i, err;
13271         struct tg3_rx_buffer_desc *desc;
13272         struct tg3_napi *tnapi, *rnapi;
13273         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13274
13275         tnapi = &tp->napi[0];
13276         rnapi = &tp->napi[0];
13277         if (tp->irq_cnt > 1) {
13278                 if (tg3_flag(tp, ENABLE_RSS))
13279                         rnapi = &tp->napi[1];
13280                 if (tg3_flag(tp, ENABLE_TSS))
13281                         tnapi = &tp->napi[1];
13282         }
13283         coal_now = tnapi->coal_now | rnapi->coal_now;
13284
13285         err = -EIO;
13286
13287         tx_len = pktsz;
13288         skb = netdev_alloc_skb(tp->dev, tx_len);
13289         if (!skb)
13290                 return -ENOMEM;
13291
13292         tx_data = skb_put(skb, tx_len);
13293         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13294         memset(tx_data + ETH_ALEN, 0x0, 8);
13295
13296         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13297
13298         if (tso_loopback) {
13299                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13300
13301                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13302                               TG3_TSO_TCP_OPT_LEN;
13303
13304                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13305                        sizeof(tg3_tso_header));
13306                 mss = TG3_TSO_MSS;
13307
13308                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13309                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13310
13311                 /* Set the total length field in the IP header */
13312                 iph->tot_len = htons((u16)(mss + hdr_len));
13313
13314                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13315                               TXD_FLAG_CPU_POST_DMA);
13316
13317                 if (tg3_flag(tp, HW_TSO_1) ||
13318                     tg3_flag(tp, HW_TSO_2) ||
13319                     tg3_flag(tp, HW_TSO_3)) {
13320                         struct tcphdr *th;
13321                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13322                         th = (struct tcphdr *)&tx_data[val];
13323                         th->check = 0;
13324                 } else
13325                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13326
13327                 if (tg3_flag(tp, HW_TSO_3)) {
13328                         mss |= (hdr_len & 0xc) << 12;
13329                         if (hdr_len & 0x10)
13330                                 base_flags |= 0x00000010;
13331                         base_flags |= (hdr_len & 0x3e0) << 5;
13332                 } else if (tg3_flag(tp, HW_TSO_2))
13333                         mss |= hdr_len << 9;
13334                 else if (tg3_flag(tp, HW_TSO_1) ||
13335                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13336                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13337                 } else {
13338                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13339                 }
13340
13341                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13342         } else {
13343                 num_pkts = 1;
13344                 data_off = ETH_HLEN;
13345
13346                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13347                     tx_len > VLAN_ETH_FRAME_LEN)
13348                         base_flags |= TXD_FLAG_JMB_PKT;
13349         }
13350
13351         for (i = data_off; i < tx_len; i++)
13352                 tx_data[i] = (u8) (i & 0xff);
13353
13354         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13355         if (pci_dma_mapping_error(tp->pdev, map)) {
13356                 dev_kfree_skb(skb);
13357                 return -EIO;
13358         }
13359
13360         val = tnapi->tx_prod;
13361         tnapi->tx_buffers[val].skb = skb;
13362         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13363
13364         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13365                rnapi->coal_now);
13366
13367         udelay(10);
13368
13369         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13370
13371         budget = tg3_tx_avail(tnapi);
13372         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13373                             base_flags | TXD_FLAG_END, mss, 0)) {
13374                 tnapi->tx_buffers[val].skb = NULL;
13375                 dev_kfree_skb(skb);
13376                 return -EIO;
13377         }
13378
13379         tnapi->tx_prod++;
13380
13381         /* Sync BD data before updating mailbox */
13382         wmb();
13383
13384         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13385         tr32_mailbox(tnapi->prodmbox);
13386
13387         udelay(10);
13388
13389         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13390         for (i = 0; i < 35; i++) {
13391                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13392                        coal_now);
13393
13394                 udelay(10);
13395
13396                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13397                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13398                 if ((tx_idx == tnapi->tx_prod) &&
13399                     (rx_idx == (rx_start_idx + num_pkts)))
13400                         break;
13401         }
13402
13403         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13404         dev_kfree_skb(skb);
13405
13406         if (tx_idx != tnapi->tx_prod)
13407                 goto out;
13408
13409         if (rx_idx != rx_start_idx + num_pkts)
13410                 goto out;
13411
13412         val = data_off;
13413         while (rx_idx != rx_start_idx) {
13414                 desc = &rnapi->rx_rcb[rx_start_idx++];
13415                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13416                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13417
13418                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13419                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13420                         goto out;
13421
13422                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13423                          - ETH_FCS_LEN;
13424
13425                 if (!tso_loopback) {
13426                         if (rx_len != tx_len)
13427                                 goto out;
13428
13429                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13430                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13431                                         goto out;
13432                         } else {
13433                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13434                                         goto out;
13435                         }
13436                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13437                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13438                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13439                         goto out;
13440                 }
13441
13442                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13443                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13444                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13445                                              mapping);
13446                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13447                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13448                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13449                                              mapping);
13450                 } else
13451                         goto out;
13452
13453                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13454                                             PCI_DMA_FROMDEVICE);
13455
13456                 rx_data += TG3_RX_OFFSET(tp);
13457                 for (i = data_off; i < rx_len; i++, val++) {
13458                         if (*(rx_data + i) != (u8) (val & 0xff))
13459                                 goto out;
13460                 }
13461         }
13462
13463         err = 0;
13464
13465         /* tg3_free_rings will unmap and free the rx_data */
13466 out:
13467         return err;
13468 }
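
/* The loopback payload is self-describing: byte i of the frame carries
 * (i & 0xff), so the receive side can verify every byte with a running
 * counter and no copy of the transmitted frame.  A minimal standalone
 * sketch of that fill/verify pair over a full-sized frame (the 14-byte
 * Ethernet header is skipped, much as data_off does above):
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void fill(uint8_t *p, size_t off, size_t len)
{
        size_t i;

        for (i = off; i < len; i++)
                p[i] = (uint8_t)i;
}

static int verify(const uint8_t *p, size_t off, size_t len)
{
        size_t i;

        for (i = off; i < len; i++)
                if (p[i] != (uint8_t)i)
                        return -1;
        return 0;
}

int main(void)
{
        uint8_t frame[1514];

        fill(frame, 14, sizeof(frame));
        printf("payload %s\n", verify(frame, 14, sizeof(frame)) ? "bad" : "ok");
        return 0;
}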
13469
13470 #define TG3_STD_LOOPBACK_FAILED         1
13471 #define TG3_JMB_LOOPBACK_FAILED         2
13472 #define TG3_TSO_LOOPBACK_FAILED         4
13473 #define TG3_LOOPBACK_FAILED \
13474         (TG3_STD_LOOPBACK_FAILED | \
13475          TG3_JMB_LOOPBACK_FAILED | \
13476          TG3_TSO_LOOPBACK_FAILED)
13477
13478 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13479 {
13480         int err = -EIO;
13481         u32 eee_cap;
13482         u32 jmb_pkt_sz = 9000;
13483
13484         if (tp->dma_limit)
13485                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13486
13487         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13488         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13489
13490         if (!netif_running(tp->dev)) {
13491                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13492                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13493                 if (do_extlpbk)
13494                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13495                 goto done;
13496         }
13497
13498         err = tg3_reset_hw(tp, true);
13499         if (err) {
13500                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13501                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13502                 if (do_extlpbk)
13503                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13504                 goto done;
13505         }
13506
13507         if (tg3_flag(tp, ENABLE_RSS)) {
13508                 int i;
13509
13510                 /* Reroute all rx packets to the 1st queue */
13511                 for (i = MAC_RSS_INDIR_TBL_0;
13512                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13513                         tw32(i, 0x0);
13514         }
13515
13516         /* HW errata - mac loopback fails in some cases on 5780.
13517          * Normal traffic and PHY loopback are not affected by
13518          * errata.  Also, the MAC loopback test is deprecated for
13519          * all newer ASIC revisions.
13520          */
13521         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13522             !tg3_flag(tp, CPMU_PRESENT)) {
13523                 tg3_mac_loopback(tp, true);
13524
13525                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13526                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13527
13528                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13529                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13530                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13531
13532                 tg3_mac_loopback(tp, false);
13533         }
13534
13535         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13536             !tg3_flag(tp, USE_PHYLIB)) {
13537                 int i;
13538
13539                 tg3_phy_lpbk_set(tp, 0, false);
13540
13541                 /* Wait for link */
13542                 for (i = 0; i < 100; i++) {
13543                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13544                                 break;
13545                         mdelay(1);
13546                 }
13547
13548                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13549                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13550                 if (tg3_flag(tp, TSO_CAPABLE) &&
13551                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13552                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13553                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13554                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13555                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13556
13557                 if (do_extlpbk) {
13558                         tg3_phy_lpbk_set(tp, 0, true);
13559
13560                         /* All link indications report up, but the hardware
13561                          * isn't really ready for about 20 msec.  Double it
13562                          * to be sure.
13563                          */
13564                         mdelay(40);
13565
13566                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13567                                 data[TG3_EXT_LOOPB_TEST] |=
13568                                                         TG3_STD_LOOPBACK_FAILED;
13569                         if (tg3_flag(tp, TSO_CAPABLE) &&
13570                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13571                                 data[TG3_EXT_LOOPB_TEST] |=
13572                                                         TG3_TSO_LOOPBACK_FAILED;
13573                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13574                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13575                                 data[TG3_EXT_LOOPB_TEST] |=
13576                                                         TG3_JMB_LOOPBACK_FAILED;
13577                 }
13578
13579                 /* Re-enable gphy autopowerdown. */
13580                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13581                         tg3_phy_toggle_apd(tp, true);
13582         }
13583
13584         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13585                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13586
13587 done:
13588         tp->phy_flags |= eee_cap;
13589
13590         return err;
13591 }
13592
13593 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13594                           u64 *data)
13595 {
13596         struct tg3 *tp = netdev_priv(dev);
13597         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13598
13599         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13600                 if (tg3_power_up(tp)) {
13601                         etest->flags |= ETH_TEST_FL_FAILED;
13602                         memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13603                         return;
13604                 }
13605                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13606         }
13607
13608         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13609
13610         if (tg3_test_nvram(tp) != 0) {
13611                 etest->flags |= ETH_TEST_FL_FAILED;
13612                 data[TG3_NVRAM_TEST] = 1;
13613         }
13614         if (!doextlpbk && tg3_test_link(tp)) {
13615                 etest->flags |= ETH_TEST_FL_FAILED;
13616                 data[TG3_LINK_TEST] = 1;
13617         }
13618         if (etest->flags & ETH_TEST_FL_OFFLINE) {
13619                 int err, err2 = 0, irq_sync = 0;
13620
13621                 if (netif_running(dev)) {
13622                         tg3_phy_stop(tp);
13623                         tg3_netif_stop(tp);
13624                         irq_sync = 1;
13625                 }
13626
13627                 tg3_full_lock(tp, irq_sync);
13628                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13629                 err = tg3_nvram_lock(tp);
13630                 tg3_halt_cpu(tp, RX_CPU_BASE);
13631                 if (!tg3_flag(tp, 5705_PLUS))
13632                         tg3_halt_cpu(tp, TX_CPU_BASE);
13633                 if (!err)
13634                         tg3_nvram_unlock(tp);
13635
13636                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13637                         tg3_phy_reset(tp);
13638
13639                 if (tg3_test_registers(tp) != 0) {
13640                         etest->flags |= ETH_TEST_FL_FAILED;
13641                         data[TG3_REGISTER_TEST] = 1;
13642                 }
13643
13644                 if (tg3_test_memory(tp) != 0) {
13645                         etest->flags |= ETH_TEST_FL_FAILED;
13646                         data[TG3_MEMORY_TEST] = 1;
13647                 }
13648
13649                 if (doextlpbk)
13650                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13651
13652                 if (tg3_test_loopback(tp, data, doextlpbk))
13653                         etest->flags |= ETH_TEST_FL_FAILED;
13654
13655                 tg3_full_unlock(tp);
13656
13657                 if (tg3_test_interrupt(tp) != 0) {
13658                         etest->flags |= ETH_TEST_FL_FAILED;
13659                         data[TG3_INTERRUPT_TEST] = 1;
13660                 }
13661
13662                 tg3_full_lock(tp, 0);
13663
13664                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13665                 if (netif_running(dev)) {
13666                         tg3_flag_set(tp, INIT_COMPLETE);
13667                         err2 = tg3_restart_hw(tp, true);
13668                         if (!err2)
13669                                 tg3_netif_start(tp);
13670                 }
13671
13672                 tg3_full_unlock(tp);
13673
13674                 if (irq_sync && !err2)
13675                         tg3_phy_start(tp);
13676         }
13677         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13678                 tg3_power_down_prepare(tp);
13679
13680 }
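
/* From user space this whole suite is reached through the ETHTOOL_TEST
 * ioctl, which is what `ethtool -t eth0 offline` issues.  A hedged,
 * self-contained sketch of that call sequence; "eth0" is an assumption
 * and error handling is trimmed to the essentials.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
        struct ethtool_test *test;
        struct ifreq ifr;
        unsigned int i;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

        ifr.ifr_data = (char *)&drvinfo;        /* how many result slots? */
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                return 1;

        test = calloc(1, sizeof(*test) +
                         drvinfo.testinfo_len * sizeof(test->data[0]));
        if (!test)
                return 1;
        test->cmd = ETHTOOL_TEST;
        test->flags = ETH_TEST_FL_OFFLINE;      /* takes the link down */

        ifr.ifr_data = (char *)test;
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                return 1;

        for (i = 0; i < drvinfo.testinfo_len; i++)
                printf("test %u: %llu\n", i,
                       (unsigned long long)test->data[i]);
        printf("overall: %s\n",
               (test->flags & ETH_TEST_FL_FAILED) ? "FAILED" : "PASSED");
        free(test);
        close(fd);
        return 0;
}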
13681
13682 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13683 {
13684         struct tg3 *tp = netdev_priv(dev);
13685         struct hwtstamp_config stmpconf;
13686
13687         if (!tg3_flag(tp, PTP_CAPABLE))
13688                 return -EOPNOTSUPP;
13689
13690         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13691                 return -EFAULT;
13692
13693         if (stmpconf.flags)
13694                 return -EINVAL;
13695
13696         if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13697             stmpconf.tx_type != HWTSTAMP_TX_OFF)
13698                 return -ERANGE;
13699
13700         switch (stmpconf.rx_filter) {
13701         case HWTSTAMP_FILTER_NONE:
13702                 tp->rxptpctl = 0;
13703                 break;
13704         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13705                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13706                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13707                 break;
13708         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13709                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13710                                TG3_RX_PTP_CTL_SYNC_EVNT;
13711                 break;
13712         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13713                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13714                                TG3_RX_PTP_CTL_DELAY_REQ;
13715                 break;
13716         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13717                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13718                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13719                 break;
13720         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13721                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13722                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13723                 break;
13724         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13725                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13726                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13727                 break;
13728         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13729                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13730                                TG3_RX_PTP_CTL_SYNC_EVNT;
13731                 break;
13732         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13733                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13734                                TG3_RX_PTP_CTL_SYNC_EVNT;
13735                 break;
13736         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13737                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13738                                TG3_RX_PTP_CTL_SYNC_EVNT;
13739                 break;
13740         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13741                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13742                                TG3_RX_PTP_CTL_DELAY_REQ;
13743                 break;
13744         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13745                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13746                                TG3_RX_PTP_CTL_DELAY_REQ;
13747                 break;
13748         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13749                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13750                                TG3_RX_PTP_CTL_DELAY_REQ;
13751                 break;
13752         default:
13753                 return -ERANGE;
13754         }
13755
13756         if (netif_running(dev) && tp->rxptpctl)
13757                 tw32(TG3_RX_PTP_CTL,
13758                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13759
13760         if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13761                 tg3_flag_set(tp, TX_TSTAMP_EN);
13762         else
13763                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13764
13765         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13766                 -EFAULT : 0;
13767 }
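
/* A hedged user-space sketch of the request this handler services:
 * enable transmit timestamps and ask for all PTPv2 event packets to be
 * timestamped on receive.  "eth0" is an assumption; note the driver may
 * adjust the config, which is why it is copied back to the caller.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
        struct hwtstamp_config cfg;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&cfg, 0, sizeof(cfg));
        cfg.tx_type = HWTSTAMP_TX_ON;
        cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&cfg;

        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
                perror("SIOCSHWTSTAMP");
                return 1;
        }

        printf("granted: tx_type=%d rx_filter=%d\n",
               cfg.tx_type, cfg.rx_filter);
        close(fd);
        return 0;
}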
13768
13769 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13770 {
13771         struct tg3 *tp = netdev_priv(dev);
13772         struct hwtstamp_config stmpconf;
13773
13774         if (!tg3_flag(tp, PTP_CAPABLE))
13775                 return -EOPNOTSUPP;
13776
13777         stmpconf.flags = 0;
13778         stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13779                             HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13780
13781         switch (tp->rxptpctl) {
13782         case 0:
13783                 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13784                 break;
13785         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13786                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13787                 break;
13788         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13789                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13790                 break;
13791         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13792                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13793                 break;
13794         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13795                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13796                 break;
13797         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13798                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13799                 break;
13800         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13801                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13802                 break;
13803         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13804                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13805                 break;
13806         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13807                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13808                 break;
13809         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13810                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13811                 break;
13812         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13813                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13814                 break;
13815         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13816                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13817                 break;
13818         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13819                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13820                 break;
13821         default:
13822                 WARN_ON_ONCE(1);
13823                 return -ERANGE;
13824         }
13825
13826         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13827                 -EFAULT : 0;
13828 }
13829
13830 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13831 {
13832         struct mii_ioctl_data *data = if_mii(ifr);
13833         struct tg3 *tp = netdev_priv(dev);
13834         int err;
13835
13836         if (tg3_flag(tp, USE_PHYLIB)) {
13837                 struct phy_device *phydev;
13838                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13839                         return -EAGAIN;
13840                 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
13841                 return phy_mii_ioctl(phydev, ifr, cmd);
13842         }
13843
13844         switch (cmd) {
13845         case SIOCGMIIPHY:
13846                 data->phy_id = tp->phy_addr;
13847
13848                 /* fallthru */
13849         case SIOCGMIIREG: {
13850                 u32 mii_regval;
13851
13852                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13853                         break;                  /* We have no PHY */
13854
13855                 if (!netif_running(dev))
13856                         return -EAGAIN;
13857
13858                 spin_lock_bh(&tp->lock);
13859                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13860                                     data->reg_num & 0x1f, &mii_regval);
13861                 spin_unlock_bh(&tp->lock);
13862
13863                 data->val_out = mii_regval;
13864
13865                 return err;
13866         }
13867
13868         case SIOCSMIIREG:
13869                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13870                         break;                  /* We have no PHY */
13871
13872                 if (!netif_running(dev))
13873                         return -EAGAIN;
13874
13875                 spin_lock_bh(&tp->lock);
13876                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13877                                      data->reg_num & 0x1f, data->val_in);
13878                 spin_unlock_bh(&tp->lock);
13879
13880                 return err;
13881
13882         case SIOCSHWTSTAMP:
13883                 return tg3_hwtstamp_set(dev, ifr);
13884
13885         case SIOCGHWTSTAMP:
13886                 return tg3_hwtstamp_get(dev, ifr);
13887
13888         default:
13889                 /* do nothing */
13890                 break;
13891         }
13892         return -EOPNOTSUPP;
13893 }
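
/* The MII branch above is reachable from user space too; the
 * mii_ioctl_data lives inside the ifreq itself, exactly as if_mii()
 * unpacks it on the kernel side.  A hedged standalone sketch reading
 * the basic-mode status register (BMSR); "eth0" is an assumption.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
        struct ifreq ifr;
        struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_ifru;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

        if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)   /* fills mii->phy_id */
                return 1;

        mii->reg_num = MII_BMSR;
        if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
                return 1;

        printf("PHY %u BMSR = 0x%04x (link %s)\n", mii->phy_id, mii->val_out,
               (mii->val_out & BMSR_LSTATUS) ? "up" : "down");
        close(fd);
        return 0;
}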
13894
13895 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13896 {
13897         struct tg3 *tp = netdev_priv(dev);
13898
13899         memcpy(ec, &tp->coal, sizeof(*ec));
13900         return 0;
13901 }
13902
13903 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13904 {
13905         struct tg3 *tp = netdev_priv(dev);
13906         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13907         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13908
13909         if (!tg3_flag(tp, 5705_PLUS)) {
13910                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13911                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13912                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13913                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13914         }
13915
13916         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13917             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13918             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13919             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13920             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13921             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13922             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13923             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13924             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13925             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13926                 return -EINVAL;
13927
13928         /* No rx interrupts will be generated if both are zero */
13929         if ((ec->rx_coalesce_usecs == 0) &&
13930             (ec->rx_max_coalesced_frames == 0))
13931                 return -EINVAL;
13932
13933         /* No tx interrupts will be generated if both are zero */
13934         if ((ec->tx_coalesce_usecs == 0) &&
13935             (ec->tx_max_coalesced_frames == 0))
13936                 return -EINVAL;
13937
13938         /* Only copy relevant parameters, ignore all others. */
13939         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13940         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13941         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13942         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13943         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13944         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13945         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13946         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13947         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13948
13949         if (netif_running(dev)) {
13950                 tg3_full_lock(tp, 0);
13951                 __tg3_set_coalesce(tp, &tp->coal);
13952                 tg3_full_unlock(tp);
13953         }
13954         return 0;
13955 }
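
/* From user space these bounds surface through `ethtool -c`/`ethtool -C`;
 * for example (all values must pass the range checks above, and at least
 * one of usecs/frames must stay nonzero per direction):
 *
 *      ethtool -C eth0 rx-usecs 60 rx-frames 15 tx-usecs 120 tx-frames 30
 */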
13956
13957 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
13958 {
13959         struct tg3 *tp = netdev_priv(dev);
13960
13961         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13962                 netdev_warn(tp->dev, "Board does not support EEE!\n");
13963                 return -EOPNOTSUPP;
13964         }
13965
13966         if (edata->advertised != tp->eee.advertised) {
13967                 netdev_warn(tp->dev,
13968                             "Direct manipulation of EEE advertisement is not supported\n");
13969                 return -EINVAL;
13970         }
13971
13972         if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
13973                 netdev_warn(tp->dev,
13974                             "Maximal Tx Lpi timer supported is %#x(u)\n",
13975                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
13976                 return -EINVAL;
13977         }
13978
13979         tp->eee = *edata;
13980
13981         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
13982         tg3_warn_mgmt_link_flap(tp);
13983
13984         if (netif_running(tp->dev)) {
13985                 tg3_full_lock(tp, 0);
13986                 tg3_setup_eee(tp);
13987                 tg3_phy_reset(tp);
13988                 tg3_full_unlock(tp);
13989         }
13990
13991         return 0;
13992 }
13993
13994 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
13995 {
13996         struct tg3 *tp = netdev_priv(dev);
13997
13998         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13999                 netdev_warn(tp->dev,
14000                             "Board does not support EEE!\n");
14001                 return -EOPNOTSUPP;
14002         }
14003
14004         *edata = tp->eee;
14005         return 0;
14006 }
14007
14008 static const struct ethtool_ops tg3_ethtool_ops = {
14009         .get_settings           = tg3_get_settings,
14010         .set_settings           = tg3_set_settings,
14011         .get_drvinfo            = tg3_get_drvinfo,
14012         .get_regs_len           = tg3_get_regs_len,
14013         .get_regs               = tg3_get_regs,
14014         .get_wol                = tg3_get_wol,
14015         .set_wol                = tg3_set_wol,
14016         .get_msglevel           = tg3_get_msglevel,
14017         .set_msglevel           = tg3_set_msglevel,
14018         .nway_reset             = tg3_nway_reset,
14019         .get_link               = ethtool_op_get_link,
14020         .get_eeprom_len         = tg3_get_eeprom_len,
14021         .get_eeprom             = tg3_get_eeprom,
14022         .set_eeprom             = tg3_set_eeprom,
14023         .get_ringparam          = tg3_get_ringparam,
14024         .set_ringparam          = tg3_set_ringparam,
14025         .get_pauseparam         = tg3_get_pauseparam,
14026         .set_pauseparam         = tg3_set_pauseparam,
14027         .self_test              = tg3_self_test,
14028         .get_strings            = tg3_get_strings,
14029         .set_phys_id            = tg3_set_phys_id,
14030         .get_ethtool_stats      = tg3_get_ethtool_stats,
14031         .get_coalesce           = tg3_get_coalesce,
14032         .set_coalesce           = tg3_set_coalesce,
14033         .get_sset_count         = tg3_get_sset_count,
14034         .get_rxnfc              = tg3_get_rxnfc,
14035         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14036         .get_rxfh_indir         = tg3_get_rxfh_indir,
14037         .set_rxfh_indir         = tg3_set_rxfh_indir,
14038         .get_channels           = tg3_get_channels,
14039         .set_channels           = tg3_set_channels,
14040         .get_ts_info            = tg3_get_ts_info,
14041         .get_eee                = tg3_get_eee,
14042         .set_eee                = tg3_set_eee,
14043 };
14044
14045 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
14046                                                 struct rtnl_link_stats64 *stats)
14047 {
14048         struct tg3 *tp = netdev_priv(dev);
14049
14050         spin_lock_bh(&tp->lock);
14051         if (!tp->hw_stats) {
14052                 spin_unlock_bh(&tp->lock);
14053                 return &tp->net_stats_prev;
14054         }
14055
14056         tg3_get_nstats(tp, stats);
14057         spin_unlock_bh(&tp->lock);
14058
14059         return stats;
14060 }
14061
14062 static void tg3_set_rx_mode(struct net_device *dev)
14063 {
14064         struct tg3 *tp = netdev_priv(dev);
14065
14066         if (!netif_running(dev))
14067                 return;
14068
14069         tg3_full_lock(tp, 0);
14070         __tg3_set_rx_mode(dev);
14071         tg3_full_unlock(tp);
14072 }
14073
14074 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14075                                int new_mtu)
14076 {
14077         dev->mtu = new_mtu;
14078
14079         if (new_mtu > ETH_DATA_LEN) {
14080                 if (tg3_flag(tp, 5780_CLASS)) {
14081                         netdev_update_features(dev);
14082                         tg3_flag_clear(tp, TSO_CAPABLE);
14083                 } else {
14084                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14085                 }
14086         } else {
14087                 if (tg3_flag(tp, 5780_CLASS)) {
14088                         tg3_flag_set(tp, TSO_CAPABLE);
14089                         netdev_update_features(dev);
14090                 }
14091                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14092         }
14093 }
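
/* Illustration of tg3_set_mtu() above (MTU value assumed): new_mtu = 9000
 * exceeds ETH_DATA_LEN (1500), so a 5780-class chip re-evaluates its
 * netdev features and drops TSO_CAPABLE, while any other chip simply
 * enables the jumbo RX ring via JUMBO_RING_ENABLE.
 */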
14094
14095 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14096 {
14097         struct tg3 *tp = netdev_priv(dev);
14098         int err;
14099         bool reset_phy = false;
14100
14101         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
14102                 return -EINVAL;
14103
14104         if (!netif_running(dev)) {
14105                 /* We'll just catch it later when the
14106                  * device is brought up.
14107                  */
14108                 tg3_set_mtu(dev, tp, new_mtu);
14109                 return 0;
14110         }
14111
14112         tg3_phy_stop(tp);
14113
14114         tg3_netif_stop(tp);
14115
14116         tg3_full_lock(tp, 1);
14117
14118         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14119
14120         tg3_set_mtu(dev, tp, new_mtu);
14121
14122         /* Reset the PHY, otherwise the read DMA engine will be left in a
14123          * mode that breaks up all requests into 256-byte chunks.
14124          */
14125         if (tg3_asic_rev(tp) == ASIC_REV_57766)
14126                 reset_phy = true;
14127
14128         err = tg3_restart_hw(tp, reset_phy);
14129
14130         if (!err)
14131                 tg3_netif_start(tp);
14132
14133         tg3_full_unlock(tp);
14134
14135         if (!err)
14136                 tg3_phy_start(tp);
14137
14138         return err;
14139 }
14140
14141 static const struct net_device_ops tg3_netdev_ops = {
14142         .ndo_open               = tg3_open,
14143         .ndo_stop               = tg3_close,
14144         .ndo_start_xmit         = tg3_start_xmit,
14145         .ndo_get_stats64        = tg3_get_stats64,
14146         .ndo_validate_addr      = eth_validate_addr,
14147         .ndo_set_rx_mode        = tg3_set_rx_mode,
14148         .ndo_set_mac_address    = tg3_set_mac_addr,
14149         .ndo_do_ioctl           = tg3_ioctl,
14150         .ndo_tx_timeout         = tg3_tx_timeout,
14151         .ndo_change_mtu         = tg3_change_mtu,
14152         .ndo_fix_features       = tg3_fix_features,
14153         .ndo_set_features       = tg3_set_features,
14154 #ifdef CONFIG_NET_POLL_CONTROLLER
14155         .ndo_poll_controller    = tg3_poll_controller,
14156 #endif
14157 };
14158
14159 static void tg3_get_eeprom_size(struct tg3 *tp)
14160 {
14161         u32 cursize, val, magic;
14162
14163         tp->nvram_size = EEPROM_CHIP_SIZE;
14164
14165         if (tg3_nvram_read(tp, 0, &magic) != 0)
14166                 return;
14167
14168         if ((magic != TG3_EEPROM_MAGIC) &&
14169             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14170             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14171                 return;
14172
14173         /*
14174          * Size the chip by reading offsets at increasing powers of two.
14175          * When we encounter our validation signature, we know the addressing
14176          * has wrapped around, and thus have our chip size.
14177          */
14178         cursize = 0x10;
14179
14180         while (cursize < tp->nvram_size) {
14181                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14182                         return;
14183
14184                 if (val == magic)
14185                         break;
14186
14187                 cursize <<= 1;
14188         }
14189
14190         tp->nvram_size = cursize;
14191 }
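
/* Worked example for the sizing loop in tg3_get_eeprom_size() (device
 * size assumed): on a 4 KB part the reads at 0x10, 0x20, ... return
 * ordinary data, but the read at offset 0x1000 wraps back to offset 0 and
 * returns the magic signature, so cursize stops at 0x1000 (4 KB).
 */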
14192
14193 static void tg3_get_nvram_size(struct tg3 *tp)
14194 {
14195         u32 val;
14196
14197         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14198                 return;
14199
14200         /* Selfboot format */
14201         if (val != TG3_EEPROM_MAGIC) {
14202                 tg3_get_eeprom_size(tp);
14203                 return;
14204         }
14205
14206         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14207                 if (val != 0) {
14208                         /* This is confusing.  We want to operate on the
14209                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14210                          * call will read from NVRAM and byteswap the data
14211                          * according to the byteswapping settings for all
14212                          * other register accesses.  This ensures the data we
14213                          * want will always reside in the lower 16-bits.
14214                          * However, the data in NVRAM is in LE format, which
14215                          * means the data from the NVRAM read will always be
14216                          * opposite the endianness of the CPU.  The 16-bit
14217                          * byteswap then brings the data to CPU endianness.
14218                          */
14219                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14220                         return;
14221                 }
14222         }
14223         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14224 }
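
/* Worked example for the swab16() in tg3_get_nvram_size() (value
 * assumed): if the word read from offset 0xf0 comes back as
 * val = 0x00000002, then swab16(0x0002) = 0x0200 = 512, so nvram_size
 * becomes 512 * 1024, i.e. TG3_NVRAM_SIZE_512KB.
 */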
14225
14226 static void tg3_get_nvram_info(struct tg3 *tp)
14227 {
14228         u32 nvcfg1;
14229
14230         nvcfg1 = tr32(NVRAM_CFG1);
14231         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14232                 tg3_flag_set(tp, FLASH);
14233         } else {
14234                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14235                 tw32(NVRAM_CFG1, nvcfg1);
14236         }
14237
14238         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14239             tg3_flag(tp, 5780_CLASS)) {
14240                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14241                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14242                         tp->nvram_jedecnum = JEDEC_ATMEL;
14243                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14244                         tg3_flag_set(tp, NVRAM_BUFFERED);
14245                         break;
14246                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14247                         tp->nvram_jedecnum = JEDEC_ATMEL;
14248                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14249                         break;
14250                 case FLASH_VENDOR_ATMEL_EEPROM:
14251                         tp->nvram_jedecnum = JEDEC_ATMEL;
14252                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14253                         tg3_flag_set(tp, NVRAM_BUFFERED);
14254                         break;
14255                 case FLASH_VENDOR_ST:
14256                         tp->nvram_jedecnum = JEDEC_ST;
14257                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14258                         tg3_flag_set(tp, NVRAM_BUFFERED);
14259                         break;
14260                 case FLASH_VENDOR_SAIFUN:
14261                         tp->nvram_jedecnum = JEDEC_SAIFUN;
14262                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14263                         break;
14264                 case FLASH_VENDOR_SST_SMALL:
14265                 case FLASH_VENDOR_SST_LARGE:
14266                         tp->nvram_jedecnum = JEDEC_SST;
14267                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14268                         break;
14269                 }
14270         } else {
14271                 tp->nvram_jedecnum = JEDEC_ATMEL;
14272                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14273                 tg3_flag_set(tp, NVRAM_BUFFERED);
14274         }
14275 }
14276
14277 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14278 {
14279         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14280         case FLASH_5752PAGE_SIZE_256:
14281                 tp->nvram_pagesize = 256;
14282                 break;
14283         case FLASH_5752PAGE_SIZE_512:
14284                 tp->nvram_pagesize = 512;
14285                 break;
14286         case FLASH_5752PAGE_SIZE_1K:
14287                 tp->nvram_pagesize = 1024;
14288                 break;
14289         case FLASH_5752PAGE_SIZE_2K:
14290                 tp->nvram_pagesize = 2048;
14291                 break;
14292         case FLASH_5752PAGE_SIZE_4K:
14293                 tp->nvram_pagesize = 4096;
14294                 break;
14295         case FLASH_5752PAGE_SIZE_264:
14296                 tp->nvram_pagesize = 264;
14297                 break;
14298         case FLASH_5752PAGE_SIZE_528:
14299                 tp->nvram_pagesize = 528;
14300                 break;
14301         }
14302 }
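
/* Note: 264 and 528 bytes are the native "power of two plus spare area"
 * page sizes of Atmel AT45DB-style DataFlash; the 57780/5717/5720 paths
 * below set NO_NVRAM_ADDR_TRANS for any other page size.
 */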
14303
14304 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14305 {
14306         u32 nvcfg1;
14307
14308         nvcfg1 = tr32(NVRAM_CFG1);
14309
14310         /* NVRAM protection for TPM */
14311         if (nvcfg1 & (1 << 27))
14312                 tg3_flag_set(tp, PROTECTED_NVRAM);
14313
14314         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14315         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14316         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14317                 tp->nvram_jedecnum = JEDEC_ATMEL;
14318                 tg3_flag_set(tp, NVRAM_BUFFERED);
14319                 break;
14320         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14321                 tp->nvram_jedecnum = JEDEC_ATMEL;
14322                 tg3_flag_set(tp, NVRAM_BUFFERED);
14323                 tg3_flag_set(tp, FLASH);
14324                 break;
14325         case FLASH_5752VENDOR_ST_M45PE10:
14326         case FLASH_5752VENDOR_ST_M45PE20:
14327         case FLASH_5752VENDOR_ST_M45PE40:
14328                 tp->nvram_jedecnum = JEDEC_ST;
14329                 tg3_flag_set(tp, NVRAM_BUFFERED);
14330                 tg3_flag_set(tp, FLASH);
14331                 break;
14332         }
14333
14334         if (tg3_flag(tp, FLASH)) {
14335                 tg3_nvram_get_pagesize(tp, nvcfg1);
14336         } else {
14337                 /* For eeprom, set pagesize to maximum eeprom size */
14338                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14339
14340                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14341                 tw32(NVRAM_CFG1, nvcfg1);
14342         }
14343 }
14344
14345 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14346 {
14347         u32 nvcfg1, protect = 0;
14348
14349         nvcfg1 = tr32(NVRAM_CFG1);
14350
14351         /* NVRAM protection for TPM */
14352         if (nvcfg1 & (1 << 27)) {
14353                 tg3_flag_set(tp, PROTECTED_NVRAM);
14354                 protect = 1;
14355         }
14356
14357         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14358         switch (nvcfg1) {
14359         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14360         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14361         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14362         case FLASH_5755VENDOR_ATMEL_FLASH_5:
14363                 tp->nvram_jedecnum = JEDEC_ATMEL;
14364                 tg3_flag_set(tp, NVRAM_BUFFERED);
14365                 tg3_flag_set(tp, FLASH);
14366                 tp->nvram_pagesize = 264;
14367                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14368                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14369                         tp->nvram_size = (protect ? 0x3e200 :
14370                                           TG3_NVRAM_SIZE_512KB);
14371                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14372                         tp->nvram_size = (protect ? 0x1f200 :
14373                                           TG3_NVRAM_SIZE_256KB);
14374                 else
14375                         tp->nvram_size = (protect ? 0x1f200 :
14376                                           TG3_NVRAM_SIZE_128KB);
14377                 break;
14378         case FLASH_5752VENDOR_ST_M45PE10:
14379         case FLASH_5752VENDOR_ST_M45PE20:
14380         case FLASH_5752VENDOR_ST_M45PE40:
14381                 tp->nvram_jedecnum = JEDEC_ST;
14382                 tg3_flag_set(tp, NVRAM_BUFFERED);
14383                 tg3_flag_set(tp, FLASH);
14384                 tp->nvram_pagesize = 256;
14385                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14386                         tp->nvram_size = (protect ?
14387                                           TG3_NVRAM_SIZE_64KB :
14388                                           TG3_NVRAM_SIZE_128KB);
14389                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14390                         tp->nvram_size = (protect ?
14391                                           TG3_NVRAM_SIZE_64KB :
14392                                           TG3_NVRAM_SIZE_256KB);
14393                 else
14394                         tp->nvram_size = (protect ?
14395                                           TG3_NVRAM_SIZE_128KB :
14396                                           TG3_NVRAM_SIZE_512KB);
14397                 break;
14398         }
14399 }
14400
14401 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14402 {
14403         u32 nvcfg1;
14404
14405         nvcfg1 = tr32(NVRAM_CFG1);
14406
14407         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14408         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14409         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14410         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14411         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14412                 tp->nvram_jedecnum = JEDEC_ATMEL;
14413                 tg3_flag_set(tp, NVRAM_BUFFERED);
14414                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14415
14416                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14417                 tw32(NVRAM_CFG1, nvcfg1);
14418                 break;
14419         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14420         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14421         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14422         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14423                 tp->nvram_jedecnum = JEDEC_ATMEL;
14424                 tg3_flag_set(tp, NVRAM_BUFFERED);
14425                 tg3_flag_set(tp, FLASH);
14426                 tp->nvram_pagesize = 264;
14427                 break;
14428         case FLASH_5752VENDOR_ST_M45PE10:
14429         case FLASH_5752VENDOR_ST_M45PE20:
14430         case FLASH_5752VENDOR_ST_M45PE40:
14431                 tp->nvram_jedecnum = JEDEC_ST;
14432                 tg3_flag_set(tp, NVRAM_BUFFERED);
14433                 tg3_flag_set(tp, FLASH);
14434                 tp->nvram_pagesize = 256;
14435                 break;
14436         }
14437 }
14438
14439 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14440 {
14441         u32 nvcfg1, protect = 0;
14442
14443         nvcfg1 = tr32(NVRAM_CFG1);
14444
14445         /* NVRAM protection for TPM */
14446         if (nvcfg1 & (1 << 27)) {
14447                 tg3_flag_set(tp, PROTECTED_NVRAM);
14448                 protect = 1;
14449         }
14450
14451         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14452         switch (nvcfg1) {
14453         case FLASH_5761VENDOR_ATMEL_ADB021D:
14454         case FLASH_5761VENDOR_ATMEL_ADB041D:
14455         case FLASH_5761VENDOR_ATMEL_ADB081D:
14456         case FLASH_5761VENDOR_ATMEL_ADB161D:
14457         case FLASH_5761VENDOR_ATMEL_MDB021D:
14458         case FLASH_5761VENDOR_ATMEL_MDB041D:
14459         case FLASH_5761VENDOR_ATMEL_MDB081D:
14460         case FLASH_5761VENDOR_ATMEL_MDB161D:
14461                 tp->nvram_jedecnum = JEDEC_ATMEL;
14462                 tg3_flag_set(tp, NVRAM_BUFFERED);
14463                 tg3_flag_set(tp, FLASH);
14464                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14465                 tp->nvram_pagesize = 256;
14466                 break;
14467         case FLASH_5761VENDOR_ST_A_M45PE20:
14468         case FLASH_5761VENDOR_ST_A_M45PE40:
14469         case FLASH_5761VENDOR_ST_A_M45PE80:
14470         case FLASH_5761VENDOR_ST_A_M45PE16:
14471         case FLASH_5761VENDOR_ST_M_M45PE20:
14472         case FLASH_5761VENDOR_ST_M_M45PE40:
14473         case FLASH_5761VENDOR_ST_M_M45PE80:
14474         case FLASH_5761VENDOR_ST_M_M45PE16:
14475                 tp->nvram_jedecnum = JEDEC_ST;
14476                 tg3_flag_set(tp, NVRAM_BUFFERED);
14477                 tg3_flag_set(tp, FLASH);
14478                 tp->nvram_pagesize = 256;
14479                 break;
14480         }
14481
14482         if (protect) {
14483                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14484         } else {
14485                 switch (nvcfg1) {
14486                 case FLASH_5761VENDOR_ATMEL_ADB161D:
14487                 case FLASH_5761VENDOR_ATMEL_MDB161D:
14488                 case FLASH_5761VENDOR_ST_A_M45PE16:
14489                 case FLASH_5761VENDOR_ST_M_M45PE16:
14490                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14491                         break;
14492                 case FLASH_5761VENDOR_ATMEL_ADB081D:
14493                 case FLASH_5761VENDOR_ATMEL_MDB081D:
14494                 case FLASH_5761VENDOR_ST_A_M45PE80:
14495                 case FLASH_5761VENDOR_ST_M_M45PE80:
14496                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14497                         break;
14498                 case FLASH_5761VENDOR_ATMEL_ADB041D:
14499                 case FLASH_5761VENDOR_ATMEL_MDB041D:
14500                 case FLASH_5761VENDOR_ST_A_M45PE40:
14501                 case FLASH_5761VENDOR_ST_M_M45PE40:
14502                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14503                         break;
14504                 case FLASH_5761VENDOR_ATMEL_ADB021D:
14505                 case FLASH_5761VENDOR_ATMEL_MDB021D:
14506                 case FLASH_5761VENDOR_ST_A_M45PE20:
14507                 case FLASH_5761VENDOR_ST_M_M45PE20:
14508                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14509                         break;
14510                 }
14511         }
14512 }
14513
14514 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14515 {
14516         tp->nvram_jedecnum = JEDEC_ATMEL;
14517         tg3_flag_set(tp, NVRAM_BUFFERED);
14518         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14519 }
14520
14521 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14522 {
14523         u32 nvcfg1;
14524
14525         nvcfg1 = tr32(NVRAM_CFG1);
14526
14527         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14528         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14529         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14530                 tp->nvram_jedecnum = JEDEC_ATMEL;
14531                 tg3_flag_set(tp, NVRAM_BUFFERED);
14532                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14533
14534                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14535                 tw32(NVRAM_CFG1, nvcfg1);
14536                 return;
14537         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14538         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14539         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14540         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14541         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14542         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14543         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14544                 tp->nvram_jedecnum = JEDEC_ATMEL;
14545                 tg3_flag_set(tp, NVRAM_BUFFERED);
14546                 tg3_flag_set(tp, FLASH);
14547
14548                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14549                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14550                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14551                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14552                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14553                         break;
14554                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14555                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14556                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14557                         break;
14558                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14559                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14560                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14561                         break;
14562                 }
14563                 break;
14564         case FLASH_5752VENDOR_ST_M45PE10:
14565         case FLASH_5752VENDOR_ST_M45PE20:
14566         case FLASH_5752VENDOR_ST_M45PE40:
14567                 tp->nvram_jedecnum = JEDEC_ST;
14568                 tg3_flag_set(tp, NVRAM_BUFFERED);
14569                 tg3_flag_set(tp, FLASH);
14570
14571                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14572                 case FLASH_5752VENDOR_ST_M45PE10:
14573                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14574                         break;
14575                 case FLASH_5752VENDOR_ST_M45PE20:
14576                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14577                         break;
14578                 case FLASH_5752VENDOR_ST_M45PE40:
14579                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14580                         break;
14581                 }
14582                 break;
14583         default:
14584                 tg3_flag_set(tp, NO_NVRAM);
14585                 return;
14586         }
14587
14588         tg3_nvram_get_pagesize(tp, nvcfg1);
14589         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14590                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14591 }
14592
14594 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14595 {
14596         u32 nvcfg1;
14597
14598         nvcfg1 = tr32(NVRAM_CFG1);
14599
14600         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14601         case FLASH_5717VENDOR_ATMEL_EEPROM:
14602         case FLASH_5717VENDOR_MICRO_EEPROM:
14603                 tp->nvram_jedecnum = JEDEC_ATMEL;
14604                 tg3_flag_set(tp, NVRAM_BUFFERED);
14605                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14606
14607                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14608                 tw32(NVRAM_CFG1, nvcfg1);
14609                 return;
14610         case FLASH_5717VENDOR_ATMEL_MDB011D:
14611         case FLASH_5717VENDOR_ATMEL_ADB011B:
14612         case FLASH_5717VENDOR_ATMEL_ADB011D:
14613         case FLASH_5717VENDOR_ATMEL_MDB021D:
14614         case FLASH_5717VENDOR_ATMEL_ADB021B:
14615         case FLASH_5717VENDOR_ATMEL_ADB021D:
14616         case FLASH_5717VENDOR_ATMEL_45USPT:
14617                 tp->nvram_jedecnum = JEDEC_ATMEL;
14618                 tg3_flag_set(tp, NVRAM_BUFFERED);
14619                 tg3_flag_set(tp, FLASH);
14620
14621                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14622                 case FLASH_5717VENDOR_ATMEL_MDB021D:
14623                         /* Detect size with tg3_get_nvram_size() */
14624                         break;
14625                 case FLASH_5717VENDOR_ATMEL_ADB021B:
14626                 case FLASH_5717VENDOR_ATMEL_ADB021D:
14627                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14628                         break;
14629                 default:
14630                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14631                         break;
14632                 }
14633                 break;
14634         case FLASH_5717VENDOR_ST_M_M25PE10:
14635         case FLASH_5717VENDOR_ST_A_M25PE10:
14636         case FLASH_5717VENDOR_ST_M_M45PE10:
14637         case FLASH_5717VENDOR_ST_A_M45PE10:
14638         case FLASH_5717VENDOR_ST_M_M25PE20:
14639         case FLASH_5717VENDOR_ST_A_M25PE20:
14640         case FLASH_5717VENDOR_ST_M_M45PE20:
14641         case FLASH_5717VENDOR_ST_A_M45PE20:
14642         case FLASH_5717VENDOR_ST_25USPT:
14643         case FLASH_5717VENDOR_ST_45USPT:
14644                 tp->nvram_jedecnum = JEDEC_ST;
14645                 tg3_flag_set(tp, NVRAM_BUFFERED);
14646                 tg3_flag_set(tp, FLASH);
14647
14648                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14649                 case FLASH_5717VENDOR_ST_M_M25PE20:
14650                 case FLASH_5717VENDOR_ST_M_M45PE20:
14651                         /* Detect size with tg3_get_nvram_size() */
14652                         break;
14653                 case FLASH_5717VENDOR_ST_A_M25PE20:
14654                 case FLASH_5717VENDOR_ST_A_M45PE20:
14655                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14656                         break;
14657                 default:
14658                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14659                         break;
14660                 }
14661                 break;
14662         default:
14663                 tg3_flag_set(tp, NO_NVRAM);
14664                 return;
14665         }
14666
14667         tg3_nvram_get_pagesize(tp, nvcfg1);
14668         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14669                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14670 }
14671
14672 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14673 {
14674         u32 nvcfg1, nvmpinstrp;
14675
14676         nvcfg1 = tr32(NVRAM_CFG1);
14677         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14678
14679         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14680                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14681                         tg3_flag_set(tp, NO_NVRAM);
14682                         return;
14683                 }
14684
14685                 switch (nvmpinstrp) {
14686                 case FLASH_5762_EEPROM_HD:
14687                         nvmpinstrp = FLASH_5720_EEPROM_HD;
14688                         break;
14689                 case FLASH_5762_EEPROM_LD:
14690                         nvmpinstrp = FLASH_5720_EEPROM_LD;
14691                         break;
14692                 case FLASH_5720VENDOR_M_ST_M45PE20:
14693                         /* This pinstrap supports multiple sizes, so force it
14694                          * to read the actual size from location 0xf0.
14695                          */
14696                         nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14697                         break;
14698                 }
14699         }
14700
14701         switch (nvmpinstrp) {
14702         case FLASH_5720_EEPROM_HD:
14703         case FLASH_5720_EEPROM_LD:
14704                 tp->nvram_jedecnum = JEDEC_ATMEL;
14705                 tg3_flag_set(tp, NVRAM_BUFFERED);
14706
14707                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14708                 tw32(NVRAM_CFG1, nvcfg1);
14709                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14710                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14711                 else
14712                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14713                 return;
14714         case FLASH_5720VENDOR_M_ATMEL_DB011D:
14715         case FLASH_5720VENDOR_A_ATMEL_DB011B:
14716         case FLASH_5720VENDOR_A_ATMEL_DB011D:
14717         case FLASH_5720VENDOR_M_ATMEL_DB021D:
14718         case FLASH_5720VENDOR_A_ATMEL_DB021B:
14719         case FLASH_5720VENDOR_A_ATMEL_DB021D:
14720         case FLASH_5720VENDOR_M_ATMEL_DB041D:
14721         case FLASH_5720VENDOR_A_ATMEL_DB041B:
14722         case FLASH_5720VENDOR_A_ATMEL_DB041D:
14723         case FLASH_5720VENDOR_M_ATMEL_DB081D:
14724         case FLASH_5720VENDOR_A_ATMEL_DB081D:
14725         case FLASH_5720VENDOR_ATMEL_45USPT:
14726                 tp->nvram_jedecnum = JEDEC_ATMEL;
14727                 tg3_flag_set(tp, NVRAM_BUFFERED);
14728                 tg3_flag_set(tp, FLASH);
14729
14730                 switch (nvmpinstrp) {
14731                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14732                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14733                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14734                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14735                         break;
14736                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14737                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14738                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14739                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14740                         break;
14741                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14742                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14743                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14744                         break;
14745                 default:
14746                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14747                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14748                         break;
14749                 }
14750                 break;
14751         case FLASH_5720VENDOR_M_ST_M25PE10:
14752         case FLASH_5720VENDOR_M_ST_M45PE10:
14753         case FLASH_5720VENDOR_A_ST_M25PE10:
14754         case FLASH_5720VENDOR_A_ST_M45PE10:
14755         case FLASH_5720VENDOR_M_ST_M25PE20:
14756         case FLASH_5720VENDOR_M_ST_M45PE20:
14757         case FLASH_5720VENDOR_A_ST_M25PE20:
14758         case FLASH_5720VENDOR_A_ST_M45PE20:
14759         case FLASH_5720VENDOR_M_ST_M25PE40:
14760         case FLASH_5720VENDOR_M_ST_M45PE40:
14761         case FLASH_5720VENDOR_A_ST_M25PE40:
14762         case FLASH_5720VENDOR_A_ST_M45PE40:
14763         case FLASH_5720VENDOR_M_ST_M25PE80:
14764         case FLASH_5720VENDOR_M_ST_M45PE80:
14765         case FLASH_5720VENDOR_A_ST_M25PE80:
14766         case FLASH_5720VENDOR_A_ST_M45PE80:
14767         case FLASH_5720VENDOR_ST_25USPT:
14768         case FLASH_5720VENDOR_ST_45USPT:
14769                 tp->nvram_jedecnum = JEDEC_ST;
14770                 tg3_flag_set(tp, NVRAM_BUFFERED);
14771                 tg3_flag_set(tp, FLASH);
14772
14773                 switch (nvmpinstrp) {
14774                 case FLASH_5720VENDOR_M_ST_M25PE20:
14775                 case FLASH_5720VENDOR_M_ST_M45PE20:
14776                 case FLASH_5720VENDOR_A_ST_M25PE20:
14777                 case FLASH_5720VENDOR_A_ST_M45PE20:
14778                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14779                         break;
14780                 case FLASH_5720VENDOR_M_ST_M25PE40:
14781                 case FLASH_5720VENDOR_M_ST_M45PE40:
14782                 case FLASH_5720VENDOR_A_ST_M25PE40:
14783                 case FLASH_5720VENDOR_A_ST_M45PE40:
14784                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14785                         break;
14786                 case FLASH_5720VENDOR_M_ST_M25PE80:
14787                 case FLASH_5720VENDOR_M_ST_M45PE80:
14788                 case FLASH_5720VENDOR_A_ST_M25PE80:
14789                 case FLASH_5720VENDOR_A_ST_M45PE80:
14790                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14791                         break;
14792                 default:
14793                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14794                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14795                         break;
14796                 }
14797                 break;
14798         default:
14799                 tg3_flag_set(tp, NO_NVRAM);
14800                 return;
14801         }
14802
14803         tg3_nvram_get_pagesize(tp, nvcfg1);
14804         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14805                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14806
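        /* For the 5762, additionally sanity-check the signature word at
         * offset 0: anything other than the EEPROM magic or the firmware
         * magic means there is no usable NVRAM.
         */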
14807         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14808                 u32 val;
14809
14810                 if (tg3_nvram_read(tp, 0, &val))
14811                         return;
14812
14813                 if (val != TG3_EEPROM_MAGIC &&
14814                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14815                         tg3_flag_set(tp, NO_NVRAM);
14816         }
14817 }
14818
14819 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14820 static void tg3_nvram_init(struct tg3 *tp)
14821 {
14822         if (tg3_flag(tp, IS_SSB_CORE)) {
14823                 /* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
14824                 tg3_flag_clear(tp, NVRAM);
14825                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14826                 tg3_flag_set(tp, NO_NVRAM);
14827                 return;
14828         }
14829
14830         tw32_f(GRC_EEPROM_ADDR,
14831              (EEPROM_ADDR_FSM_RESET |
14832               (EEPROM_DEFAULT_CLOCK_PERIOD <<
14833                EEPROM_ADDR_CLKPERD_SHIFT)));
14834
14835         msleep(1);
14836
14837         /* Enable seeprom accesses. */
14838         tw32_f(GRC_LOCAL_CTRL,
14839              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14840         udelay(100);
14841
14842         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14843             tg3_asic_rev(tp) != ASIC_REV_5701) {
14844                 tg3_flag_set(tp, NVRAM);
14845
14846                 if (tg3_nvram_lock(tp)) {
14847                         netdev_warn(tp->dev,
14848                                     "Cannot get nvram lock, %s failed\n",
14849                                     __func__);
14850                         return;
14851                 }
14852                 tg3_enable_nvram_access(tp);
14853
14854                 tp->nvram_size = 0;
14855
14856                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14857                         tg3_get_5752_nvram_info(tp);
14858                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14859                         tg3_get_5755_nvram_info(tp);
14860                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14861                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
14862                          tg3_asic_rev(tp) == ASIC_REV_5785)
14863                         tg3_get_5787_nvram_info(tp);
14864                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14865                         tg3_get_5761_nvram_info(tp);
14866                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14867                         tg3_get_5906_nvram_info(tp);
14868                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14869                          tg3_flag(tp, 57765_CLASS))
14870                         tg3_get_57780_nvram_info(tp);
14871                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14872                          tg3_asic_rev(tp) == ASIC_REV_5719)
14873                         tg3_get_5717_nvram_info(tp);
14874                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14875                          tg3_asic_rev(tp) == ASIC_REV_5762)
14876                         tg3_get_5720_nvram_info(tp);
14877                 else
14878                         tg3_get_nvram_info(tp);
14879
14880                 if (tp->nvram_size == 0)
14881                         tg3_get_nvram_size(tp);
14882
14883                 tg3_disable_nvram_access(tp);
14884                 tg3_nvram_unlock(tp);
14885
14886         } else {
14887                 tg3_flag_clear(tp, NVRAM);
14888                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14889
14890                 tg3_get_eeprom_size(tp);
14891         }
14892 }
14893
14894 struct subsys_tbl_ent {
14895         u16 subsys_vendor, subsys_devid;
14896         u32 phy_id;
14897 };
14898
14899 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14900         /* Broadcom boards. */
14901         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14902           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14903         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14904           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14905         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14906           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14907         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14908           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14909         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14910           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14911         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14912           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14913         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14914           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14915         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14916           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14917         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14918           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14919         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14920           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14921         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14922           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14923
14924         /* 3com boards. */
14925         { TG3PCI_SUBVENDOR_ID_3COM,
14926           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14927         { TG3PCI_SUBVENDOR_ID_3COM,
14928           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14929         { TG3PCI_SUBVENDOR_ID_3COM,
14930           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14931         { TG3PCI_SUBVENDOR_ID_3COM,
14932           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14933         { TG3PCI_SUBVENDOR_ID_3COM,
14934           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14935
14936         /* DELL boards. */
14937         { TG3PCI_SUBVENDOR_ID_DELL,
14938           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14939         { TG3PCI_SUBVENDOR_ID_DELL,
14940           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14941         { TG3PCI_SUBVENDOR_ID_DELL,
14942           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14943         { TG3PCI_SUBVENDOR_ID_DELL,
14944           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14945
14946         /* Compaq boards. */
14947         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14948           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14949         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14950           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14951         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14952           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14953         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14954           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14955         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14956           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14957
14958         /* IBM boards. */
14959         { TG3PCI_SUBVENDOR_ID_IBM,
14960           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14961 };
14962
14963 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14964 {
14965         int i;
14966
14967         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14968                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14969                      tp->pdev->subsystem_vendor) &&
14970                     (subsys_id_to_phy_id[i].subsys_devid ==
14971                      tp->pdev->subsystem_device))
14972                         return &subsys_id_to_phy_id[i];
14973         }
14974         return NULL;
14975 }
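
/* Example lookup: a board with subsystem vendor TG3PCI_SUBVENDOR_ID_3COM
 * and subsystem device TG3PCI_SUBDEVICE_ID_3COM_3C996T matches the table
 * above and resolves to TG3_PHY_ID_BCM5401; boards not in the table yield
 * NULL.
 */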
14976
14977 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14978 {
14979         u32 val;
14980
14981         tp->phy_id = TG3_PHY_ID_INVALID;
14982         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14983
14984         /* Assume an onboard device and WOL capability by default.  */
14985         tg3_flag_set(tp, EEPROM_WRITE_PROT);
14986         tg3_flag_set(tp, WOL_CAP);
14987
14988         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14989                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14990                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14991                         tg3_flag_set(tp, IS_NIC);
14992                 }
14993                 val = tr32(VCPU_CFGSHDW);
14994                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14995                         tg3_flag_set(tp, ASPM_WORKAROUND);
14996                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14997                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14998                         tg3_flag_set(tp, WOL_ENABLE);
14999                         device_set_wakeup_enable(&tp->pdev->dev, true);
15000                 }
15001                 goto done;
15002         }
15003
15004         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15005         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15006                 u32 nic_cfg, led_cfg;
15007                 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15008                 u32 nic_phy_id, ver, eeprom_phy_id;
15009                 int eeprom_phy_serdes = 0;
15010
15011                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15012                 tp->nic_sram_data_cfg = nic_cfg;
15013
15014                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15015                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15016                 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15017                     tg3_asic_rev(tp) != ASIC_REV_5701 &&
15018                     tg3_asic_rev(tp) != ASIC_REV_5703 &&
15019                     (ver > 0) && (ver < 0x100))
15020                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15021
15022                 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15023                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15024
15025                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15026                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
15027                     tg3_asic_rev(tp) == ASIC_REV_5720)
15028                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15029
15030                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15031                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15032                         eeprom_phy_serdes = 1;
15033
15034                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15035                 if (nic_phy_id != 0) {
15036                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15037                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15038
15039                         eeprom_phy_id  = (id1 >> 16) << 10;
15040                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
15041                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15042                 } else
15043                         eeprom_phy_id = 0;
15044
15045                 tp->phy_id = eeprom_phy_id;
15046                 if (eeprom_phy_serdes) {
15047                         if (!tg3_flag(tp, 5705_PLUS))
15048                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15049                         else
15050                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15051                 }
15052
15053                 if (tg3_flag(tp, 5750_PLUS))
15054                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15055                                     SHASTA_EXT_LED_MODE_MASK);
15056                 else
15057                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15058
15059                 switch (led_cfg) {
15060                 default:
15061                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15062                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15063                         break;
15064
15065                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15066                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15067                         break;
15068
15069                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15070                         tp->led_ctrl = LED_CTRL_MODE_MAC;
15071
15072                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
15073                          * as some older 5700/5701 bootcode leaves it so.
15074                          */
15075                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15076                             tg3_asic_rev(tp) == ASIC_REV_5701)
15077                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15078
15079                         break;
15080
15081                 case SHASTA_EXT_LED_SHARED:
15082                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
15083                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15084                             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15085                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15086                                                  LED_CTRL_MODE_PHY_2);
15087
15088                         if (tg3_flag(tp, 5717_PLUS) ||
15089                             tg3_asic_rev(tp) == ASIC_REV_5762)
15090                                 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15091                                                 LED_CTRL_BLINK_RATE_MASK;
15092
15093                         break;
15094
15095                 case SHASTA_EXT_LED_MAC:
15096                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15097                         break;
15098
15099                 case SHASTA_EXT_LED_COMBO:
15100                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
15101                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15102                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15103                                                  LED_CTRL_MODE_PHY_2);
15104                         break;
15105
15106                 }
15107
15108                 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15109                      tg3_asic_rev(tp) == ASIC_REV_5701) &&
15110                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15111                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15112
15113                 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15114                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15115
15116                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15117                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15118                         if ((tp->pdev->subsystem_vendor ==
15119                              PCI_VENDOR_ID_ARIMA) &&
15120                             (tp->pdev->subsystem_device == 0x205a ||
15121                              tp->pdev->subsystem_device == 0x2063))
15122                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15123                 } else {
15124                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15125                         tg3_flag_set(tp, IS_NIC);
15126                 }
15127
15128                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15129                         tg3_flag_set(tp, ENABLE_ASF);
15130                         if (tg3_flag(tp, 5750_PLUS))
15131                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15132                 }
15133
15134                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15135                     tg3_flag(tp, 5750_PLUS))
15136                         tg3_flag_set(tp, ENABLE_APE);
15137
15138                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15139                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15140                         tg3_flag_clear(tp, WOL_CAP);
15141
15142                 if (tg3_flag(tp, WOL_CAP) &&
15143                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15144                         tg3_flag_set(tp, WOL_ENABLE);
15145                         device_set_wakeup_enable(&tp->pdev->dev, true);
15146                 }
15147
15148                 if (cfg2 & (1 << 17))
15149                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15150
15151                 /* Serdes signal pre-emphasis in register 0x590 is set by
15152                  * the bootcode if bit 18 is set. */
15153                 if (cfg2 & (1 << 18))
15154                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15155
15156                 if ((tg3_flag(tp, 57765_PLUS) ||
15157                      (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15158                       tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15159                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15160                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15161
15162                 if (tg3_flag(tp, PCI_EXPRESS)) {
15163                         u32 cfg3;
15164
15165                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15166                         if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15167                             !tg3_flag(tp, 57765_PLUS) &&
15168                             (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15169                                 tg3_flag_set(tp, ASPM_WORKAROUND);
15170                         if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15171                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15172                         if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15173                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15174                 }
15175
15176                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15177                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15178                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15179                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15180                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15181                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15182
15183                 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15184                         tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15185         }
15186 done:
15187         if (tg3_flag(tp, WOL_CAP))
15188                 device_set_wakeup_enable(&tp->pdev->dev,
15189                                          tg3_flag(tp, WOL_ENABLE));
15190         else
15191                 device_set_wakeup_capable(&tp->pdev->dev, false);
15192 }
15193
15194 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15195 {
15196         int i, err;
15197         u32 val2, off = offset * 8;
15198
15199         err = tg3_nvram_lock(tp);
15200         if (err)
15201                 return err;
15202
15203         tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15204         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15205                         APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15206         tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15207         udelay(10);
15208
15209         for (i = 0; i < 100; i++) {
15210                 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15211                 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15212                         *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15213                         break;
15214                 }
15215                 udelay(10);
15216         }
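        /* The loop above polls for at most 100 * 10 us = 1 ms; on timeout
         * val2 never shows APE_OTP_STATUS_CMD_DONE and we return -EBUSY
         * below.
         */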
15217
15218         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15219
15220         tg3_nvram_unlock(tp);
15221         if (val2 & APE_OTP_STATUS_CMD_DONE)
15222                 return 0;
15223
15224         return -EBUSY;
15225 }
15226
15227 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15228 {
15229         int i;
15230         u32 val;
15231
15232         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15233         tw32(OTP_CTRL, cmd);
15234
15235         /* Wait for up to 1 ms for command to execute. */
15236         for (i = 0; i < 100; i++) {
15237                 val = tr32(OTP_STATUS);
15238                 if (val & OTP_STATUS_CMD_DONE)
15239                         break;
15240                 udelay(10);
15241         }
15242
15243         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15244 }
15245
15246 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15247  * configuration is a 32-bit value that straddles the alignment boundary.
15248  * We do two 32-bit reads and then shift and merge the results.
15249  */
15250 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15251 {
15252         u32 bhalf_otp, thalf_otp;
15253
15254         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15255
15256         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15257                 return 0;
15258
15259         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15260
15261         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15262                 return 0;
15263
15264         thalf_otp = tr32(OTP_READ_DATA);
15265
15266         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15267
15268         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15269                 return 0;
15270
15271         bhalf_otp = tr32(OTP_READ_DATA);
15272
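        /* Merge the two halves; e.g. (values assumed) thalf_otp =
         * 0xaaaa1234 and bhalf_otp = 0x5678bbbb combine to 0x12345678.
         */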
15273         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15274 }
15275
15276 static void tg3_phy_init_link_config(struct tg3 *tp)
15277 {
15278         u32 adv = ADVERTISED_Autoneg;
15279
15280         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15281                 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15282                         adv |= ADVERTISED_1000baseT_Half;
15283                 adv |= ADVERTISED_1000baseT_Full;
15284         }
15285
15286         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15287                 adv |= ADVERTISED_100baseT_Half |
15288                        ADVERTISED_100baseT_Full |
15289                        ADVERTISED_10baseT_Half |
15290                        ADVERTISED_10baseT_Full |
15291                        ADVERTISED_TP;
15292         else
15293                 adv |= ADVERTISED_FIBRE;
15294
15295         tp->link_config.advertising = adv;
15296         tp->link_config.speed = SPEED_UNKNOWN;
15297         tp->link_config.duplex = DUPLEX_UNKNOWN;
15298         tp->link_config.autoneg = AUTONEG_ENABLE;
15299         tp->link_config.active_speed = SPEED_UNKNOWN;
15300         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15301
15302         tp->old_link = -1;
15303 }
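
/* Example masks from tg3_phy_init_link_config() above: a copper PHY with
 * no restricting phy_flags advertises Autoneg, 10/100/1000 at half and
 * full duplex, plus TP; a serdes device instead advertises Autoneg,
 * 1000baseT half/full and FIBRE.
 */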
15304
15305 static int tg3_phy_probe(struct tg3 *tp)
15306 {
15307         u32 hw_phy_id_1, hw_phy_id_2;
15308         u32 hw_phy_id, hw_phy_id_masked;
15309         int err;
15310
15311         /* flow control autonegotiation is default behavior */
15312         tg3_flag_set(tp, PAUSE_AUTONEG);
15313         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15314
15315         if (tg3_flag(tp, ENABLE_APE)) {
15316                 switch (tp->pci_fn) {
15317                 case 0:
15318                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15319                         break;
15320                 case 1:
15321                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15322                         break;
15323                 case 2:
15324                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15325                         break;
15326                 case 3:
15327                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15328                         break;
15329                 }
15330         }
15331
15332         if (!tg3_flag(tp, ENABLE_ASF) &&
15333             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15334             !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15335                 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15336                                    TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15337
15338         if (tg3_flag(tp, USE_PHYLIB))
15339                 return tg3_phy_init(tp);
15340
15341         /* Reading the PHY ID register can conflict with ASF
15342          * firmware access to the PHY hardware.
15343          */
15344         err = 0;
15345         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15346                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15347         } else {
15348                 /* Now read the physical PHY_ID from the chip and verify
15349                  * that it is sane.  If it doesn't look good, we fall back
15350                  * to the PHY_ID found in the eeprom area and, failing
15351                  * that, the hard-coded subsystem-ID table.
15352                  */
15353                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15354                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15355
15356                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15357                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15358                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
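                /* Illustration (hypothetical register values):
                 * MII_PHYSID1 = 0x0143 and MII_PHYSID2 = 0xbca0 combine as
                 *   (0x0143 & 0xffff) << 10  = 0x00050c00
                 *   (0xbca0 & 0xfc00) << 16  = 0xbc000000
                 *   (0xbca0 & 0x03ff) <<  0  = 0x000000a0
                 * giving hw_phy_id = 0xbc050ca0.
                 */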
15359
15360                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15361         }
15362
15363         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15364                 tp->phy_id = hw_phy_id;
15365                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15366                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15367                 else
15368                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15369         } else {
15370                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15371                         /* Do nothing, phy ID already set up in
15372                          * tg3_get_eeprom_hw_cfg().
15373                          */
15374                 } else {
15375                         struct subsys_tbl_ent *p;
15376
15377                         /* No eeprom signature?  Try the hardcoded
15378                          * subsys device table.
15379                          */
15380                         p = tg3_lookup_by_subsys(tp);
15381                         if (p) {
15382                                 tp->phy_id = p->phy_id;
15383                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15384                                 /* For now we saw the IDs 0xbc050cd0,
15385                                  * 0xbc050f80 and 0xbc050c30 on devices
15386                  * connected to a BCM4785 and there are
15387                  * probably more. Just assume that the phy is
15388                  * supported when it is connected to an SSB core
15389                                  * for now.
15390                                  */
15391                                 return -ENODEV;
15392                         }
15393
15394                         if (!tp->phy_id ||
15395                             tp->phy_id == TG3_PHY_ID_BCM8002)
15396                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15397                 }
15398         }
15399
15400         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15401             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15402              tg3_asic_rev(tp) == ASIC_REV_5720 ||
15403              tg3_asic_rev(tp) == ASIC_REV_57766 ||
15404              tg3_asic_rev(tp) == ASIC_REV_5762 ||
15405              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15406               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15407              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15408               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15409                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15410
15411                 tp->eee.supported = SUPPORTED_100baseT_Full |
15412                                     SUPPORTED_1000baseT_Full;
15413                 tp->eee.advertised = ADVERTISED_100baseT_Full |
15414                                      ADVERTISED_1000baseT_Full;
15415                 tp->eee.eee_enabled = 1;
15416                 tp->eee.tx_lpi_enabled = 1;
15417                 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15418         }
15419
15420         tg3_phy_init_link_config(tp);
15421
15422         if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15423             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15424             !tg3_flag(tp, ENABLE_APE) &&
15425             !tg3_flag(tp, ENABLE_ASF)) {
15426                 u32 bmsr, dummy;
15427
15428                 tg3_readphy(tp, MII_BMSR, &bmsr);
15429                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15430                     (bmsr & BMSR_LSTATUS))
15431                         goto skip_phy_reset;
15432
15433                 err = tg3_phy_reset(tp);
15434                 if (err)
15435                         return err;
15436
15437                 tg3_phy_set_wirespeed(tp);
15438
15439                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15440                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15441                                             tp->link_config.flowctrl);
15442
15443                         tg3_writephy(tp, MII_BMCR,
15444                                      BMCR_ANENABLE | BMCR_ANRESTART);
15445                 }
15446         }
15447
15448 skip_phy_reset:
15449         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15450                 err = tg3_init_5401phy_dsp(tp);
15451                 if (err)
15452                         return err;
15453
15454                 err = tg3_init_5401phy_dsp(tp);
15455         }
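              /* Note: tg3_init_5401phy_dsp() above runs twice on purpose;
               * the driver has always retried it once for the 5401
               * (assumption: the first pass may not take right after a
               * reset).
               */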
15456
15457         return err;
15458 }
15459
15460 static void tg3_read_vpd(struct tg3 *tp)
15461 {
15462         u8 *vpd_data;
15463         unsigned int block_end, rosize, len;
15464         u32 vpdlen;
15465         int j, i = 0;
15466
15467         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15468         if (!vpd_data)
15469                 goto out_no_vpd;
15470
15471         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15472         if (i < 0)
15473                 goto out_not_found;
15474
15475         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15476         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15477         i += PCI_VPD_LRDT_TAG_SIZE;
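              /* Illustrative layout of the read-only VPD section walked
               * below (sizes per the pci_vpd_* helpers): a 3-byte header
               * of [1-byte LRDT tag][2-byte length], then repeated fields
               * of [2-byte keyword][1-byte length][data].
               */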
15478
15479         if (block_end > vpdlen)
15480                 goto out_not_found;
15481
15482         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15483                                       PCI_VPD_RO_KEYWORD_MFR_ID);
15484         if (j > 0) {
15485                 len = pci_vpd_info_field_size(&vpd_data[j]);
15486
15487                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15488                 if (j + len > block_end || len != 4 ||
15489                     memcmp(&vpd_data[j], "1028", 4))
15490                         goto partno;
15491
15492                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15493                                               PCI_VPD_RO_KEYWORD_VENDOR0);
15494                 if (j < 0)
15495                         goto partno;
15496
15497                 len = pci_vpd_info_field_size(&vpd_data[j]);
15498
15499                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15500                 if (j + len > block_end)
15501                         goto partno;
15502
15503                 if (len >= sizeof(tp->fw_ver))
15504                         len = sizeof(tp->fw_ver) - 1;
15505                 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15506                 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15507                          &vpd_data[j]);
15508         }
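              /* "1028" above is Dell's PCI vendor ID in ASCII, so the
               * VENDOR0 firmware-version field is only trusted on
               * Dell-branded boards.
               */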
15509
15510 partno:
15511         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15512                                       PCI_VPD_RO_KEYWORD_PARTNO);
15513         if (i < 0)
15514                 goto out_not_found;
15515
15516         len = pci_vpd_info_field_size(&vpd_data[i]);
15517
15518         i += PCI_VPD_INFO_FLD_HDR_SIZE;
15519         if (len > TG3_BPN_SIZE ||
15520             (len + i) > vpdlen)
15521                 goto out_not_found;
15522
15523         memcpy(tp->board_part_number, &vpd_data[i], len);
15524
15525 out_not_found:
15526         kfree(vpd_data);
15527         if (tp->board_part_number[0])
15528                 return;
15529
15530 out_no_vpd:
15531         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15532                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15533                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15534                         strcpy(tp->board_part_number, "BCM5717");
15535                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15536                         strcpy(tp->board_part_number, "BCM5718");
15537                 else
15538                         goto nomatch;
15539         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15540                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15541                         strcpy(tp->board_part_number, "BCM57780");
15542                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15543                         strcpy(tp->board_part_number, "BCM57760");
15544                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15545                         strcpy(tp->board_part_number, "BCM57790");
15546                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15547                         strcpy(tp->board_part_number, "BCM57788");
15548                 else
15549                         goto nomatch;
15550         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15551                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15552                         strcpy(tp->board_part_number, "BCM57761");
15553                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15554                         strcpy(tp->board_part_number, "BCM57765");
15555                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15556                         strcpy(tp->board_part_number, "BCM57781");
15557                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15558                         strcpy(tp->board_part_number, "BCM57785");
15559                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15560                         strcpy(tp->board_part_number, "BCM57791");
15561                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15562                         strcpy(tp->board_part_number, "BCM57795");
15563                 else
15564                         goto nomatch;
15565         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15566                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15567                         strcpy(tp->board_part_number, "BCM57762");
15568                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15569                         strcpy(tp->board_part_number, "BCM57766");
15570                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15571                         strcpy(tp->board_part_number, "BCM57782");
15572                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15573                         strcpy(tp->board_part_number, "BCM57786");
15574                 else
15575                         goto nomatch;
15576         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15577                 strcpy(tp->board_part_number, "BCM95906");
15578         } else {
15579 nomatch:
15580                 strcpy(tp->board_part_number, "none");
15581         }
15582 }
15583
15584 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15585 {
15586         u32 val;
15587
15588         if (tg3_nvram_read(tp, offset, &val) ||
15589             (val & 0xfc000000) != 0x0c000000 ||
15590             tg3_nvram_read(tp, offset + 4, &val) ||
15591             val != 0)
15592                 return 0;
15593
15594         return 1;
15595 }
15596
15597 static void tg3_read_bc_ver(struct tg3 *tp)
15598 {
15599         u32 val, offset, start, ver_offset;
15600         int i, dst_off;
15601         bool newver = false;
15602
15603         if (tg3_nvram_read(tp, 0xc, &offset) ||
15604             tg3_nvram_read(tp, 0x4, &start))
15605                 return;
15606
15607         offset = tg3_nvram_logical_addr(tp, offset);
15608
15609         if (tg3_nvram_read(tp, offset, &val))
15610                 return;
15611
15612         if ((val & 0xfc000000) == 0x0c000000) {
15613                 if (tg3_nvram_read(tp, offset + 4, &val))
15614                         return;
15615
15616                 if (val == 0)
15617                         newver = true;
15618         }
15619
15620         dst_off = strlen(tp->fw_ver);
15621
15622         if (newver) {
15623                 if (TG3_VER_SIZE - dst_off < 16 ||
15624                     tg3_nvram_read(tp, offset + 8, &ver_offset))
15625                         return;
15626
15627                 offset = offset + ver_offset - start;
15628                 for (i = 0; i < 16; i += 4) {
15629                         __be32 v;
15630                         if (tg3_nvram_read_be32(tp, offset + i, &v))
15631                                 return;
15632
15633                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15634                 }
15635         } else {
15636                 u32 major, minor;
15637
15638                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15639                         return;
15640
15641                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15642                         TG3_NVM_BCVER_MAJSFT;
15643                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15644                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15645                          "v%d.%02d", major, minor);
15646         }
15647 }
15648
15649 static void tg3_read_hwsb_ver(struct tg3 *tp)
15650 {
15651         u32 val, major, minor;
15652
15653         /* Use native endian representation */
15654         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15655                 return;
15656
15657         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15658                 TG3_NVM_HWSB_CFG1_MAJSFT;
15659         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15660                 TG3_NVM_HWSB_CFG1_MINSFT;
15661
15662         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15663 }
15664
15665 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15666 {
15667         u32 offset, major, minor, build;
15668
15669         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15670
15671         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15672                 return;
15673
15674         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15675         case TG3_EEPROM_SB_REVISION_0:
15676                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15677                 break;
15678         case TG3_EEPROM_SB_REVISION_2:
15679                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15680                 break;
15681         case TG3_EEPROM_SB_REVISION_3:
15682                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15683                 break;
15684         case TG3_EEPROM_SB_REVISION_4:
15685                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15686                 break;
15687         case TG3_EEPROM_SB_REVISION_5:
15688                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15689                 break;
15690         case TG3_EEPROM_SB_REVISION_6:
15691                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15692                 break;
15693         default:
15694                 return;
15695         }
15696
15697         if (tg3_nvram_read(tp, offset, &val))
15698                 return;
15699
15700         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15701                 TG3_EEPROM_SB_EDH_BLD_SHFT;
15702         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15703                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15704         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15705
15706         if (minor > 99 || build > 26)
15707                 return;
15708
15709         offset = strlen(tp->fw_ver);
15710         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15711                  " v%d.%02d", major, minor);
15712
15713         if (build > 0) {
15714                 offset = strlen(tp->fw_ver);
15715                 if (offset < TG3_VER_SIZE - 1)
15716                         tp->fw_ver[offset] = 'a' + build - 1;
15717         }
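              /* Worked example (illustrative): major=1, minor=2, build=1
               * appends " v1.02" and then the suffix 'a', so fw_ver ends
               * in "sb v1.02a".
               */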
15718 }
15719
15720 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15721 {
15722         u32 val, offset, start;
15723         int i, vlen;
15724
15725         for (offset = TG3_NVM_DIR_START;
15726              offset < TG3_NVM_DIR_END;
15727              offset += TG3_NVM_DIRENT_SIZE) {
15728                 if (tg3_nvram_read(tp, offset, &val))
15729                         return;
15730
15731                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15732                         break;
15733         }
15734
15735         if (offset == TG3_NVM_DIR_END)
15736                 return;
15737
15738         if (!tg3_flag(tp, 5705_PLUS))
15739                 start = 0x08000000;
15740         else if (tg3_nvram_read(tp, offset - 4, &start))
15741                 return;
15742
15743         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15744             !tg3_fw_img_is_valid(tp, offset) ||
15745             tg3_nvram_read(tp, offset + 8, &val))
15746                 return;
15747
15748         offset += val - start;
15749
15750         vlen = strlen(tp->fw_ver);
15751
15752         tp->fw_ver[vlen++] = ',';
15753         tp->fw_ver[vlen++] = ' ';
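              /* fw_ver is built up in place: the bootcode text already
               * present, then ", ", then the 16 ASCII bytes of management
               * firmware version copied 4 bytes at a time below (clamped
               * to TG3_VER_SIZE).
               */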
15754
15755         for (i = 0; i < 4; i++) {
15756                 __be32 v;
15757                 if (tg3_nvram_read_be32(tp, offset, &v))
15758                         return;
15759
15760                 offset += sizeof(v);
15761
15762                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15763                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15764                         break;
15765                 }
15766
15767                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15768                 vlen += sizeof(v);
15769         }
15770 }
15771
15772 static void tg3_probe_ncsi(struct tg3 *tp)
15773 {
15774         u32 apedata;
15775
15776         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15777         if (apedata != APE_SEG_SIG_MAGIC)
15778                 return;
15779
15780         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15781         if (!(apedata & APE_FW_STATUS_READY))
15782                 return;
15783
15784         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15785                 tg3_flag_set(tp, APE_HAS_NCSI);
15786 }
15787
15788 static void tg3_read_dash_ver(struct tg3 *tp)
15789 {
15790         int vlen;
15791         u32 apedata;
15792         char *fwtype;
15793
15794         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15795
15796         if (tg3_flag(tp, APE_HAS_NCSI))
15797                 fwtype = "NCSI";
15798         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15799                 fwtype = "SMASH";
15800         else
15801                 fwtype = "DASH";
15802
15803         vlen = strlen(tp->fw_ver);
15804
15805         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15806                  fwtype,
15807                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15808                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15809                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15810                  (apedata & APE_FW_VERSION_BLDMSK));
15811 }
15812
15813 static void tg3_read_otp_ver(struct tg3 *tp)
15814 {
15815         u32 val, val2;
15816
15817         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15818                 return;
15819
15820         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15821             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15822             TG3_OTP_MAGIC0_VALID(val)) {
15823                 u64 val64 = (u64) val << 32 | val2;
15824                 u32 ver = 0;
15825                 int i, vlen;
15826
15827                 for (i = 0; i < 7; i++) {
15828                         if ((val64 & 0xff) == 0)
15829                                 break;
15830                         ver = val64 & 0xff;
15831                         val64 >>= 8;
15832                 }
15833                 vlen = strlen(tp->fw_ver);
15834                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15835         }
15836 }
15837
15838 static void tg3_read_fw_ver(struct tg3 *tp)
15839 {
15840         u32 val;
15841         bool vpd_vers = false;
15842
15843         if (tp->fw_ver[0] != 0)
15844                 vpd_vers = true;
15845
15846         if (tg3_flag(tp, NO_NVRAM)) {
15847                 strcat(tp->fw_ver, "sb");
15848                 tg3_read_otp_ver(tp);
15849                 return;
15850         }
15851
15852         if (tg3_nvram_read(tp, 0, &val))
15853                 return;
15854
15855         if (val == TG3_EEPROM_MAGIC)
15856                 tg3_read_bc_ver(tp);
15857         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15858                 tg3_read_sb_ver(tp, val);
15859         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15860                 tg3_read_hwsb_ver(tp);
15861
15862         if (tg3_flag(tp, ENABLE_ASF)) {
15863                 if (tg3_flag(tp, ENABLE_APE)) {
15864                         tg3_probe_ncsi(tp);
15865                         if (!vpd_vers)
15866                                 tg3_read_dash_ver(tp);
15867                 } else if (!vpd_vers) {
15868                         tg3_read_mgmtfw_ver(tp);
15869                 }
15870         }
15871
15872         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15873 }
15874
15875 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15876 {
15877         if (tg3_flag(tp, LRG_PROD_RING_CAP))
15878                 return TG3_RX_RET_MAX_SIZE_5717;
15879         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15880                 return TG3_RX_RET_MAX_SIZE_5700;
15881         else
15882                 return TG3_RX_RET_MAX_SIZE_5705;
15883 }
15884
15885 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15886         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15887         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15888         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15889         { },
15890 };
15891
15892 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15893 {
15894         struct pci_dev *peer;
15895         unsigned int func, devnr = tp->pdev->devfn & ~7;
15896
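              /* devfn packs slot and function as (slot << 3) | func, so
               * masking off the low three bits above yields function 0 of
               * our own slot; the loop probes all eight sibling functions.
               */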
15897         for (func = 0; func < 8; func++) {
15898                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15899                 if (peer && peer != tp->pdev)
15900                         break;
15901                 pci_dev_put(peer);
15902         }
15903         /* The 5704 can be configured in single-port mode; set peer to
15904          * tp->pdev in that case.
15905          */
15906         if (!peer) {
15907                 peer = tp->pdev;
15908                 return peer;
15909         }
15910
15911         /*
15912          * We don't need to keep the refcount elevated; there's no way
15913          * to remove one half of this device without removing the other.
15914          */
15915         pci_dev_put(peer);
15916
15917         return peer;
15918 }
15919
15920 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15921 {
15922         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
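              /* A sketch of how this value is consumed elsewhere: the
               * tg3_asic_rev()/tg3_chip_rev()/tg3_chip_rev_id() helpers
               * read it at >>12, >>8 and in full -- ASIC family, chip
               * revision and exact stepping respectively.
               */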
15923         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15924                 u32 reg;
15925
15926                 /* All devices that use the alternate
15927                  * ASIC REV location have a CPMU.
15928                  */
15929                 tg3_flag_set(tp, CPMU_PRESENT);
15930
15931                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15932                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15933                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15934                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15935                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15936                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
15937                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
15938                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15939                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15940                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
15941                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
15942                         reg = TG3PCI_GEN2_PRODID_ASICREV;
15943                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15944                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15945                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15946                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15947                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15948                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15949                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15950                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15951                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15952                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15953                         reg = TG3PCI_GEN15_PRODID_ASICREV;
15954                 else
15955                         reg = TG3PCI_PRODID_ASICREV;
15956
15957                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15958         }
15959
15960         /* Wrong chip ID in 5752 A0. This code can be removed later
15961          * as A0 is not in production.
15962          */
15963         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15964                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15965
15966         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15967                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15968
15969         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15970             tg3_asic_rev(tp) == ASIC_REV_5719 ||
15971             tg3_asic_rev(tp) == ASIC_REV_5720)
15972                 tg3_flag_set(tp, 5717_PLUS);
15973
15974         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15975             tg3_asic_rev(tp) == ASIC_REV_57766)
15976                 tg3_flag_set(tp, 57765_CLASS);
15977
15978         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15979              tg3_asic_rev(tp) == ASIC_REV_5762)
15980                 tg3_flag_set(tp, 57765_PLUS);
15981
15982         /* Intentionally exclude ASIC_REV_5906 */
15983         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15984             tg3_asic_rev(tp) == ASIC_REV_5787 ||
15985             tg3_asic_rev(tp) == ASIC_REV_5784 ||
15986             tg3_asic_rev(tp) == ASIC_REV_5761 ||
15987             tg3_asic_rev(tp) == ASIC_REV_5785 ||
15988             tg3_asic_rev(tp) == ASIC_REV_57780 ||
15989             tg3_flag(tp, 57765_PLUS))
15990                 tg3_flag_set(tp, 5755_PLUS);
15991
15992         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15993             tg3_asic_rev(tp) == ASIC_REV_5714)
15994                 tg3_flag_set(tp, 5780_CLASS);
15995
15996         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15997             tg3_asic_rev(tp) == ASIC_REV_5752 ||
15998             tg3_asic_rev(tp) == ASIC_REV_5906 ||
15999             tg3_flag(tp, 5755_PLUS) ||
16000             tg3_flag(tp, 5780_CLASS))
16001                 tg3_flag_set(tp, 5750_PLUS);
16002
16003         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16004             tg3_flag(tp, 5750_PLUS))
16005                 tg3_flag_set(tp, 5705_PLUS);
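              /* Net effect of the cascade above: 5717_PLUS and 57765_CLASS
               * feed 57765_PLUS, which feeds 5755_PLUS, then 5750_PLUS,
               * then 5705_PLUS -- each newer family inherits the older
               * families' capability flags.
               */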
16006 }
16007
16008 static bool tg3_10_100_only_device(struct tg3 *tp,
16009                                    const struct pci_device_id *ent)
16010 {
16011         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16012
16013         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16014              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16015             (tp->phy_flags & TG3_PHYFLG_IS_FET))
16016                 return true;
16017
16018         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16019                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16020                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16021                                 return true;
16022                 } else {
16023                         return true;
16024                 }
16025         }
16026
16027         return false;
16028 }
16029
16030 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16031 {
16032         u32 misc_ctrl_reg;
16033         u32 pci_state_reg, grc_misc_cfg;
16034         u32 val;
16035         u16 pci_cmd;
16036         int err;
16037
16038         /* Force memory write invalidate off.  If we leave it on,
16039          * then on 5700_BX chips we have to enable a workaround.
16040          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16041          * to match the cacheline size.  The Broadcom driver has this
16042          * workaround but turns MWI off all the time, so it never uses
16043          * it.  This seems to suggest that the workaround is insufficient.
16044          */
16045         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16046         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16047         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16048
16049         /* Important! -- Make sure register accesses are byteswapped
16050          * correctly.  Also, for those chips that require it, make
16051          * sure that indirect register accesses are enabled before
16052          * the first operation.
16053          */
16054         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16055                               &misc_ctrl_reg);
16056         tp->misc_host_ctrl |= (misc_ctrl_reg &
16057                                MISC_HOST_CTRL_CHIPREV);
16058         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16059                                tp->misc_host_ctrl);
16060
16061         tg3_detect_asic_rev(tp, misc_ctrl_reg);
16062
16063          * we need to disable memory and use configuration cycles
16064          * we need to disable memory and use config. cycles
16065          * only to access all registers. The 5702/03 chips
16066          * can mistakenly decode the special cycles from the
16067          * ICH chipsets as memory write cycles, causing corruption
16068          * of register and memory space. Only certain ICH bridges
16069          * will drive special cycles with non-zero data during the
16070          * address phase which can fall within the 5703's address
16071          * range. This is not an ICH bug as the PCI spec allows
16072          * non-zero address during special cycles. However, only
16073          * these ICH bridges are known to drive non-zero addresses
16074          * during special cycles.
16075          *
16076          * Since special cycles do not cross PCI bridges, we only
16077          * enable this workaround if the 5703 is on the secondary
16078          * bus of these ICH bridges.
16079          */
16080         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16081             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16082                 static struct tg3_dev_id {
16083                         u32     vendor;
16084                         u32     device;
16085                         u32     rev;
16086                 } ich_chipsets[] = {
16087                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16088                           PCI_ANY_ID },
16089                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16090                           PCI_ANY_ID },
16091                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16092                           0xa },
16093                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16094                           PCI_ANY_ID },
16095                         { },
16096                 };
16097                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16098                 struct pci_dev *bridge = NULL;
16099
16100                 while (pci_id->vendor != 0) {
16101                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16102                                                 bridge);
16103                         if (!bridge) {
16104                                 pci_id++;
16105                                 continue;
16106                         }
16107                         if (pci_id->rev != PCI_ANY_ID) {
16108                                 if (bridge->revision > pci_id->rev)
16109                                         continue;
16110                         }
16111                         if (bridge->subordinate &&
16112                             (bridge->subordinate->number ==
16113                              tp->pdev->bus->number)) {
16114                                 tg3_flag_set(tp, ICH_WORKAROUND);
16115                                 pci_dev_put(bridge);
16116                                 break;
16117                         }
16118                 }
16119         }
16120
16121         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16122                 static struct tg3_dev_id {
16123                         u32     vendor;
16124                         u32     device;
16125                 } bridge_chipsets[] = {
16126                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16127                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16128                         { },
16129                 };
16130                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16131                 struct pci_dev *bridge = NULL;
16132
16133                 while (pci_id->vendor != 0) {
16134                         bridge = pci_get_device(pci_id->vendor,
16135                                                 pci_id->device,
16136                                                 bridge);
16137                         if (!bridge) {
16138                                 pci_id++;
16139                                 continue;
16140                         }
16141                         if (bridge->subordinate &&
16142                             (bridge->subordinate->number <=
16143                              tp->pdev->bus->number) &&
16144                             (bridge->subordinate->busn_res.end >=
16145                              tp->pdev->bus->number)) {
16146                                 tg3_flag_set(tp, 5701_DMA_BUG);
16147                                 pci_dev_put(bridge);
16148                                 break;
16149                         }
16150                 }
16151         }
16152
16153         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16154          * DMA addresses > 40-bit. This bridge may have additional
16155          * 57xx devices behind it in some 4-port NIC designs, for example.
16156          * Any tg3 device found behind the bridge will also need the 40-bit
16157          * DMA workaround.
16158          */
16159         if (tg3_flag(tp, 5780_CLASS)) {
16160                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16161                 tp->msi_cap = tp->pdev->msi_cap;
16162         } else {
16163                 struct pci_dev *bridge = NULL;
16164
16165                 do {
16166                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16167                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16168                                                 bridge);
16169                         if (bridge && bridge->subordinate &&
16170                             (bridge->subordinate->number <=
16171                              tp->pdev->bus->number) &&
16172                             (bridge->subordinate->busn_res.end >=
16173                              tp->pdev->bus->number)) {
16174                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16175                                 pci_dev_put(bridge);
16176                                 break;
16177                         }
16178                 } while (bridge);
16179         }
16180
16181         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16182             tg3_asic_rev(tp) == ASIC_REV_5714)
16183                 tp->pdev_peer = tg3_find_peer(tp);
16184
16185         /* Determine TSO capabilities */
16186         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16187                 ; /* Do nothing. HW bug. */
16188         else if (tg3_flag(tp, 57765_PLUS))
16189                 tg3_flag_set(tp, HW_TSO_3);
16190         else if (tg3_flag(tp, 5755_PLUS) ||
16191                  tg3_asic_rev(tp) == ASIC_REV_5906)
16192                 tg3_flag_set(tp, HW_TSO_2);
16193         else if (tg3_flag(tp, 5750_PLUS)) {
16194                 tg3_flag_set(tp, HW_TSO_1);
16195                 tg3_flag_set(tp, TSO_BUG);
16196                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16197                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16198                         tg3_flag_clear(tp, TSO_BUG);
16199         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16200                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16201                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16202                 tg3_flag_set(tp, FW_TSO);
16203                 tg3_flag_set(tp, TSO_BUG);
16204                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16205                         tp->fw_needed = FIRMWARE_TG3TSO5;
16206                 else
16207                         tp->fw_needed = FIRMWARE_TG3TSO;
16208         }
16209
16210         /* Selectively allow TSO based on operating conditions */
16211         if (tg3_flag(tp, HW_TSO_1) ||
16212             tg3_flag(tp, HW_TSO_2) ||
16213             tg3_flag(tp, HW_TSO_3) ||
16214             tg3_flag(tp, FW_TSO)) {
16215                 /* For firmware TSO, assume ASF is disabled.
16216                  * We'll disable TSO later if we discover ASF
16217                  * is enabled in tg3_get_eeprom_hw_cfg().
16218                  */
16219                 tg3_flag_set(tp, TSO_CAPABLE);
16220         } else {
16221                 tg3_flag_clear(tp, TSO_CAPABLE);
16222                 tg3_flag_clear(tp, TSO_BUG);
16223                 tp->fw_needed = NULL;
16224         }
16225
16226         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16227                 tp->fw_needed = FIRMWARE_TG3;
16228
16229         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16230                 tp->fw_needed = FIRMWARE_TG357766;
16231
16232         tp->irq_max = 1;
16233
16234         if (tg3_flag(tp, 5750_PLUS)) {
16235                 tg3_flag_set(tp, SUPPORT_MSI);
16236                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16237                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16238                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16239                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16240                      tp->pdev_peer == tp->pdev))
16241                         tg3_flag_clear(tp, SUPPORT_MSI);
16242
16243                 if (tg3_flag(tp, 5755_PLUS) ||
16244                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16245                         tg3_flag_set(tp, 1SHOT_MSI);
16246                 }
16247
16248                 if (tg3_flag(tp, 57765_PLUS)) {
16249                         tg3_flag_set(tp, SUPPORT_MSIX);
16250                         tp->irq_max = TG3_IRQ_MAX_VECS;
16251                 }
16252         }
16253
16254         tp->txq_max = 1;
16255         tp->rxq_max = 1;
16256         if (tp->irq_max > 1) {
16257                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16258                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16259
16260                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16261                     tg3_asic_rev(tp) == ASIC_REV_5720)
16262                         tp->txq_max = tp->irq_max - 1;
16263         }
16264
16265         if (tg3_flag(tp, 5755_PLUS) ||
16266             tg3_asic_rev(tp) == ASIC_REV_5906)
16267                 tg3_flag_set(tp, SHORT_DMA_BUG);
16268
16269         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16270                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16271
16272         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16273             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16274             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16275             tg3_asic_rev(tp) == ASIC_REV_5762)
16276                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16277
16278         if (tg3_flag(tp, 57765_PLUS) &&
16279             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16280                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16281
16282         if (!tg3_flag(tp, 5705_PLUS) ||
16283             tg3_flag(tp, 5780_CLASS) ||
16284             tg3_flag(tp, USE_JUMBO_BDFLAG))
16285                 tg3_flag_set(tp, JUMBO_CAPABLE);
16286
16287         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16288                               &pci_state_reg);
16289
16290         if (pci_is_pcie(tp->pdev)) {
16291                 u16 lnkctl;
16292
16293                 tg3_flag_set(tp, PCI_EXPRESS);
16294
16295                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16296                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16297                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16298                                 tg3_flag_clear(tp, HW_TSO_2);
16299                                 tg3_flag_clear(tp, TSO_CAPABLE);
16300                         }
16301                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16302                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16303                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16304                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16305                                 tg3_flag_set(tp, CLKREQ_BUG);
16306                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16307                         tg3_flag_set(tp, L1PLLPD_EN);
16308                 }
16309         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16310                 /* BCM5785 devices are effectively PCIe devices, and should
16311                  * follow PCIe codepaths, but do not have a PCIe capabilities
16312                  * section.
16313                  */
16314                 tg3_flag_set(tp, PCI_EXPRESS);
16315         } else if (!tg3_flag(tp, 5705_PLUS) ||
16316                    tg3_flag(tp, 5780_CLASS)) {
16317                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16318                 if (!tp->pcix_cap) {
16319                         dev_err(&tp->pdev->dev,
16320                                 "Cannot find PCI-X capability, aborting\n");
16321                         return -EIO;
16322                 }
16323
16324                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16325                         tg3_flag_set(tp, PCIX_MODE);
16326         }
16327
16328         /* If we have an AMD 762 or VIA K8T800 chipset, write
16329          * reordering to the mailbox registers done by the host
16330          * controller can cause major troubles.  We read back from
16331          * every mailbox register write to force the writes to be
16332          * posted to the chip in order.
16333          */
16334         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16335             !tg3_flag(tp, PCI_EXPRESS))
16336                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16337
16338         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16339                              &tp->pci_cacheline_sz);
16340         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16341                              &tp->pci_lat_timer);
16342         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16343             tp->pci_lat_timer < 64) {
16344                 tp->pci_lat_timer = 64;
16345                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16346                                       tp->pci_lat_timer);
16347         }
16348
16349         /* Important! -- It is critical that the PCI-X hw workaround
16350          * situation is decided before the first MMIO register access.
16351          */
16352         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16353                 /* 5700 BX chips need to have their TX producer index
16354                  * mailboxes written twice to workaround a bug.
16355                  */
16356                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16357
16358                 /* If we are in PCI-X mode, enable register write workaround.
16359                  *
16360                  * The workaround is to use indirect register accesses
16361                  * for all chip writes not to mailbox registers.
16362                  */
16363                 if (tg3_flag(tp, PCIX_MODE)) {
16364                         u32 pm_reg;
16365
16366                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16367
16368                         /* The chip can have its power management PCI config
16369                          * space registers clobbered due to this bug.
16370                          * So explicitly force the chip into D0 here.
16371                          */
16372                         pci_read_config_dword(tp->pdev,
16373                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16374                                               &pm_reg);
16375                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16376                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16377                         pci_write_config_dword(tp->pdev,
16378                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16379                                                pm_reg);
16380
16381                         /* Also, force SERR#/PERR# in PCI command. */
16382                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16383                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16384                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16385                 }
16386         }
16387
16388         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16389                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16390         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16391                 tg3_flag_set(tp, PCI_32BIT);
16392
16393         /* Chip-specific fixup from Broadcom driver */
16394         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16395             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16396                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16397                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16398         }
16399
16400         /* Default fast path register access methods */
16401         tp->read32 = tg3_read32;
16402         tp->write32 = tg3_write32;
16403         tp->read32_mbox = tg3_read32;
16404         tp->write32_mbox = tg3_write32;
16405         tp->write32_tx_mbox = tg3_write32;
16406         tp->write32_rx_mbox = tg3_write32;
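              /* tr32()/tw32() and the mailbox helpers dispatch through
               * these pointers, so the chip-specific quirks below only
               * swap a pointer instead of touching every call site.
               */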
16407
16408         /* Various workaround register access methods */
16409         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16410                 tp->write32 = tg3_write_indirect_reg32;
16411         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16412                  (tg3_flag(tp, PCI_EXPRESS) &&
16413                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16414                 /*
16415                  * Back-to-back register writes can cause problems on these
16416                  * chips; the workaround is to read back all reg writes
16417                  * except those to mailbox regs.
16418                  *
16419                  * See tg3_write_indirect_reg32().
16420                  */
16421                 tp->write32 = tg3_write_flush_reg32;
16422         }
16423
16424         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16425                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16426                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16427                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16428         }
16429
16430         if (tg3_flag(tp, ICH_WORKAROUND)) {
16431                 tp->read32 = tg3_read_indirect_reg32;
16432                 tp->write32 = tg3_write_indirect_reg32;
16433                 tp->read32_mbox = tg3_read_indirect_mbox;
16434                 tp->write32_mbox = tg3_write_indirect_mbox;
16435                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16436                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16437
16438                 iounmap(tp->regs);
16439                 tp->regs = NULL;
16440
16441                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16442                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16443                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
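                      /* From here on all access goes through PCI config
                       * space; tp->regs is unmapped and memory decoding is
                       * off, so MMIO must not be used on this chip.
                       */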
16444         }
16445         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16446                 tp->read32_mbox = tg3_read32_mbox_5906;
16447                 tp->write32_mbox = tg3_write32_mbox_5906;
16448                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16449                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16450         }
16451
16452         if (tp->write32 == tg3_write_indirect_reg32 ||
16453             (tg3_flag(tp, PCIX_MODE) &&
16454              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16455               tg3_asic_rev(tp) == ASIC_REV_5701)))
16456                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16457
16458         /* The memory arbiter has to be enabled in order for SRAM accesses
16459          * to succeed.  Normally on powerup the tg3 chip firmware will make
16460          * sure it is enabled, but other entities such as system netboot
16461          * code might disable it.
16462          */
16463         val = tr32(MEMARB_MODE);
16464         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16465
16466         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16467         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16468             tg3_flag(tp, 5780_CLASS)) {
16469                 if (tg3_flag(tp, PCIX_MODE)) {
16470                         pci_read_config_dword(tp->pdev,
16471                                               tp->pcix_cap + PCI_X_STATUS,
16472                                               &val);
16473                         tp->pci_fn = val & 0x7;
16474                 }
16475         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16476                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16477                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16478                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16479                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16480                         val = tr32(TG3_CPMU_STATUS);
16481
16482                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16483                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16484                 else
16485                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16486                                      TG3_CPMU_STATUS_FSHFT_5719;
16487         }
16488
16489         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16490                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16491                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16492         }
16493
16494         /* Get eeprom hw config before calling tg3_set_power_state().
16495          * In particular, the TG3_FLAG_IS_NIC flag must be
16496          * determined before calling tg3_set_power_state() so that
16497          * we know whether or not to switch out of Vaux power.
16498          * When the flag is set, it means that GPIO1 is used for eeprom
16499          * write protect and also implies that it is a LOM where GPIOs
16500          * are not used to switch power.
16501          */
16502         tg3_get_eeprom_hw_cfg(tp);
16503
16504         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16505                 tg3_flag_clear(tp, TSO_CAPABLE);
16506                 tg3_flag_clear(tp, TSO_BUG);
16507                 tp->fw_needed = NULL;
16508         }
16509
16510         if (tg3_flag(tp, ENABLE_APE)) {
16511                 /* Allow reads and writes to the
16512                  * APE register and memory space.
16513                  */
16514                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16515                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16516                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16517                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16518                                        pci_state_reg);
16519
16520                 tg3_ape_lock_init(tp);
16521         }
16522
16523         /* Set up tp->grc_local_ctrl before calling
16524          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16525          * will bring 5700's external PHY out of reset.
16526          * It is also used as eeprom write protect on LOMs.
16527          */
16528         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16529         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16530             tg3_flag(tp, EEPROM_WRITE_PROT))
16531                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16532                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16533         /* Unused GPIO3 must be driven as output on 5752 because there
16534          * are no pull-up resistors on unused GPIO pins.
16535          */
16536         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16537                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16538
16539         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16540             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16541             tg3_flag(tp, 57765_CLASS))
16542                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16543
16544         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16545             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16546                 /* Turn off the debug UART. */
16547                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16548                 if (tg3_flag(tp, IS_NIC))
16549                         /* Keep VMain power. */
16550                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16551                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16552         }
16553
16554         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16555                 tp->grc_local_ctrl |=
16556                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16557
16558         /* Switch out of Vaux if it is a NIC */
16559         tg3_pwrsrc_switch_to_vmain(tp);
16560
16561         /* Derive initial jumbo mode from MTU assigned in
16562          * ether_setup() via the alloc_etherdev() call
16563          */
16564         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16565                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16566
16567         /* Determine WakeOnLan speed to use. */
16568         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16569             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16570             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16571             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16572                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16573         } else {
16574                 tg3_flag_set(tp, WOL_SPEED_100MB);
16575         }
16576
16577         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16578                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16579
16580         /* A few boards don't want the Ethernet@WireSpeed phy feature */
16581         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16582             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16583              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16584              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16585             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16586             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16587                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16588
16589         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16590             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16591                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16592         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16593                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16594
16595         if (tg3_flag(tp, 5705_PLUS) &&
16596             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16597             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16598             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16599             !tg3_flag(tp, 57765_PLUS)) {
16600                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16601                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16602                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16603                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16604                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16605                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16606                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16607                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16608                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16609                 } else
16610                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16611         }
16612
16613         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16614             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16615                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16616                 if (tp->phy_otp == 0)
16617                         tp->phy_otp = TG3_OTP_DEFAULT;
16618         }
16619
16620         if (tg3_flag(tp, CPMU_PRESENT))
16621                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16622         else
16623                 tp->mi_mode = MAC_MI_MODE_BASE;
16624
16625         tp->coalesce_mode = 0;
16626         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16627             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16628                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16629
16630         /* Set these bits to enable statistics workaround. */
16631         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16632             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16633             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16634             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16635                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16636                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16637         }
16638
16639         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16640             tg3_asic_rev(tp) == ASIC_REV_57780)
16641                 tg3_flag_set(tp, USE_PHYLIB);
16642
16643         err = tg3_mdio_init(tp);
16644         if (err)
16645                 return err;
16646
16647         /* Initialize data/descriptor byte/word swapping. */
16648         val = tr32(GRC_MODE);
16649         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16650             tg3_asic_rev(tp) == ASIC_REV_5762)
16651                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16652                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16653                         GRC_MODE_B2HRX_ENABLE |
16654                         GRC_MODE_HTX2B_ENABLE |
16655                         GRC_MODE_HOST_STACKUP);
16656         else
16657                 val &= GRC_MODE_HOST_STACKUP;
16658
16659         tw32(GRC_MODE, val | tp->grc_mode);
16660
16661         tg3_switch_clocks(tp);
16662
16663         /* Clear this out for sanity. */
16664         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16665
16666         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16667         tw32(TG3PCI_REG_BASE_ADDR, 0);
16668
16669         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16670                               &pci_state_reg);
16671         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16672             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16673                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16674                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16675                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16676                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16677                         void __iomem *sram_base;
16678
			/* Write some dummy words into the SRAM status block
			 * area and see if they read back correctly.  If the
			 * value read back is bad, force enable the PCIX
			 * workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	/* If the device has an NVRAM, no need to load patch firmware */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
	    !tg3_flag(tp, NO_NVRAM))
		tp->fw_needed = NULL;

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;

	if (tg3_10_100_only_device(tp, ent))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe; here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
		tg3_flag_set(tp, POLL_CPMU_LINK);

	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}
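
	/* NET_IP_ALIGN is normally 2: padding the 14-byte Ethernet header
	 * by two bytes lands the IP header on a 4-byte boundary.  When the
	 * 5701-in-PCI-X case above has to drop that pad, raising
	 * rx_copy_thresh to the maximum u16 makes the driver copy every
	 * received frame into a freshly aligned skb on platforms that
	 * cannot do efficient unaligned accesses.
	 */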

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to work around hw errata.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
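
/* The rx ring masks set in the function above rely on the ring sizes
 * being powers of two, so advancing an index is a mask, not a modulo.
 * A minimal stand-alone sketch with hypothetical values (not driver
 * code):
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int ring_size = 512;		/* assumed power of two */
	unsigned int ring_mask = ring_size - 1;	/* cf. tp->rx_std_ring_mask */
	unsigned int idx = 510;
	int i;

	/* Walking the index wraps cleanly at the ring boundary. */
	for (i = 0; i < 4; i++) {
		printf("idx = %u\n", idx);
		idx = (idx + 1) & ring_mask;	/* prints 510, 511, 0, 1 */
	}
	return 0;
}
#endif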

#ifdef CONFIG_SPARC
static int tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == ETH_ALEN) {
		memcpy(dev->dev_addr, addr, ETH_ALEN);
		return 0;
	}
	return -ENODEV;
}

static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
	return 0;
}
#endif

static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
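
/* The mailbox check above keys on the bootcode signature 0x484b, which
 * is ASCII "HK" in the top half of NIC_SRAM_MAC_ADDR_HIGH_MBOX.  A
 * minimal stand-alone sketch of the same unpacking, using made-up
 * register values (not driver code):
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int hi = 0x484b0010;	/* "HK" signature + addr[0..1] */
	unsigned int lo = 0x18c0ffee;	/* addr[2..5], hypothetical */
	unsigned char addr[6];

	if ((hi >> 16) == 0x484b) {	/* 0x48 = 'H', 0x4b = 'K' */
		addr[0] = (hi >>  8) & 0xff;
		addr[1] = (hi >>  0) & 0xff;
		addr[2] = (lo >> 24) & 0xff;
		addr[3] = (lo >> 16) & 0xff;
		addr[4] = (lo >>  8) & 0xff;
		addr[5] = (lo >>  0) & 0xff;
		/* prints 00:10:18:c0:ff:ee */
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       addr[0], addr[1], addr[2],
		       addr[3], addr[4], addr[5]);
	}
	return 0;
}
#endif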

#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
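
/* PCI_CACHE_LINE_SIZE is encoded in 32-bit dwords, hence the "byte * 4"
 * above (a register value of 0x10 means 16 dwords = 64 bytes), with 0
 * treated as "unknown" and mapped to a conservative 1024.  A tiny
 * stand-alone illustration of that decode (not driver code):
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned char byte = 0x10;	/* hypothetical config-space value */
	int cacheline_size = byte ? (int) byte * 4 : 1024;

	printf("cacheline = %d bytes\n", cacheline_size);	/* 64 */
	return 0;
}
#endif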

static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was loaded after an initial
	 * scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
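
/* Flow of the test above: the descriptor is copied word-by-word into
 * NIC SRAM through the PCI config-space memory window, its SRAM address
 * is enqueued on the read (host-to-device) or write (device-to-host)
 * DMA FTQ, and the matching completion FIFO is then polled for up to
 * 4 ms (40 x 100 us) until its low 16 bits echo that descriptor
 * address.
 */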

#define TEST_BUFFER_SIZE	0x2000

static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};

static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}
	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform the DMA test with the maximum write burst
	 * size to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}

static void tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}

static char *tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM5762:	return "5762C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}

static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
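
/* Example outputs from the helper above, derived from its branches for
 * a hypothetical board: "PCI Express", "PCIX:133MHz:64-bit",
 * "PCI:33MHz:32-bit".  The longest variant needs 19 bytes including
 * the terminating NUL, comfortably inside the 40-byte buffer the probe
 * path passes in.
 */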

static void tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
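
/* These defaults back the ethtool coalescing interface; for instance,
 * "ethtool -c ethX" reports them and "ethtool -C ethX rx-usecs N"
 * overrides rx_coalesce_usecs at runtime.
 */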

static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->irq_sync = 1;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	if (pdev_is_ssb_gige_core(pdev)) {
		tg3_flag_set(tp, IS_SSB_CORE);
		if (ssb_gige_must_flush_posted_writes(pdev))
			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
		if (ssb_gige_one_dma_at_once(pdev))
			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
		if (ssb_gige_have_roboswitch(pdev)) {
			tg3_flag_set(tp, USE_PHYLIB);
			tg3_flag_set(tp, ROBOSWITCH);
		}
		if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
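
	/* DMA_BIT_MASK(40) is 0xffffffffff.  The streaming mask (dma_mask)
	 * and coherent mask (persist_dma_mask) are tracked separately: on
	 * HIGHMEM kernels the streaming mask is widened to 64 bits so any
	 * page can be attempted (with the address check done at xmit time,
	 * per the comment above), while coherent allocations such as the
	 * descriptor rings stay below the 40-bit limit.
	 */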

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}
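
	/* On firmware-TSO chips the user opts in from userspace, e.g.
	 * "ethtool -K ethX tso on"; the hardware-TSO chips above ship
	 * with it already enabled.
	 */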

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;
	dev->priv_flags |= IFF_UNICAST_FLT;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown.
	 * The DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set
		 * up above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
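
	/* Sketch of the send-mailbox walk above, assuming an arbitrary
	 * base B for vector 0 (illustrative, not the real register
	 * offsets): vectors 0 and 1 intentionally share B, since the
	 * "!i" continue skips the adjustment once; after that, the
	 * -0x4/+0xc alternation visits the other 4-byte half of each
	 * 8-byte mailbox register in turn, e.g. with (B & 0x4) != 0:
	 * B, B, B - 0x4, B + 0x8, B + 0x4, B + 0x10, ...
	 */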
17775
17776         tg3_init_coal(tp);
17777
17778         pci_set_drvdata(pdev, dev);
17779
17780         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17781             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17782             tg3_asic_rev(tp) == ASIC_REV_5762)
17783                 tg3_flag_set(tp, PTP_CAPABLE);
17784
17785         tg3_timer_init(tp);
17786
17787         tg3_carrier_off(tp);
17788
17789         err = register_netdev(dev);
17790         if (err) {
17791                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17792                 goto err_out_apeunmap;
17793         }
17794
17795         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17796                     tp->board_part_number,
17797                     tg3_chip_rev_id(tp),
17798                     tg3_bus_string(tp, str),
17799                     dev->dev_addr);
17800
17801         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17802                 struct phy_device *phydev;
17803                 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
17804                 netdev_info(dev,
17805                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17806                             phydev->drv->name, dev_name(&phydev->dev));
17807         } else {
17808                 char *ethtype;
17809
17810                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17811                         ethtype = "10/100Base-TX";
17812                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17813                         ethtype = "1000Base-SX";
17814                 else
17815                         ethtype = "10/100/1000Base-T";
17816
17817                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17818                             "(WireSpeed[%d], EEE[%d])\n",
17819                             tg3_phy_string(tp), ethtype,
17820                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17821                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17822         }
17823
17824         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17825                     (dev->features & NETIF_F_RXCSUM) != 0,
17826                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
17827                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17828                     tg3_flag(tp, ENABLE_ASF) != 0,
17829                     tg3_flag(tp, TSO_CAPABLE) != 0);
17830         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17831                     tp->dma_rwctrl,
17832                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17833                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17834
17835         pci_save_state(pdev);
17836
17837         return 0;
17838
17839 err_out_apeunmap:
17840         if (tp->aperegs) {
17841                 iounmap(tp->aperegs);
17842                 tp->aperegs = NULL;
17843         }
17844
17845 err_out_iounmap:
17846         if (tp->regs) {
17847                 iounmap(tp->regs);
17848                 tp->regs = NULL;
17849         }
17850
17851 err_out_free_dev:
17852         free_netdev(dev);
17853
17854 err_out_free_res:
17855         pci_release_regions(pdev);
17856
17857 err_out_disable_pdev:
17858         if (pci_is_enabled(pdev))
17859                 pci_disable_device(pdev);
17860         return err;
17861 }
17862
17863 static void tg3_remove_one(struct pci_dev *pdev)
17864 {
17865         struct net_device *dev = pci_get_drvdata(pdev);
17866
17867         if (dev) {
17868                 struct tg3 *tp = netdev_priv(dev);
17869
17870                 release_firmware(tp->fw);
17871
17872                 tg3_reset_task_cancel(tp);
17873
17874                 if (tg3_flag(tp, USE_PHYLIB)) {
17875                         tg3_phy_fini(tp);
17876                         tg3_mdio_fini(tp);
17877                 }
17878
17879                 unregister_netdev(dev);
17880                 if (tp->aperegs) {
17881                         iounmap(tp->aperegs);
17882                         tp->aperegs = NULL;
17883                 }
17884                 if (tp->regs) {
17885                         iounmap(tp->regs);
17886                         tp->regs = NULL;
17887                 }
17888                 free_netdev(dev);
17889                 pci_release_regions(pdev);
17890                 pci_disable_device(pdev);
17891         }
17892 }
17893
17894 #ifdef CONFIG_PM_SLEEP
17895 static int tg3_suspend(struct device *device)
17896 {
17897         struct pci_dev *pdev = to_pci_dev(device);
17898         struct net_device *dev = pci_get_drvdata(pdev);
17899         struct tg3 *tp = netdev_priv(dev);
17900         int err = 0;
17901
17902         rtnl_lock();
17903
17904         if (!netif_running(dev))
17905                 goto unlock;
17906
17907         tg3_reset_task_cancel(tp);
17908         tg3_phy_stop(tp);
17909         tg3_netif_stop(tp);
17910
17911         tg3_timer_stop(tp);
17912
17913         tg3_full_lock(tp, 1);
17914         tg3_disable_ints(tp);
17915         tg3_full_unlock(tp);
17916
17917         netif_device_detach(dev);
17918
17919         tg3_full_lock(tp, 0);
17920         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17921         tg3_flag_clear(tp, INIT_COMPLETE);
17922         tg3_full_unlock(tp);
17923
17924         err = tg3_power_down_prepare(tp);
        if (err) {
                int err2;

                tg3_full_lock(tp, 0);

                tg3_flag_set(tp, INIT_COMPLETE);
                err2 = tg3_restart_hw(tp, true);
                if (err2)
                        goto out;

                tg3_timer_start(tp);

                netif_device_attach(dev);
                tg3_netif_start(tp);

out:
                tg3_full_unlock(tp);

                if (!err2)
                        tg3_phy_start(tp);
        }

unlock:
        rtnl_unlock();
        return err;
}

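/*
 * Inverse of tg3_suspend(): reattach the netdev, tell the APE
 * management firmware that the driver is initializing again, and
 * restart the hardware, timer and data path.
 */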
static int tg3_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err = 0;

        rtnl_lock();

        if (!netif_running(dev))
                goto unlock;

        netif_device_attach(dev);

        tg3_full_lock(tp, 0);

        tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp,
                             !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
        if (err)
                goto out;

        tg3_timer_start(tp);

        tg3_netif_start(tp);

out:
        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);

unlock:
        rtnl_unlock();
        return err;
}
#endif /* CONFIG_PM_SLEEP */

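/*
 * SIMPLE_DEV_PM_OPS() builds the struct dev_pm_ops that ties the
 * system-sleep callbacks above into the driver core.  Roughly (a
 * sketch of the macro's effect, not its exact text) it is equivalent
 * to:
 *
 *      static const struct dev_pm_ops tg3_pm_ops = {
 *              SET_SYSTEM_SLEEP_PM_OPS(tg3_suspend, tg3_resume)
 *      };
 *
 * so tg3_suspend() also serves the freeze and poweroff transitions and
 * tg3_resume() the thaw and restore ones; with CONFIG_PM_SLEEP unset
 * the ops are left empty and the callbacks above are not compiled in.
 */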
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);

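/*
 * Reboot/poweroff hook: detach and close the interface.  Only on a
 * real power-off is the chip moved to its low-power state via
 * tg3_power_down(), which (as configured earlier in the driver) can
 * leave Wake-on-LAN armed.
 */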
static void tg3_shutdown(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);

        rtnl_lock();
        netif_device_detach(dev);

        if (netif_running(dev))
                dev_close(dev);

        if (system_state == SYSTEM_POWER_OFF)
                tg3_power_down(tp);

        rtnl_unlock();
}

/**
 * tg3_io_error_detected - called when a PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

        netdev_info(netdev, "PCI I/O error detected\n");

        rtnl_lock();

        /* The netdev may not be registered yet, or the interface may be down */
        if (!netdev || !netif_running(netdev))
                goto done;

        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        /* Make sure the reset task cannot run while we tear down */
        tg3_reset_task_cancel(tp);

        netif_device_detach(netdev);

        /* Clean up software state, even if MMIO is blocked */
        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        tg3_full_unlock(tp);

done:
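        /*
         * A permanent failure cannot be recovered from: re-enable NAPI
         * (disabled during the teardown above) so that dev_close() can
         * run, and report DISCONNECT.  For any other state, disable the
         * device and wait for the slot reset.
         */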
        if (state == pci_channel_io_perm_failure) {
                if (netdev) {
                        tg3_napi_enable(tp);
                        dev_close(netdev);
                }
                err = PCI_ERS_RESULT_DISCONNECT;
        } else {
                pci_disable_device(pdev);
        }

        rtnl_unlock();

        return err;
}

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
        int err;

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
                goto done;
        }

        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);

        if (!netdev || !netif_running(netdev)) {
                rc = PCI_ERS_RESULT_RECOVERED;
                goto done;
        }

        err = tg3_power_up(tp);
        if (err)
                goto done;

        rc = PCI_ERS_RESULT_RECOVERED;

done:
        if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
                tg3_napi_enable(tp);
                dev_close(netdev);
        }
        rtnl_unlock();

        return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        int err;

        rtnl_lock();

        if (!netif_running(netdev))
                goto done;

        tg3_full_lock(tp, 0);
        tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp, true);
        if (err) {
                tg3_full_unlock(tp);
                netdev_err(netdev, "Cannot restart hardware after reset.\n");
                goto done;
        }

        netif_device_attach(netdev);

        tg3_timer_start(tp);

        tg3_netif_start(tp);

        tg3_full_unlock(tp);

        tg3_phy_start(tp);

done:
        rtnl_unlock();
}

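/*
 * PCI error-recovery contract: the AER core invokes .error_detected()
 * when a bus error is reported, then, unless the driver returned
 * PCI_ERS_RESULT_DISCONNECT, resets the slot and calls .slot_reset(),
 * and finally calls .resume() once traffic may flow again.
 */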
static const struct pci_error_handlers tg3_err_handler = {
        .error_detected = tg3_io_error_detected,
        .slot_reset     = tg3_io_slot_reset,
        .resume         = tg3_io_resume
};

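/*
 * Top-level PCI driver description: tg3_pci_tbl (defined near the top
 * of this file) lists the device IDs the driver binds to, and the
 * entry points above handle probe/remove, power management, shutdown
 * and error recovery.
 */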
static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = tg3_remove_one,
        .err_handler    = &tg3_err_handler,
        .driver.pm      = &tg3_pm_ops,
        .shutdown       = tg3_shutdown,
};

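/*
 * module_pci_driver() generates the module init/exit boilerplate.  A
 * roughly equivalent hand-written form (a sketch) would be:
 *
 *      static int __init tg3_init(void)
 *      {
 *              return pci_register_driver(&tg3_driver);
 *      }
 *
 *      static void __exit tg3_exit(void)
 *      {
 *              pci_unregister_driver(&tg3_driver);
 *      }
 *
 *      module_init(tg3_init);
 *      module_exit(tg3_exit);
 */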
module_pci_driver(tg3_driver);