/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
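/* Usage sketch (illustrative, not part of the driver proper): the macros
 * paste the short flag name onto the TG3_FLAG_ prefix, so a call such as
 *
 *      if (tg3_flag(tp, ENABLE_APE))
 *              tg3_flag_set(tp, ENABLE_ASF);
 *
 * expands to test_bit(TG3_FLAG_ENABLE_APE, ...) and
 * set_bit(TG3_FLAG_ENABLE_ASF, ...) on the tp->tg3_flags bitmap, while the
 * inline helpers above let the compiler type-check the enum argument.
 */
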
#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     136
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "Jan 03, 2014"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
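
/* Worked example (illustrative): because TG3_TX_RING_SIZE is a
 * compile-time power of two, NEXT_TX() is exactly the '& (foo - 1)'
 * replacement described above -- e.g. NEXT_TX(511) == (512 & 511) == 0,
 * wrapping the producer index without a hardware modulo instruction.
 */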

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG357766       "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

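/* Indirect register access: when MMIO to the register window is not safe,
 * the chip's registers are reached through PCI config space instead --
 * write the target offset to TG3PCI_REG_BASE_ADDR, then move the data
 * through TG3PCI_REG_DATA.  indirect_lock serializes the two-step
 * sequence against concurrent users.
 */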
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
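
/* Usage sketch (illustrative): callers reach _tw32_flush() through the
 * tw32_f()/tw32_wait_f() macros defined below, e.g.
 *
 *      tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * which writes the register and then guarantees at least 40 usec pass
 * before the caller proceeds, whether or not the write was posted.
 */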

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}
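
/* Usage sketch (illustrative): APE locks are taken and released in pairs
 * around accesses to shared resources, as tg3_ape_event_lock() below does
 * with the shared-memory lock:
 *
 *      if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *              return -EBUSY;
 *      ... touch APE shared memory ...
 *      tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */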

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

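/* Poll for the APE to finish servicing the previous event, taking the MEM
 * lock around each check.  On success (no event pending) the function
 * returns 0 with the MEM lock still held for the caller; on every retry
 * the lock is released so the APE firmware can make progress.
 */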
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

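/* Read 'len' bytes of APE scratchpad memory into 'data', at most 'maxlen'
 * bytes per transfer: post a scratchpad-read driver event describing the
 * (base_off, length) window, wait for the APE to complete it, then pull
 * the results out of the shared message buffer at 'msgoff'.
 */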
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

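/* Reprogram the core clock selection in TG3PCI_CLOCK_CTRL.  Chips with a
 * CPMU and 5780-class chips manage this themselves, so the helper returns
 * early for them; on the rest each write is followed by a 40 usec wait,
 * per the posted-write rules described above.
 */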
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
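
/* Usage sketch (illustrative): the MI_COM frame built above packs the PHY
 * address, register and command into one 32-bit write, so a caller such
 * as tg3_bmcr_reset() below can simply do
 *
 *      tg3_writephy(tp, MII_BMCR, BMCR_RESET);
 *
 * and then poll with tg3_readphy(tp, MII_BMCR, &val) until the reset bit
 * clears.
 */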
1227
1228 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1229 {
1230         int err;
1231
1232         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1233         if (err)
1234                 goto done;
1235
1236         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1237         if (err)
1238                 goto done;
1239
1240         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1241                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1242         if (err)
1243                 goto done;
1244
1245         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1246
1247 done:
1248         return err;
1249 }
1250
1251 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1252 {
1253         int err;
1254
1255         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1256         if (err)
1257                 goto done;
1258
1259         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1260         if (err)
1261                 goto done;
1262
1263         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1264                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1265         if (err)
1266                 goto done;
1267
1268         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1269
1270 done:
1271         return err;
1272 }
1273
1274 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1275 {
1276         int err;
1277
1278         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1279         if (!err)
1280                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1281
1282         return err;
1283 }
1284
1285 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1286 {
1287         int err;
1288
1289         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1290         if (!err)
1291                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1292
1293         return err;
1294 }
1295
1296 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1297 {
1298         int err;
1299
1300         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1301                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1302                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1303         if (!err)
1304                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1305
1306         return err;
1307 }
1308
1309 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1310 {
1311         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1312                 set |= MII_TG3_AUXCTL_MISC_WREN;
1313
1314         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1315 }
1316
1317 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1318 {
1319         u32 val;
1320         int err;
1321
1322         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1323
1324         if (err)
1325                 return err;
1326
1327         if (enable)
1328                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1329         else
1330                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1331
1332         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1333                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1334
1335         return err;
1336 }
1337
1338 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1339 {
1340         return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1341                             reg | val | MII_TG3_MISC_SHDW_WREN);
1342 }
1343
1344 static int tg3_bmcr_reset(struct tg3 *tp)
1345 {
1346         u32 phy_control;
1347         int limit, err;
1348
1349         /* OK, reset it, and poll the BMCR_RESET bit until it
1350          * clears or we time out.
1351          */
1352         phy_control = BMCR_RESET;
1353         err = tg3_writephy(tp, MII_BMCR, phy_control);
1354         if (err != 0)
1355                 return -EBUSY;
1356
1357         limit = 5000;
1358         while (limit--) {
1359                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1360                 if (err != 0)
1361                         return -EBUSY;
1362
1363                 if ((phy_control & BMCR_RESET) == 0) {
1364                         udelay(40);
1365                         break;
1366                 }
1367                 udelay(10);
1368         }
1369         if (limit < 0)
1370                 return -EBUSY;
1371
1372         return 0;
1373 }
1374
1375 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1376 {
1377         struct tg3 *tp = bp->priv;
1378         u32 val;
1379
1380         spin_lock_bh(&tp->lock);
1381
1382         if (__tg3_readphy(tp, mii_id, reg, &val))
1383                 val = -EIO;
1384
1385         spin_unlock_bh(&tp->lock);
1386
1387         return val;
1388 }
1389
1390 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1391 {
1392         struct tg3 *tp = bp->priv;
1393         u32 ret = 0;
1394
1395         spin_lock_bh(&tp->lock);
1396
1397         if (__tg3_writephy(tp, mii_id, reg, val))
1398                 ret = -EIO;
1399
1400         spin_unlock_bh(&tp->lock);
1401
1402         return ret;
1403 }
1404
1405 static int tg3_mdio_reset(struct mii_bus *bp)
1406 {
1407         return 0;
1408 }
1409
1410 static void tg3_mdio_config_5785(struct tg3 *tp)
1411 {
1412         u32 val;
1413         struct phy_device *phydev;
1414
1415         phydev = tp->mdio_bus->phy_map[tp->phy_addr];
1416         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1417         case PHY_ID_BCM50610:
1418         case PHY_ID_BCM50610M:
1419                 val = MAC_PHYCFG2_50610_LED_MODES;
1420                 break;
1421         case PHY_ID_BCMAC131:
1422                 val = MAC_PHYCFG2_AC131_LED_MODES;
1423                 break;
1424         case PHY_ID_RTL8211C:
1425                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1426                 break;
1427         case PHY_ID_RTL8201E:
1428                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1429                 break;
1430         default:
1431                 return;
1432         }
1433
1434         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1435                 tw32(MAC_PHYCFG2, val);
1436
1437                 val = tr32(MAC_PHYCFG1);
1438                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1439                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1440                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1441                 tw32(MAC_PHYCFG1, val);
1442
1443                 return;
1444         }
1445
1446         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1447                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1448                        MAC_PHYCFG2_FMODE_MASK_MASK |
1449                        MAC_PHYCFG2_GMODE_MASK_MASK |
1450                        MAC_PHYCFG2_ACT_MASK_MASK   |
1451                        MAC_PHYCFG2_QUAL_MASK_MASK |
1452                        MAC_PHYCFG2_INBAND_ENABLE;
1453
1454         tw32(MAC_PHYCFG2, val);
1455
1456         val = tr32(MAC_PHYCFG1);
1457         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1458                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1459         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1460                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1461                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1462                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1463                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1464         }
1465         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1466                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1467         tw32(MAC_PHYCFG1, val);
1468
1469         val = tr32(MAC_EXT_RGMII_MODE);
1470         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1471                  MAC_RGMII_MODE_RX_QUALITY |
1472                  MAC_RGMII_MODE_RX_ACTIVITY |
1473                  MAC_RGMII_MODE_RX_ENG_DET |
1474                  MAC_RGMII_MODE_TX_ENABLE |
1475                  MAC_RGMII_MODE_TX_LOWPWR |
1476                  MAC_RGMII_MODE_TX_RESET);
1477         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1478                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1479                         val |= MAC_RGMII_MODE_RX_INT_B |
1480                                MAC_RGMII_MODE_RX_QUALITY |
1481                                MAC_RGMII_MODE_RX_ACTIVITY |
1482                                MAC_RGMII_MODE_RX_ENG_DET;
1483                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1484                         val |= MAC_RGMII_MODE_TX_ENABLE |
1485                                MAC_RGMII_MODE_TX_LOWPWR |
1486                                MAC_RGMII_MODE_TX_RESET;
1487         }
1488         tw32(MAC_EXT_RGMII_MODE, val);
1489 }
1490
1491 static void tg3_mdio_start(struct tg3 *tp)
1492 {
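             /* Take the MAC out of PHY auto-poll mode so that the MDIO
              * transactions that follow are driven purely by software.
              */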
1493         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1494         tw32_f(MAC_MI_MODE, tp->mi_mode);
1495         udelay(80);
1496
1497         if (tg3_flag(tp, MDIOBUS_INITED) &&
1498             tg3_asic_rev(tp) == ASIC_REV_5785)
1499                 tg3_mdio_config_5785(tp);
1500 }
1501
1502 static int tg3_mdio_init(struct tg3 *tp)
1503 {
1504         int i;
1505         u32 reg;
1506         struct phy_device *phydev;
1507
1508         if (tg3_flag(tp, 5717_PLUS)) {
1509                 u32 is_serdes;
1510
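                     /* Each PCI function of a 5717-class device owns the
                      * PHY at MDIO address (function number + 1); serdes
                      * devices are strapped seven addresses higher.
                      */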
1511                 tp->phy_addr = tp->pci_fn + 1;
1512
1513                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1514                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1515                 else
1516                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1517                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1518                 if (is_serdes)
1519                         tp->phy_addr += 7;
1520         } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1521                 int addr;
1522
1523                 addr = ssb_gige_get_phyaddr(tp->pdev);
1524                 if (addr < 0)
1525                         return addr;
1526                 tp->phy_addr = addr;
1527         } else
1528                 tp->phy_addr = TG3_PHY_MII_ADDR;
1529
1530         tg3_mdio_start(tp);
1531
1532         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1533                 return 0;
1534
1535         tp->mdio_bus = mdiobus_alloc();
1536         if (tp->mdio_bus == NULL)
1537                 return -ENOMEM;
1538
1539         tp->mdio_bus->name     = "tg3 mdio bus";
1540         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1541                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1542         tp->mdio_bus->priv     = tp;
1543         tp->mdio_bus->parent   = &tp->pdev->dev;
1544         tp->mdio_bus->read     = &tg3_mdio_read;
1545         tp->mdio_bus->write    = &tg3_mdio_write;
1546         tp->mdio_bus->reset    = &tg3_mdio_reset;
1547         tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1548         tp->mdio_bus->irq      = &tp->mdio_irq[0];
1549
1550         for (i = 0; i < PHY_MAX_ADDR; i++)
1551                 tp->mdio_bus->irq[i] = PHY_POLL;
1552
1553         /* The bus registration will look for all the PHYs on the mdio bus.
1554          * Unfortunately, it does not ensure the PHY is powered up before
1555          * accessing the PHY ID registers.  A chip reset is the
1556          * quickest way to bring the device back to an operational state..
1557          * quickest way to bring the device back to an operational state.
1558         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1559                 tg3_bmcr_reset(tp);
1560
1561         i = mdiobus_register(tp->mdio_bus);
1562         if (i) {
1563                 dev_warn(&tp->pdev->dev, "mdiobus_register failed (0x%x)\n", i);
1564                 mdiobus_free(tp->mdio_bus);
1565                 return i;
1566         }
1567
1568         phydev = tp->mdio_bus->phy_map[tp->phy_addr];
1569
1570         if (!phydev || !phydev->drv) {
1571                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1572                 mdiobus_unregister(tp->mdio_bus);
1573                 mdiobus_free(tp->mdio_bus);
1574                 return -ENODEV;
1575         }
1576
1577         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1578         case PHY_ID_BCM57780:
1579                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1580                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1581                 break;
1582         case PHY_ID_BCM50610:
1583         case PHY_ID_BCM50610M:
1584                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1585                                      PHY_BRCM_RX_REFCLK_UNUSED |
1586                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1587                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1588                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1589                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1590                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1591                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1592                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1593                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1594                 /* fallthru */
1595         case PHY_ID_RTL8211C:
1596                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1597                 break;
1598         case PHY_ID_RTL8201E:
1599         case PHY_ID_BCMAC131:
1600                 phydev->interface = PHY_INTERFACE_MODE_MII;
1601                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1602                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1603                 break;
1604         }
1605
1606         tg3_flag_set(tp, MDIOBUS_INITED);
1607
1608         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1609                 tg3_mdio_config_5785(tp);
1610
1611         return 0;
1612 }
1613
1614 static void tg3_mdio_fini(struct tg3 *tp)
1615 {
1616         if (tg3_flag(tp, MDIOBUS_INITED)) {
1617                 tg3_flag_clear(tp, MDIOBUS_INITED);
1618                 mdiobus_unregister(tp->mdio_bus);
1619                 mdiobus_free(tp->mdio_bus);
1620         }
1621 }
1622
1623 /* tp->lock is held. */
1624 static inline void tg3_generate_fw_event(struct tg3 *tp)
1625 {
1626         u32 val;
1627
1628         val = tr32(GRC_RX_CPU_EVENT);
1629         val |= GRC_RX_CPU_DRIVER_EVENT;
1630         tw32_f(GRC_RX_CPU_EVENT, val);
1631
1632         tp->last_event_jiffies = jiffies;
1633 }
1634
1635 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1636
1637 /* tp->lock is held. */
1638 static void tg3_wait_for_event_ack(struct tg3 *tp)
1639 {
1640         int i;
1641         unsigned int delay_cnt;
1642         long time_remain;
1643
1644         /* If enough time has passed, no wait is necessary. */
1645         time_remain = (long)(tp->last_event_jiffies + 1 +
1646                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1647                       (long)jiffies;
1648         if (time_remain < 0)
1649                 return;
1650
1651         /* Check if we can shorten the wait time. */
1652         delay_cnt = jiffies_to_usecs(time_remain);
1653         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1654                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
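             /* Convert the remaining budget into a number of 8 usec
              * polling intervals: ">> 3" divides by 8 and "+ 1" rounds
              * up so at least one poll is always made.
              */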
1655         delay_cnt = (delay_cnt >> 3) + 1;
1656
1657         for (i = 0; i < delay_cnt; i++) {
1658                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1659                         break;
1660                 if (pci_channel_offline(tp->pdev))
1661                         break;
1662
1663                 udelay(8);
1664         }
1665 }
1666
1667 /* tp->lock is held. */
1668 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1669 {
1670         u32 reg, val;
1671
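             /* Build the four words the firmware expects, each packing
              * a register pair as (high reg << 16) | low reg: BMCR/BMSR,
              * ADVERTISE/LPA, CTRL1000/STAT1000 (left zero on MII
              * serdes) and PHYADDR in the high half of the last word.
              * Unreadable registers are reported as zero.
              */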
1672         val = 0;
1673         if (!tg3_readphy(tp, MII_BMCR, &reg))
1674                 val = reg << 16;
1675         if (!tg3_readphy(tp, MII_BMSR, &reg))
1676                 val |= (reg & 0xffff);
1677         *data++ = val;
1678
1679         val = 0;
1680         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1681                 val = reg << 16;
1682         if (!tg3_readphy(tp, MII_LPA, &reg))
1683                 val |= (reg & 0xffff);
1684         *data++ = val;
1685
1686         val = 0;
1687         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1688                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1689                         val = reg << 16;
1690                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1691                         val |= (reg & 0xffff);
1692         }
1693         *data++ = val;
1694
1695         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1696                 val = reg << 16;
1697         else
1698                 val = 0;
1699         *data++ = val;
1700 }
1701
1702 /* tp->lock is held. */
1703 static void tg3_ump_link_report(struct tg3 *tp)
1704 {
1705         u32 data[4];
1706
1707         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1708                 return;
1709
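             /* Mailbox handshake: snapshot the PHY state, wait for the
              * firmware to ack the previous event, write the command,
              * length and payload, then raise a driver event so the RX
              * CPU picks up the update.
              */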
1710         tg3_phy_gather_ump_data(tp, data);
1711
1712         tg3_wait_for_event_ack(tp);
1713
1714         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1715         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1716         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1717         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1718         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1719         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1720
1721         tg3_generate_fw_event(tp);
1722 }
1723
1724 /* tp->lock is held. */
1725 static void tg3_stop_fw(struct tg3 *tp)
1726 {
1727         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1728                 /* Wait for RX cpu to ACK the previous event. */
1729                 tg3_wait_for_event_ack(tp);
1730
1731                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1732
1733                 tg3_generate_fw_event(tp);
1734
1735                 /* Wait for RX cpu to ACK this event. */
1736                 tg3_wait_for_event_ack(tp);
1737         }
1738 }
1739
1740 /* tp->lock is held. */
1741 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1742 {
1743         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1744                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1745
1746         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1747                 switch (kind) {
1748                 case RESET_KIND_INIT:
1749                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1750                                       DRV_STATE_START);
1751                         break;
1752
1753                 case RESET_KIND_SHUTDOWN:
1754                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755                                       DRV_STATE_UNLOAD);
1756                         break;
1757
1758                 case RESET_KIND_SUSPEND:
1759                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1760                                       DRV_STATE_SUSPEND);
1761                         break;
1762
1763                 default:
1764                         break;
1765                 }
1766         }
1767 }
1768
1769 /* tp->lock is held. */
1770 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1771 {
1772         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1773                 switch (kind) {
1774                 case RESET_KIND_INIT:
1775                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1776                                       DRV_STATE_START_DONE);
1777                         break;
1778
1779                 case RESET_KIND_SHUTDOWN:
1780                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1781                                       DRV_STATE_UNLOAD_DONE);
1782                         break;
1783
1784                 default:
1785                         break;
1786                 }
1787         }
1788 }
1789
1790 /* tp->lock is held. */
1791 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1792 {
1793         if (tg3_flag(tp, ENABLE_ASF)) {
1794                 switch (kind) {
1795                 case RESET_KIND_INIT:
1796                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1797                                       DRV_STATE_START);
1798                         break;
1799
1800                 case RESET_KIND_SHUTDOWN:
1801                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1802                                       DRV_STATE_UNLOAD);
1803                         break;
1804
1805                 case RESET_KIND_SUSPEND:
1806                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1807                                       DRV_STATE_SUSPEND);
1808                         break;
1809
1810                 default:
1811                         break;
1812                 }
1813         }
1814 }
1815
1816 static int tg3_poll_fw(struct tg3 *tp)
1817 {
1818         int i;
1819         u32 val;
1820
1821         if (tg3_flag(tp, NO_FWARE_REPORTED))
1822                 return 0;
1823
1824         if (tg3_flag(tp, IS_SSB_CORE)) {
1825                 /* We don't use firmware. */
1826                 return 0;
1827         }
1828
1829         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1830                 /* Wait up to 20ms for init done. */
1831                 for (i = 0; i < 200; i++) {
1832                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1833                                 return 0;
1834                         if (pci_channel_offline(tp->pdev))
1835                                 return -ENODEV;
1836
1837                         udelay(100);
1838                 }
1839                 return -ENODEV;
1840         }
1841
1842         /* Wait for firmware initialization to complete. */
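             /* The boot code signals completion by writing the one's
              * complement of the driver's magic value back into the
              * mailbox.
              */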
1843         for (i = 0; i < 100000; i++) {
1844                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1845                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1846                         break;
1847                 if (pci_channel_offline(tp->pdev)) {
1848                         if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1849                                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1850                                 netdev_info(tp->dev, "No firmware running\n");
1851                         }
1852
1853                         break;
1854                 }
1855
1856                 udelay(10);
1857         }
1858
1859         /* Chip might not be fitted with firmware.  Some Sun onboard
1860          * parts are configured like that.  So don't signal the timeout
1861          * of the above loop as an error, but do report the lack of
1862          * running firmware once.
1863          */
1864         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1865                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1866
1867                 netdev_info(tp->dev, "No firmware running\n");
1868         }
1869
1870         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1871                 /* The 57765 A0 needs a little more time to finish
1872                  * its internal initialization after a reset.
1873                  */
1874                 mdelay(10);
1875         }
1876
1877         return 0;
1878 }
1879
1880 static void tg3_link_report(struct tg3 *tp)
1881 {
1882         if (!netif_carrier_ok(tp->dev)) {
1883                 netif_info(tp, link, tp->dev, "Link is down\n");
1884                 tg3_ump_link_report(tp);
1885         } else if (netif_msg_link(tp)) {
1886                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1887                             (tp->link_config.active_speed == SPEED_1000 ?
1888                              1000 :
1889                              (tp->link_config.active_speed == SPEED_100 ?
1890                               100 : 10)),
1891                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1892                              "full" : "half"));
1893
1894                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1895                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1896                             "on" : "off",
1897                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1898                             "on" : "off");
1899
1900                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1901                         netdev_info(tp->dev, "EEE is %s\n",
1902                                     tp->setlpicnt ? "enabled" : "disabled");
1903
1904                 tg3_ump_link_report(tp);
1905         }
1906
1907         tp->link_up = netif_carrier_ok(tp->dev);
1908 }
1909
1910 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1911 {
1912         u32 flowctrl = 0;
1913
1914         if (adv & ADVERTISE_PAUSE_CAP) {
1915                 flowctrl |= FLOW_CTRL_RX;
1916                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1917                         flowctrl |= FLOW_CTRL_TX;
1918         } else if (adv & ADVERTISE_PAUSE_ASYM)
1919                 flowctrl |= FLOW_CTRL_TX;
1920
1921         return flowctrl;
1922 }
1923
1924 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1925 {
1926         u16 miireg;
1927
1928         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1929                 miireg = ADVERTISE_1000XPAUSE;
1930         else if (flow_ctrl & FLOW_CTRL_TX)
1931                 miireg = ADVERTISE_1000XPSE_ASYM;
1932         else if (flow_ctrl & FLOW_CTRL_RX)
1933                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1934         else
1935                 miireg = 0;
1936
1937         return miireg;
1938 }
1939
1940 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1941 {
1942         u32 flowctrl = 0;
1943
1944         if (adv & ADVERTISE_1000XPAUSE) {
1945                 flowctrl |= FLOW_CTRL_RX;
1946                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1947                         flowctrl |= FLOW_CTRL_TX;
1948         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1949                 flowctrl |= FLOW_CTRL_TX;
1950
1951         return flowctrl;
1952 }
1953
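     /* Resolve the pause configuration from the local and link-partner
      * 1000BASE-X advertisements, per the pause resolution rules of
      * IEEE 802.3 Annex 28B: symmetric pause when both ends advertise
      * PAUSE; when both advertise ASYM_PAUSE, the direction is decided
      * by whichever end also advertises PAUSE.
      */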
1954 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1955 {
1956         u8 cap = 0;
1957
1958         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1959                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1960         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1961                 if (lcladv & ADVERTISE_1000XPAUSE)
1962                         cap = FLOW_CTRL_RX;
1963                 if (rmtadv & ADVERTISE_1000XPAUSE)
1964                         cap = FLOW_CTRL_TX;
1965         }
1966
1967         return cap;
1968 }
1969
1970 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1971 {
1972         u8 autoneg;
1973         u8 flowctrl = 0;
1974         u32 old_rx_mode = tp->rx_mode;
1975         u32 old_tx_mode = tp->tx_mode;
1976
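             /* Honour the autonegotiated pause result when pause
              * autoneg is enabled, otherwise apply the forced settings;
              * the MAC RX/TX mode registers are only rewritten if the
              * resolved state actually changed.
              */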
1977         if (tg3_flag(tp, USE_PHYLIB))
1978                 autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
1979         else
1980                 autoneg = tp->link_config.autoneg;
1981
1982         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1983                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1984                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1985                 else
1986                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1987         } else
1988                 flowctrl = tp->link_config.flowctrl;
1989
1990         tp->link_config.active_flowctrl = flowctrl;
1991
1992         if (flowctrl & FLOW_CTRL_RX)
1993                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1994         else
1995                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1996
1997         if (old_rx_mode != tp->rx_mode)
1998                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1999
2000         if (flowctrl & FLOW_CTRL_TX)
2001                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
2002         else
2003                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
2004
2005         if (old_tx_mode != tp->tx_mode)
2006                 tw32_f(MAC_TX_MODE, tp->tx_mode);
2007 }
2008
2009 static void tg3_adjust_link(struct net_device *dev)
2010 {
2011         u8 oldflowctrl, linkmesg = 0;
2012         u32 mac_mode, lcl_adv, rmt_adv;
2013         struct tg3 *tp = netdev_priv(dev);
2014         struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2015
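             /* phylib link-change callback: mirror the negotiated
              * speed, duplex and pause state into the MAC mode
              * registers and report the link if anything changed.
              */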
2016         spin_lock_bh(&tp->lock);
2017
2018         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2019                                     MAC_MODE_HALF_DUPLEX);
2020
2021         oldflowctrl = tp->link_config.active_flowctrl;
2022
2023         if (phydev->link) {
2024                 lcl_adv = 0;
2025                 rmt_adv = 0;
2026
2027                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2028                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2029                 else if (phydev->speed == SPEED_1000 ||
2030                          tg3_asic_rev(tp) != ASIC_REV_5785)
2031                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
2032                 else
2033                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2034
2035                 if (phydev->duplex == DUPLEX_HALF)
2036                         mac_mode |= MAC_MODE_HALF_DUPLEX;
2037                 else {
2038                         lcl_adv = mii_advertise_flowctrl(
2039                                   tp->link_config.flowctrl);
2040
2041                         if (phydev->pause)
2042                                 rmt_adv = LPA_PAUSE_CAP;
2043                         if (phydev->asym_pause)
2044                                 rmt_adv |= LPA_PAUSE_ASYM;
2045                 }
2046
2047                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2048         } else
2049                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2050
2051         if (mac_mode != tp->mac_mode) {
2052                 tp->mac_mode = mac_mode;
2053                 tw32_f(MAC_MODE, tp->mac_mode);
2054                 udelay(40);
2055         }
2056
2057         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2058                 if (phydev->speed == SPEED_10)
2059                         tw32(MAC_MI_STAT,
2060                              MAC_MI_STAT_10MBPS_MODE |
2061                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2062                 else
2063                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2064         }
2065
2066         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2067                 tw32(MAC_TX_LENGTHS,
2068                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2069                       (6 << TX_LENGTHS_IPG_SHIFT) |
2070                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2071         else
2072                 tw32(MAC_TX_LENGTHS,
2073                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2074                       (6 << TX_LENGTHS_IPG_SHIFT) |
2075                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2076
2077         if (phydev->link != tp->old_link ||
2078             phydev->speed != tp->link_config.active_speed ||
2079             phydev->duplex != tp->link_config.active_duplex ||
2080             oldflowctrl != tp->link_config.active_flowctrl)
2081                 linkmesg = 1;
2082
2083         tp->old_link = phydev->link;
2084         tp->link_config.active_speed = phydev->speed;
2085         tp->link_config.active_duplex = phydev->duplex;
2086
2087         spin_unlock_bh(&tp->lock);
2088
2089         if (linkmesg)
2090                 tg3_link_report(tp);
2091 }
2092
2093 static int tg3_phy_init(struct tg3 *tp)
2094 {
2095         struct phy_device *phydev;
2096
2097         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2098                 return 0;
2099
2100         /* Bring the PHY back to a known state. */
2101         tg3_bmcr_reset(tp);
2102
2103         phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2104
2105         /* Attach the MAC to the PHY. */
2106         phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2107                              tg3_adjust_link, phydev->interface);
2108         if (IS_ERR(phydev)) {
2109                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2110                 return PTR_ERR(phydev);
2111         }
2112
2113         /* Mask with MAC supported features. */
2114         switch (phydev->interface) {
2115         case PHY_INTERFACE_MODE_GMII:
2116         case PHY_INTERFACE_MODE_RGMII:
2117                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2118                         phydev->supported &= (PHY_GBIT_FEATURES |
2119                                               SUPPORTED_Pause |
2120                                               SUPPORTED_Asym_Pause);
2121                         break;
2122                 }
2123                 /* fallthru */
2124         case PHY_INTERFACE_MODE_MII:
2125                 phydev->supported &= (PHY_BASIC_FEATURES |
2126                                       SUPPORTED_Pause |
2127                                       SUPPORTED_Asym_Pause);
2128                 break;
2129         default:
2130                 phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
2131                 return -EINVAL;
2132         }
2133
2134         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2135
2136         phydev->advertising = phydev->supported;
2137
2138         return 0;
2139 }
2140
2141 static void tg3_phy_start(struct tg3 *tp)
2142 {
2143         struct phy_device *phydev;
2144
2145         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2146                 return;
2147
2148         phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2149
2150         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2151                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2152                 phydev->speed = tp->link_config.speed;
2153                 phydev->duplex = tp->link_config.duplex;
2154                 phydev->autoneg = tp->link_config.autoneg;
2155                 phydev->advertising = tp->link_config.advertising;
2156         }
2157
2158         phy_start(phydev);
2159
2160         phy_start_aneg(phydev);
2161 }
2162
2163 static void tg3_phy_stop(struct tg3 *tp)
2164 {
2165         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2166                 return;
2167
2168         phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
2169 }
2170
2171 static void tg3_phy_fini(struct tg3 *tp)
2172 {
2173         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2174                 phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
2175                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2176         }
2177 }
2178
2179 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2180 {
2181         int err;
2182         u32 val;
2183
2184         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2185                 return 0;
2186
2187         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2188                 /* Cannot do read-modify-write on 5401 */
2189                 err = tg3_phy_auxctl_write(tp,
2190                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2191                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2192                                            0x4c20);
2193                 goto done;
2194         }
2195
2196         err = tg3_phy_auxctl_read(tp,
2197                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2198         if (err)
2199                 return err;
2200
2201         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2202         err = tg3_phy_auxctl_write(tp,
2203                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2204
2205 done:
2206         return err;
2207 }
2208
2209 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2210 {
2211         u32 phytest;
2212
2213         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2214                 u32 phy;
2215
2216                 tg3_writephy(tp, MII_TG3_FET_TEST,
2217                              phytest | MII_TG3_FET_SHADOW_EN);
2218                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2219                         if (enable)
2220                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2221                         else
2222                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2223                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2224                 }
2225                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2226         }
2227 }
2228
2229 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2230 {
2231         u32 reg;
2232
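             /* Auto power-down (APD) lets the PHY cut power when no
              * link partner is detected.  FET-style PHYs toggle it
              * through a shadow register; all others use the MISC
              * shadow bank with an 84 ms wake timer.
              */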
2233         if (!tg3_flag(tp, 5705_PLUS) ||
2234             (tg3_flag(tp, 5717_PLUS) &&
2235              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2236                 return;
2237
2238         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2239                 tg3_phy_fet_toggle_apd(tp, enable);
2240                 return;
2241         }
2242
2243         reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2244               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2245               MII_TG3_MISC_SHDW_SCR5_SDTL |
2246               MII_TG3_MISC_SHDW_SCR5_C125OE;
2247         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2248                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2249
2250         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2251
2253         reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2254         if (enable)
2255                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2256
2257         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2258 }
2259
2260 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2261 {
2262         u32 phy;
2263
2264         if (!tg3_flag(tp, 5705_PLUS) ||
2265             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2266                 return;
2267
2268         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2269                 u32 ephy;
2270
2271                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2272                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2273
2274                         tg3_writephy(tp, MII_TG3_FET_TEST,
2275                                      ephy | MII_TG3_FET_SHADOW_EN);
2276                         if (!tg3_readphy(tp, reg, &phy)) {
2277                                 if (enable)
2278                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2279                                 else
2280                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2281                                 tg3_writephy(tp, reg, phy);
2282                         }
2283                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2284                 }
2285         } else {
2286                 int ret;
2287
2288                 ret = tg3_phy_auxctl_read(tp,
2289                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2290                 if (!ret) {
2291                         if (enable)
2292                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2293                         else
2294                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2295                         tg3_phy_auxctl_write(tp,
2296                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2297                 }
2298         }
2299 }
2300
2301 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2302 {
2303         int ret;
2304         u32 val;
2305
2306         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2307                 return;
2308
2309         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2310         if (!ret)
2311                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2312                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2313 }
2314
2315 static void tg3_phy_apply_otp(struct tg3 *tp)
2316 {
2317         u32 otp, phy;
2318
2319         if (!tp->phy_otp)
2320                 return;
2321
2322         otp = tp->phy_otp;
2323
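             /* Unpack the factory-programmed OTP word into individual
              * DSP tap, filter, amplitude and offset adjustments; SMDSP
              * access must be enabled around the writes.
              */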
2324         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2325                 return;
2326
2327         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2328         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2329         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2330
2331         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2332               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2333         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2334
2335         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2336         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2337         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2338
2339         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2340         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2341
2342         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2343         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2344
2345         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2346               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2347         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2348
2349         tg3_phy_toggle_auxctl_smdsp(tp, false);
2350 }
2351
2352 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2353 {
2354         u32 val;
2355         struct ethtool_eee *dest = &tp->eee;
2356
2357         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2358                 return;
2359
2360         if (eee)
2361                 dest = eee;
2362
2363         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2364                 return;
2365
2366         /* Pull eee_active */
2367         if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2368             val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2369                 dest->eee_active = 1;
2370         } else
2371                 dest->eee_active = 0;
2372
2373         /* Pull lp advertised settings */
2374         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2375                 return;
2376         dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2377
2378         /* Pull advertised and eee_enabled settings */
2379         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2380                 return;
2381         dest->eee_enabled = !!val;
2382         dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2383
2384         /* Pull tx_lpi_enabled */
2385         val = tr32(TG3_CPMU_EEE_MODE);
2386         dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2387
2388         /* Pull lpi timer value */
2389         dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2390 }
2391
2392 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2393 {
2394         u32 val;
2395
2396         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2397                 return;
2398
2399         tp->setlpicnt = 0;
2400
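             /* EEE is only usable on an autonegotiated full-duplex
              * 100 or 1000 Mbps link.  Program the LPI exit timer for
              * the speed and arm the setlpicnt countdown; LPI itself
              * is enabled later, once the countdown expires.
              */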
2401         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2402             current_link_up &&
2403             tp->link_config.active_duplex == DUPLEX_FULL &&
2404             (tp->link_config.active_speed == SPEED_100 ||
2405              tp->link_config.active_speed == SPEED_1000)) {
2406                 u32 eeectl;
2407
2408                 if (tp->link_config.active_speed == SPEED_1000)
2409                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2410                 else
2411                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2412
2413                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2414
2415                 tg3_eee_pull_config(tp, NULL);
2416                 if (tp->eee.eee_active)
2417                         tp->setlpicnt = 2;
2418         }
2419
2420         if (!tp->setlpicnt) {
2421                 if (current_link_up &&
2422                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2423                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2424                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2425                 }
2426
2427                 val = tr32(TG3_CPMU_EEE_MODE);
2428                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2429         }
2430 }
2431
2432 static void tg3_phy_eee_enable(struct tg3 *tp)
2433 {
2434         u32 val;
2435
2436         if (tp->link_config.active_speed == SPEED_1000 &&
2437             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2438              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2439              tg3_flag(tp, 57765_CLASS)) &&
2440             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2441                 val = MII_TG3_DSP_TAP26_ALNOKO |
2442                       MII_TG3_DSP_TAP26_RMRXSTO;
2443                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2444                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2445         }
2446
2447         val = tr32(TG3_CPMU_EEE_MODE);
2448         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2449 }
2450
2451 static int tg3_wait_macro_done(struct tg3 *tp)
2452 {
2453         int limit = 100;
2454
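             /* Poll the DSP control register until the macro-busy bit
              * (0x1000) clears, giving up after 100 reads.
              */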
2455         while (limit--) {
2456                 u32 tmp32;
2457
2458                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2459                         if ((tmp32 & 0x1000) == 0)
2460                                 break;
2461                 }
2462         }
2463         if (limit < 0)
2464                 return -EBUSY;
2465
2466         return 0;
2467 }
2468
2469 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2470 {
2471         static const u32 test_pat[4][6] = {
2472         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2473         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2474         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2475         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2476         };
2477         int chan;
2478
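             /* For each of the four DSP channels: load the six-word
              * test pattern, run the macro, then read the pattern back
              * and compare (15 data bits, 4 status bits per pair).  A
              * macro timeout flags a PHY reset for the caller's retry
              * loop; a data mismatch aborts with -EBUSY.
              */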
2479         for (chan = 0; chan < 4; chan++) {
2480                 int i;
2481
2482                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2483                              (chan * 0x2000) | 0x0200);
2484                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2485
2486                 for (i = 0; i < 6; i++)
2487                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2488                                      test_pat[chan][i]);
2489
2490                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2491                 if (tg3_wait_macro_done(tp)) {
2492                         *resetp = 1;
2493                         return -EBUSY;
2494                 }
2495
2496                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2497                              (chan * 0x2000) | 0x0200);
2498                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2499                 if (tg3_wait_macro_done(tp)) {
2500                         *resetp = 1;
2501                         return -EBUSY;
2502                 }
2503
2504                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2505                 if (tg3_wait_macro_done(tp)) {
2506                         *resetp = 1;
2507                         return -EBUSY;
2508                 }
2509
2510                 for (i = 0; i < 6; i += 2) {
2511                         u32 low, high;
2512
2513                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2514                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2515                             tg3_wait_macro_done(tp)) {
2516                                 *resetp = 1;
2517                                 return -EBUSY;
2518                         }
2519                         low &= 0x7fff;
2520                         high &= 0x000f;
2521                         if (low != test_pat[chan][i] ||
2522                             high != test_pat[chan][i+1]) {
2523                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2524                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2525                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2526
2527                                 return -EBUSY;
2528                         }
2529                 }
2530         }
2531
2532         return 0;
2533 }
2534
2535 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2536 {
2537         int chan;
2538
2539         for (chan = 0; chan < 4; chan++) {
2540                 int i;
2541
2542                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2543                              (chan * 0x2000) | 0x0200);
2544                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2545                 for (i = 0; i < 6; i++)
2546                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
2547                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2548                 if (tg3_wait_macro_done(tp))
2549                         return -EBUSY;
2550         }
2551
2552         return 0;
2553 }
2554
2555 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2556 {
2557         u32 reg32, phy9_orig;
2558         int retries, do_phy_reset, err;
2559
2560         retries = 10;
2561         do_phy_reset = 1;
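             /* Up to ten attempts: force a 1000 Mbps full-duplex master
              * link, block PHY control access, then verify the DSP
              * channels with the test patterns; a macro timeout forces
              * a fresh BMCR reset on the next pass.
              */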
2562         do {
2563                 if (do_phy_reset) {
2564                         err = tg3_bmcr_reset(tp);
2565                         if (err)
2566                                 return err;
2567                         do_phy_reset = 0;
2568                 }
2569
2570                 /* Disable transmitter and interrupt.  */
2571                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2572                         continue;
2573
2574                 reg32 |= 0x3000;
2575                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2576
2577                 /* Set full-duplex, 1000 Mbps. */
2578                 tg3_writephy(tp, MII_BMCR,
2579                              BMCR_FULLDPLX | BMCR_SPEED1000);
2580
2581                 /* Set to master mode.  */
2582                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2583                         continue;
2584
2585                 tg3_writephy(tp, MII_CTRL1000,
2586                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2587
2588                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2589                 if (err)
2590                         return err;
2591
2592                 /* Block the PHY control access.  */
2593                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2594
2595                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2596                 if (!err)
2597                         break;
2598         } while (--retries);
2599
2600         err = tg3_phy_reset_chanpat(tp);
2601         if (err)
2602                 return err;
2603
2604         tg3_phydsp_write(tp, 0x8005, 0x0000);
2605
2606         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2607         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2608
2609         tg3_phy_toggle_auxctl_smdsp(tp, false);
2610
2611         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2612
2613         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2614                 reg32 &= ~0x3000;
2615                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2616         } else if (!err)
2617                 err = -EBUSY;
2618
2619         return err;
2620 }
2621
2622 static void tg3_carrier_off(struct tg3 *tp)
2623 {
2624         netif_carrier_off(tp->dev);
2625         tp->link_up = false;
2626 }
2627
2628 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2629 {
2630         if (tg3_flag(tp, ENABLE_ASF))
2631                 netdev_warn(tp->dev,
2632                             "Management side-band traffic will be interrupted during phy settings change\n");
2633 }
2634
2635 /* Fully reset the tigon3 PHY and apply any chip- and PHY-specific
2636  * workarounds needed to bring it back to an operational state.
2637  */
2638 static int tg3_phy_reset(struct tg3 *tp)
2639 {
2640         u32 val, cpmuctrl;
2641         int err;
2642
2643         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2644                 val = tr32(GRC_MISC_CFG);
2645                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2646                 udelay(40);
2647         }
2648         err  = tg3_readphy(tp, MII_BMSR, &val);
2649         err |= tg3_readphy(tp, MII_BMSR, &val);
2650         if (err != 0)
2651                 return -EBUSY;
2652
2653         if (netif_running(tp->dev) && tp->link_up) {
2654                 netif_carrier_off(tp->dev);
2655                 tg3_link_report(tp);
2656         }
2657
2658         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2659             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2660             tg3_asic_rev(tp) == ASIC_REV_5705) {
2661                 err = tg3_phy_reset_5703_4_5(tp);
2662                 if (err)
2663                         return err;
2664                 goto out;
2665         }
2666
2667         cpmuctrl = 0;
2668         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2669             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2670                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2671                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2672                         tw32(TG3_CPMU_CTRL,
2673                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2674         }
2675
2676         err = tg3_bmcr_reset(tp);
2677         if (err)
2678                 return err;
2679
2680         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2681                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2682                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2683
2684                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2685         }
2686
2687         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2688             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2689                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2690                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2691                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2692                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2693                         udelay(40);
2694                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2695                 }
2696         }
2697
2698         if (tg3_flag(tp, 5717_PLUS) &&
2699             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2700                 return 0;
2701
2702         tg3_phy_apply_otp(tp);
2703
2704         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2705                 tg3_phy_toggle_apd(tp, true);
2706         else
2707                 tg3_phy_toggle_apd(tp, false);
2708
2709 out:
2710         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2711             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2712                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2713                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2714                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2715         }
2716
2717         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2718                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2719                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2720         }
2721
2722         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2723                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2724                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2725                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2726                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2727                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2728                 }
2729         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2730                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2731                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2732                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2733                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2734                                 tg3_writephy(tp, MII_TG3_TEST1,
2735                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2736                         } else
2737                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2738
2739                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2740                 }
2741         }
2742
2743         /* Set the extended packet length bit (bit 14) on all chips
2744          * that support jumbo frames. */
2745         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2746                 /* Cannot do read-modify-write on 5401 */
2747                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2748         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2749                 /* Set bit 14 with read-modify-write to preserve other bits */
2750                 err = tg3_phy_auxctl_read(tp,
2751                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2752                 if (!err)
2753                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2754                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2755         }
2756
2757         /* Set PHY register 0x10 bit 0 (high FIFO elasticity) to support
2758          * transmission of jumbo frames.
2759          */
2760         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2761                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2762                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2763                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2764         }
2765
2766         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2767                 /* adjust output voltage */
2768                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2769         }
2770
2771         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2772                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2773
2774         tg3_phy_toggle_automdix(tp, true);
2775         tg3_phy_set_wirespeed(tp);
2776         return 0;
2777 }
2778
2779 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2780 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2781 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2782                                           TG3_GPIO_MSG_NEED_VAUX)
2783 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2784         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2785          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2786          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2787          (TG3_GPIO_MSG_DRVR_PRES << 12))
2788
2789 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2790         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2791          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2792          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2793          (TG3_GPIO_MSG_NEED_VAUX << 12))
2794
2795 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2796 {
2797         u32 status, shift;
2798
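             /* Every PCI function owns a 4-bit slot in the shared GPIO
              * status word; replace only this function's slot and hand
              * back the updated word.
              */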
2799         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2800             tg3_asic_rev(tp) == ASIC_REV_5719)
2801                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2802         else
2803                 status = tr32(TG3_CPMU_DRV_STATUS);
2804
2805         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2806         status &= ~(TG3_GPIO_MSG_MASK << shift);
2807         status |= (newstat << shift);
2808
2809         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2810             tg3_asic_rev(tp) == ASIC_REV_5719)
2811                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2812         else
2813                 tw32(TG3_CPMU_DRV_STATUS, status);
2814
2815         return status >> TG3_APE_GPIO_MSG_SHIFT;
2816 }
2817
2818 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2819 {
2820         if (!tg3_flag(tp, IS_NIC))
2821                 return 0;
2822
2823         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2824             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2825             tg3_asic_rev(tp) == ASIC_REV_5720) {
2826                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2827                         return -EIO;
2828
2829                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2830
2831                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2832                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2833
2834                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2835         } else {
2836                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2837                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2838         }
2839
2840         return 0;
2841 }
2842
2843 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2844 {
2845         u32 grc_local_ctrl;
2846
2847         if (!tg3_flag(tp, IS_NIC) ||
2848             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2849             tg3_asic_rev(tp) == ASIC_REV_5701)
2850                 return;
2851
2852         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2853
2854         tw32_wait_f(GRC_LOCAL_CTRL,
2855                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2856                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2857
2858         tw32_wait_f(GRC_LOCAL_CTRL,
2859                     grc_local_ctrl,
2860                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2861
2862         tw32_wait_f(GRC_LOCAL_CTRL,
2863                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2864                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2865 }
2866
2867 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2868 {
2869         if (!tg3_flag(tp, IS_NIC))
2870                 return;
2871
2872         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2873             tg3_asic_rev(tp) == ASIC_REV_5701) {
2874                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2875                             (GRC_LCLCTRL_GPIO_OE0 |
2876                              GRC_LCLCTRL_GPIO_OE1 |
2877                              GRC_LCLCTRL_GPIO_OE2 |
2878                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2879                              GRC_LCLCTRL_GPIO_OUTPUT1),
2880                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2881         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2882                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2883                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2884                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2885                                      GRC_LCLCTRL_GPIO_OE1 |
2886                                      GRC_LCLCTRL_GPIO_OE2 |
2887                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2888                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2889                                      tp->grc_local_ctrl;
2890                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2891                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2892
2893                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2894                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2895                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2896
2897                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2898                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2899                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2900         } else {
2901                 u32 no_gpio2;
2902                 u32 grc_local_ctrl = 0;
2903
2904                 /* Workaround to keep the part from drawing excess current. */
2905                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2906                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2907                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2908                                     grc_local_ctrl,
2909                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2910                 }
2911
2912                 /* On 5753 and variants, GPIO2 cannot be used. */
2913                 no_gpio2 = tp->nic_sram_data_cfg &
2914                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2915
2916                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2917                                   GRC_LCLCTRL_GPIO_OE1 |
2918                                   GRC_LCLCTRL_GPIO_OE2 |
2919                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2920                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2921                 if (no_gpio2) {
2922                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2923                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2924                 }
2925                 tw32_wait_f(GRC_LOCAL_CTRL,
2926                             tp->grc_local_ctrl | grc_local_ctrl,
2927                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2928
2929                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2930
2931                 tw32_wait_f(GRC_LOCAL_CTRL,
2932                             tp->grc_local_ctrl | grc_local_ctrl,
2933                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2934
2935                 if (!no_gpio2) {
2936                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2937                         tw32_wait_f(GRC_LOCAL_CTRL,
2938                                     tp->grc_local_ctrl | grc_local_ctrl,
2939                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2940                 }
2941         }
2942 }
2943
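/* Decide between Vmain and Vaux for 5717-class chips.  The APE GPIO lock
 * serializes the transition across ports; each function records whether
 * it needs Vaux (ASF, APE or WOL active) in the shared function status
 * word, and the power source is only switched once no other function's
 * driver is still present.
 */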
2944 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2945 {
2946         u32 msg = 0;
2947
2948         /* Serialize power state transitions */
2949         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2950                 return;
2951
2952         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2953                 msg = TG3_GPIO_MSG_NEED_VAUX;
2954
2955         msg = tg3_set_function_status(tp, msg);
2956
2957         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2958                 goto done;
2959
2960         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2961                 tg3_pwrsrc_switch_to_vaux(tp);
2962         else
2963                 tg3_pwrsrc_die_with_vmain(tp);
2964
2965 done:
2966         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2967 }
2968
2969 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2970 {
2971         bool need_vaux = false;
2972
2973         /* The GPIOs do something completely different on 57765. */
2974         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2975                 return;
2976
2977         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2978             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2979             tg3_asic_rev(tp) == ASIC_REV_5720) {
2980                 tg3_frob_aux_power_5717(tp, include_wol ?
2981                                         tg3_flag(tp, WOL_ENABLE) != 0 : false);
2982                 return;
2983         }
2984
2985         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2986                 struct net_device *dev_peer;
2987
2988                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2989
2990                 /* remove_one() may have been run on the peer. */
2991                 if (dev_peer) {
2992                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2993
2994                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2995                                 return;
2996
2997                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2998                             tg3_flag(tp_peer, ENABLE_ASF))
2999                                 need_vaux = true;
3000                 }
3001         }
3002
3003         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3004             tg3_flag(tp, ENABLE_ASF))
3005                 need_vaux = true;
3006
3007         if (need_vaux)
3008                 tg3_pwrsrc_switch_to_vaux(tp);
3009         else
3010                 tg3_pwrsrc_die_with_vmain(tp);
3011 }
3012
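/* Return nonzero when MAC_MODE_LINK_POLARITY should be set for the given
 * link speed on 5700 boards.  The required polarity depends on the LED
 * mode strapping and is inverted when a BCM5411 PHY is fitted.
 */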
3013 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3014 {
3015         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3016                 return 1;
3017         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3018                 if (speed != SPEED_10)
3019                         return 1;
3020         } else if (speed == SPEED_10)
3021                 return 1;
3022
3023         return 0;
3024 }
3025
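/* Chips where powering down the PHY via BMCR_PDOWN is unsafe: always on
 * 5700/5704, on 5780 with an MII serdes PHY, on function 0 of the 5717,
 * and on function 0 of 5719/5720 when a serdes PHY is in use.
 */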
3026 static bool tg3_phy_power_bug(struct tg3 *tp)
3027 {
3028         switch (tg3_asic_rev(tp)) {
3029         case ASIC_REV_5700:
3030         case ASIC_REV_5704:
3031                 return true;
3032         case ASIC_REV_5780:
3033                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3034                         return true;
3035                 return false;
3036         case ASIC_REV_5717:
3037                 if (!tp->pci_fn)
3038                         return true;
3039                 return false;
3040         case ASIC_REV_5719:
3041         case ASIC_REV_5720:
3042                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3043                     !tp->pci_fn)
3044                         return true;
3045                 return false;
3046         }
3047
3048         return false;
3049 }
3050
3051 static bool tg3_phy_led_bug(struct tg3 *tp)
3052 {
3053         switch (tg3_asic_rev(tp)) {
3054         case ASIC_REV_5719:
3055         case ASIC_REV_5720:
3056                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3057                     !tp->pci_fn)
3058                         return true;
3059                 return false;
3060         }
3061
3062         return false;
3063 }
3064
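/* Drop the PHY into its low-power state.  Serdes PHYs are parked through
 * SG_DIG_CTRL/MAC_SERDES_CFG, the 5906 EPHY is put into IDDQ mode, FET
 * PHYs use the shadow-register standby power-down bit, and other PHYs may
 * first be isolated through the AUXCTL power-control shadow before
 * BMCR_PDOWN is finally written (skipped on chips with the power bug
 * handled below).
 */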
3065 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3066 {
3067         u32 val;
3068
3069         if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3070                 return;
3071
3072         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3073                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3074                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3075                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3076
3077                         sg_dig_ctrl |=
3078                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3079                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
3080                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3081                 }
3082                 return;
3083         }
3084
3085         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3086                 tg3_bmcr_reset(tp);
3087                 val = tr32(GRC_MISC_CFG);
3088                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3089                 udelay(40);
3090                 return;
3091         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3092                 u32 phytest;
3093                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3094                         u32 phy;
3095
3096                         tg3_writephy(tp, MII_ADVERTISE, 0);
3097                         tg3_writephy(tp, MII_BMCR,
3098                                      BMCR_ANENABLE | BMCR_ANRESTART);
3099
3100                         tg3_writephy(tp, MII_TG3_FET_TEST,
3101                                      phytest | MII_TG3_FET_SHADOW_EN);
3102                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3103                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3104                                 tg3_writephy(tp,
3105                                              MII_TG3_FET_SHDW_AUXMODE4,
3106                                              phy);
3107                         }
3108                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3109                 }
3110                 return;
3111         } else if (do_low_power) {
3112                 if (!tg3_phy_led_bug(tp))
3113                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3114                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3115
3116                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3117                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3118                       MII_TG3_AUXCTL_PCTL_VREG_11V;
3119                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3120         }
3121
3122         /* On some chips the PHY must not be powered down because of
3123          * hardware bugs.
3124          */
3125         if (tg3_phy_power_bug(tp))
3126                 return;
3127
3128         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3129             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3130                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3131                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3132                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3133                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3134         }
3135
3136         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3137 }
3138
3139 /* tp->lock is held. */
3140 static int tg3_nvram_lock(struct tg3 *tp)
3141 {
3142         if (tg3_flag(tp, NVRAM)) {
3143                 int i;
3144
3145                 if (tp->nvram_lock_cnt == 0) {
3146                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3147                         for (i = 0; i < 8000; i++) {
3148                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3149                                         break;
3150                                 udelay(20);
3151                         }
3152                         if (i == 8000) {
3153                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3154                                 return -ENODEV;
3155                         }
3156                 }
3157                 tp->nvram_lock_cnt++;
3158         }
3159         return 0;
3160 }
3161
3162 /* tp->lock is held. */
3163 static void tg3_nvram_unlock(struct tg3 *tp)
3164 {
3165         if (tg3_flag(tp, NVRAM)) {
3166                 if (tp->nvram_lock_cnt > 0)
3167                         tp->nvram_lock_cnt--;
3168                 if (tp->nvram_lock_cnt == 0)
3169                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3170         }
3171 }
3172
3173 /* tp->lock is held. */
3174 static void tg3_enable_nvram_access(struct tg3 *tp)
3175 {
3176         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3177                 u32 nvaccess = tr32(NVRAM_ACCESS);
3178
3179                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3180         }
3181 }
3182
3183 /* tp->lock is held. */
3184 static void tg3_disable_nvram_access(struct tg3 *tp)
3185 {
3186         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3187                 u32 nvaccess = tr32(NVRAM_ACCESS);
3188
3189                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3190         }
3191 }
3192
3193 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3194                                         u32 offset, u32 *val)
3195 {
3196         u32 tmp;
3197         int i;
3198
3199         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3200                 return -EINVAL;
3201
3202         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3203                                         EEPROM_ADDR_DEVID_MASK |
3204                                         EEPROM_ADDR_READ);
3205         tw32(GRC_EEPROM_ADDR,
3206              tmp |
3207              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3208              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3209               EEPROM_ADDR_ADDR_MASK) |
3210              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3211
3212         for (i = 0; i < 1000; i++) {
3213                 tmp = tr32(GRC_EEPROM_ADDR);
3214
3215                 if (tmp & EEPROM_ADDR_COMPLETE)
3216                         break;
3217                 msleep(1);
3218         }
3219         if (!(tmp & EEPROM_ADDR_COMPLETE))
3220                 return -EBUSY;
3221
3222         tmp = tr32(GRC_EEPROM_DATA);
3223
3224         /*
3225          * The data is always returned in the opposite of the native
3226          * endian format.  Perform a blind byteswap to compensate.
3227          */
3228         *val = swab32(tmp);
3229
3230         return 0;
3231 }
3232
3233 #define NVRAM_CMD_TIMEOUT 10000
3234
3235 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3236 {
3237         int i;
3238
3239         tw32(NVRAM_CMD, nvram_cmd);
3240         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3241                 udelay(10);
3242                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3243                         udelay(10);
3244                         break;
3245                 }
3246         }
3247
3248         if (i == NVRAM_CMD_TIMEOUT)
3249                 return -EBUSY;
3250
3251         return 0;
3252 }
3253
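/* Translate a linear NVRAM offset into the physical address expected by
 * Atmel AT45DB0x1B-style flashes, which address storage as <page, byte>
 * rather than linearly.  As a worked example with assumed geometry
 * (264-byte pages, ATMEL_AT45DB0X1B_PAGE_POS of 9): linear offset 1000
 * is page 3, byte 208, so the physical address is (3 << 9) + 208 = 0x6d0.
 */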
3254 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3255 {
3256         if (tg3_flag(tp, NVRAM) &&
3257             tg3_flag(tp, NVRAM_BUFFERED) &&
3258             tg3_flag(tp, FLASH) &&
3259             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3260             (tp->nvram_jedecnum == JEDEC_ATMEL))
3261
3262                 addr = ((addr / tp->nvram_pagesize) <<
3263                         ATMEL_AT45DB0X1B_PAGE_POS) +
3264                        (addr % tp->nvram_pagesize);
3265
3266         return addr;
3267 }
3268
3269 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3270 {
3271         if (tg3_flag(tp, NVRAM) &&
3272             tg3_flag(tp, NVRAM_BUFFERED) &&
3273             tg3_flag(tp, FLASH) &&
3274             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3275             (tp->nvram_jedecnum == JEDEC_ATMEL))
3276
3277                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3278                         tp->nvram_pagesize) +
3279                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3280
3281         return addr;
3282 }
3283
3284 /* NOTE: Data read in from NVRAM is byteswapped according to
3285  * the byteswapping settings for all other register accesses.
3286  * tg3 devices are BE devices, so on a BE machine, the data
3287  * returned will be exactly as it is seen in NVRAM.  On a LE
3288  * machine, the 32-bit value will be byteswapped.
3289  */
3290 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3291 {
3292         int ret;
3293
3294         if (!tg3_flag(tp, NVRAM))
3295                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3296
3297         offset = tg3_nvram_phys_addr(tp, offset);
3298
3299         if (offset > NVRAM_ADDR_MSK)
3300                 return -EINVAL;
3301
3302         ret = tg3_nvram_lock(tp);
3303         if (ret)
3304                 return ret;
3305
3306         tg3_enable_nvram_access(tp);
3307
3308         tw32(NVRAM_ADDR, offset);
3309         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3310                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3311
3312         if (ret == 0)
3313                 *val = tr32(NVRAM_RDDATA);
3314
3315         tg3_disable_nvram_access(tp);
3316
3317         tg3_nvram_unlock(tp);
3318
3319         return ret;
3320 }
3321
3322 /* Ensures NVRAM data is in bytestream format. */
3323 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3324 {
3325         u32 v;
3326         int res = tg3_nvram_read(tp, offset, &v);
3327         if (!res)
3328                 *val = cpu_to_be32(v);
3329         return res;
3330 }
3331
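/* A minimal usage sketch for the NVRAM readers above (hypothetical
 * caller; the magic-number check mirrors the probe path, error handling
 * elided):
 *
 *	__be32 magic;
 *
 *	if (tg3_nvram_read_be32(tp, 0, &magic) == 0 &&
 *	    be32_to_cpu(magic) == TG3_EEPROM_MAGIC)
 *		... NVRAM contents look valid ...
 */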
3332 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3333                                     u32 offset, u32 len, u8 *buf)
3334 {
3335         int i, j, rc = 0;
3336         u32 val;
3337
3338         for (i = 0; i < len; i += 4) {
3339                 u32 addr;
3340                 __be32 data;
3341
3342                 addr = offset + i;
3343
3344                 memcpy(&data, buf + i, 4);
3345
3346                 /*
3347                  * The SEEPROM interface expects the data to always be opposite
3348                  * the native endian format.  We accomplish this by reversing
3349                  * all the operations that would have been performed on the
3350                  * data from a call to tg3_nvram_read_be32().
3351                  */
3352                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3353
3354                 val = tr32(GRC_EEPROM_ADDR);
3355                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3356
3357                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3358                         EEPROM_ADDR_READ);
3359                 tw32(GRC_EEPROM_ADDR, val |
3360                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3361                         (addr & EEPROM_ADDR_ADDR_MASK) |
3362                         EEPROM_ADDR_START |
3363                         EEPROM_ADDR_WRITE);
3364
3365                 for (j = 0; j < 1000; j++) {
3366                         val = tr32(GRC_EEPROM_ADDR);
3367
3368                         if (val & EEPROM_ADDR_COMPLETE)
3369                                 break;
3370                         msleep(1);
3371                 }
3372                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3373                         rc = -EBUSY;
3374                         break;
3375                 }
3376         }
3377
3378         return rc;
3379 }
3380
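/* Unbuffered (page-erase) flash cannot be programmed a word at a time.
 * Each affected page is read back into a scratch buffer, merged with the
 * new data, erased after an explicit "write enable" command, and then
 * rewritten word by word with FIRST/LAST command framing.
 */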
3381 /* offset and length are dword aligned */
3382 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3383                 u8 *buf)
3384 {
3385         int ret = 0;
3386         u32 pagesize = tp->nvram_pagesize;
3387         u32 pagemask = pagesize - 1;
3388         u32 nvram_cmd;
3389         u8 *tmp;
3390
3391         tmp = kmalloc(pagesize, GFP_KERNEL);
3392         if (tmp == NULL)
3393                 return -ENOMEM;
3394
3395         while (len) {
3396                 int j;
3397                 u32 phy_addr, page_off, size;
3398
3399                 phy_addr = offset & ~pagemask;
3400
3401                 for (j = 0; j < pagesize; j += 4) {
3402                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3403                                                   (__be32 *) (tmp + j));
3404                         if (ret)
3405                                 break;
3406                 }
3407                 if (ret)
3408                         break;
3409
3410                 page_off = offset & pagemask;
3411                 size = pagesize;
3412                 if (len < size)
3413                         size = len;
3414
3415                 len -= size;
3416
3417                 memcpy(tmp + page_off, buf, size);
3418
3419                 offset = offset + (pagesize - page_off);
3420
3421                 tg3_enable_nvram_access(tp);
3422
3423                 /*
3424                  * Before we can erase the flash page, we need
3425                  * to issue a special "write enable" command.
3426                  */
3427                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3428
3429                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3430                         break;
3431
3432                 /* Erase the target page */
3433                 tw32(NVRAM_ADDR, phy_addr);
3434
3435                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3436                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3437
3438                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3439                         break;
3440
3441                 /* Issue another write enable to start the write. */
3442                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3443
3444                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3445                         break;
3446
3447                 for (j = 0; j < pagesize; j += 4) {
3448                         __be32 data;
3449
3450                         data = *((__be32 *) (tmp + j));
3451
3452                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3453
3454                         tw32(NVRAM_ADDR, phy_addr + j);
3455
3456                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3457                                 NVRAM_CMD_WR;
3458
3459                         if (j == 0)
3460                                 nvram_cmd |= NVRAM_CMD_FIRST;
3461                         else if (j == (pagesize - 4))
3462                                 nvram_cmd |= NVRAM_CMD_LAST;
3463
3464                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3465                         if (ret)
3466                                 break;
3467                 }
3468                 if (ret)
3469                         break;
3470         }
3471
3472         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3473         tg3_nvram_exec_cmd(tp, nvram_cmd);
3474
3475         kfree(tmp);
3476
3477         return ret;
3478 }
3479
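/* Buffered flash and SEEPROM parts accept word-at-a-time programming.
 * The FIRST/LAST command bits frame each flash page (or every single
 * word on non-flash parts), and ST parts additionally need an explicit
 * "write enable" at the start of each page.
 */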
3480 /* offset and length are dword aligned */
3481 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3482                 u8 *buf)
3483 {
3484         int i, ret = 0;
3485
3486         for (i = 0; i < len; i += 4, offset += 4) {
3487                 u32 page_off, phy_addr, nvram_cmd;
3488                 __be32 data;
3489
3490                 memcpy(&data, buf + i, 4);
3491                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3492
3493                 page_off = offset % tp->nvram_pagesize;
3494
3495                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3496
3497                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3498
3499                 if (page_off == 0 || i == 0)
3500                         nvram_cmd |= NVRAM_CMD_FIRST;
3501                 if (page_off == (tp->nvram_pagesize - 4))
3502                         nvram_cmd |= NVRAM_CMD_LAST;
3503
3504                 if (i == (len - 4))
3505                         nvram_cmd |= NVRAM_CMD_LAST;
3506
3507                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3508                     !tg3_flag(tp, FLASH) ||
3509                     !tg3_flag(tp, 57765_PLUS))
3510                         tw32(NVRAM_ADDR, phy_addr);
3511
3512                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3513                     !tg3_flag(tp, 5755_PLUS) &&
3514                     (tp->nvram_jedecnum == JEDEC_ST) &&
3515                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3516                         u32 cmd;
3517
3518                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3519                         ret = tg3_nvram_exec_cmd(tp, cmd);
3520                         if (ret)
3521                                 break;
3522                 }
3523                 if (!tg3_flag(tp, FLASH)) {
3524                         /* We always do complete word writes to eeprom. */
3525                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3526                 }
3527
3528                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3529                 if (ret)
3530                         break;
3531         }
3532         return ret;
3533 }
3534
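/* Top-level NVRAM write entry point.  Boards with EEPROM_WRITE_PROT gate
 * writes with a GPIO, so the write-protect pin is dropped for the
 * duration.  Legacy parts go through the SEEPROM interface; everything
 * else takes the NVRAM lock, enables write mode in GRC_MODE and then
 * dispatches to the buffered or unbuffered path by flash type.
 */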
3535 /* offset and length are dword aligned */
3536 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3537 {
3538         int ret;
3539
3540         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3541                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3542                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3543                 udelay(40);
3544         }
3545
3546         if (!tg3_flag(tp, NVRAM)) {
3547                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3548         } else {
3549                 u32 grc_mode;
3550
3551                 ret = tg3_nvram_lock(tp);
3552                 if (ret)
3553                         return ret;
3554
3555                 tg3_enable_nvram_access(tp);
3556                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3557                         tw32(NVRAM_WRITE1, 0x406);
3558
3559                 grc_mode = tr32(GRC_MODE);
3560                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3561
3562                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3563                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3564                                 buf);
3565                 } else {
3566                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3567                                 buf);
3568                 }
3569
3570                 grc_mode = tr32(GRC_MODE);
3571                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3572
3573                 tg3_disable_nvram_access(tp);
3574                 tg3_nvram_unlock(tp);
3575         }
3576
3577         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3578                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3579                 udelay(40);
3580         }
3581
3582         return ret;
3583 }
3584
3585 #define RX_CPU_SCRATCH_BASE     0x30000
3586 #define RX_CPU_SCRATCH_SIZE     0x04000
3587 #define TX_CPU_SCRATCH_BASE     0x34000
3588 #define TX_CPU_SCRATCH_SIZE     0x04000
3589
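/* Halt one of the on-chip RX/TX CPUs by asserting CPU_MODE_HALT and
 * polling until the core reports the halted state, bailing out early if
 * the PCI channel has gone offline.
 */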
3590 /* tp->lock is held. */
3591 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3592 {
3593         int i;
3594         const int iters = 10000;
3595
3596         for (i = 0; i < iters; i++) {
3597                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3598                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3599                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3600                         break;
3601                 if (pci_channel_offline(tp->pdev))
3602                         return -EBUSY;
3603         }
3604
3605         return (i == iters) ? -EBUSY : 0;
3606 }
3607
3608 /* tp->lock is held. */
3609 static int tg3_rxcpu_pause(struct tg3 *tp)
3610 {
3611         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3612
3613         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3614         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3615         udelay(10);
3616
3617         return rc;
3618 }
3619
3620 /* tp->lock is held. */
3621 static int tg3_txcpu_pause(struct tg3 *tp)
3622 {
3623         return tg3_pause_cpu(tp, TX_CPU_BASE);
3624 }
3625
3626 /* tp->lock is held. */
3627 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3628 {
3629         tw32(cpu_base + CPU_STATE, 0xffffffff);
3630         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3631 }
3632
3633 /* tp->lock is held. */
3634 static void tg3_rxcpu_resume(struct tg3 *tp)
3635 {
3636         tg3_resume_cpu(tp, RX_CPU_BASE);
3637 }
3638
3639 /* tp->lock is held. */
3640 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3641 {
3642         int rc;
3643
3644         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3645
3646         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3647                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3648
3649                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3650                 return 0;
3651         }
3652         if (cpu_base == RX_CPU_BASE) {
3653                 rc = tg3_rxcpu_pause(tp);
3654         } else {
3655                 /*
3656                  * The 5750 derivative in the BCM4785 only has an Rx CPU,
3657                  * so there is no Tx CPU to pause.
3658                  */
3659                 if (tg3_flag(tp, IS_SSB_CORE))
3660                         return 0;
3661
3662                 rc = tg3_txcpu_pause(tp);
3663         }
3664
3665         if (rc) {
3666                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3667                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3668                 return -ENODEV;
3669         }
3670
3671         /* Clear firmware's nvram arbitration. */
3672         if (tg3_flag(tp, NVRAM))
3673                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3674         return 0;
3675 }
3676
3677 static int tg3_fw_data_len(struct tg3 *tp,
3678                            const struct tg3_firmware_hdr *fw_hdr)
3679 {
3680         int fw_len;
3681
3682         /* Non-fragmented firmware has one firmware header followed by a
3683          * contiguous chunk of data to be written. The length field in that
3684          * header is not the length of the data to be written but the
3685          * complete length of the bss. The data length is therefore derived
3686          * from tp->fw->size minus the headers.
3687          *
3688          * Fragmented firmware has a main header followed by multiple
3689          * fragments. Each fragment is identical to non-fragmented firmware:
3690          * a firmware header followed by a contiguous chunk of data. In
3691          * the main header, the length field is unused and set to 0xffffffff.
3692          * In each fragment header, the length is the entire size of that
3693          * fragment, i.e. fragment data plus header length. The data length
3694          * is therefore the header's length field minus TG3_FW_HDR_LEN.
3695          */
3696         if (tp->fw_len == 0xffffffff)
3697                 fw_len = be32_to_cpu(fw_hdr->len);
3698         else
3699                 fw_len = tp->fw->size;
3700
3701         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3702 }
3703
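/* Copy a firmware image into a CPU's scratch memory.  The NVRAM lock is
 * taken first (bootcode may still be running), the target CPU is halted
 * and its scratch space cleared, and each fragment's words are then
 * written with either direct memory writes or indirect register writes,
 * depending on the chip.  The 57766 service-patch path skips the
 * halt-and-clear step.
 */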
3704 /* tp->lock is held. */
3705 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3706                                  u32 cpu_scratch_base, int cpu_scratch_size,
3707                                  const struct tg3_firmware_hdr *fw_hdr)
3708 {
3709         int err, i;
3710         void (*write_op)(struct tg3 *, u32, u32);
3711         int total_len = tp->fw->size;
3712
3713         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3714                 netdev_err(tp->dev,
3715                            "%s: attempted to load TX CPU firmware on a 5705-class chip\n",
3716                            __func__);
3717                 return -EINVAL;
3718         }
3719
3720         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3721                 write_op = tg3_write_mem;
3722         else
3723                 write_op = tg3_write_indirect_reg32;
3724
3725         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3726                 /* It is possible that the bootcode is still loading at this
3727                  * point. Take the NVRAM lock before halting the CPU.
3728                  */
3729                 int lock_err = tg3_nvram_lock(tp);
3730                 err = tg3_halt_cpu(tp, cpu_base);
3731                 if (!lock_err)
3732                         tg3_nvram_unlock(tp);
3733                 if (err)
3734                         goto out;
3735
3736                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3737                         write_op(tp, cpu_scratch_base + i, 0);
3738                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3739                 tw32(cpu_base + CPU_MODE,
3740                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3741         } else {
3742                 /* Subtract additional main header for fragmented firmware and
3743                  * advance to the first fragment
3744                  */
3745                 total_len -= TG3_FW_HDR_LEN;
3746                 fw_hdr++;
3747         }
3748
3749         do {
3750                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3751                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3752                         write_op(tp, cpu_scratch_base +
3753                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3754                                      (i * sizeof(u32)),
3755                                  be32_to_cpu(fw_data[i]));
3756
3757                 total_len -= be32_to_cpu(fw_hdr->len);
3758
3759                 /* Advance to next fragment */
3760                 fw_hdr = (struct tg3_firmware_hdr *)
3761                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3762         } while (total_len > 0);
3763
3764         err = 0;
3765
3766 out:
3767         return err;
3768 }
3769
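/* Point a halted CPU at the firmware entry address.  The PC write is
 * verified by reading it back, re-halting and rewriting a few times
 * before giving up with -EBUSY.
 */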
3770 /* tp->lock is held. */
3771 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3772 {
3773         int i;
3774         const int iters = 5;
3775
3776         tw32(cpu_base + CPU_STATE, 0xffffffff);
3777         tw32_f(cpu_base + CPU_PC, pc);
3778
3779         for (i = 0; i < iters; i++) {
3780                 if (tr32(cpu_base + CPU_PC) == pc)
3781                         break;
3782                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3783                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3784                 tw32_f(cpu_base + CPU_PC, pc);
3785                 udelay(1000);
3786         }
3787
3788         return (i == iters) ? -EBUSY : 0;
3789 }
3790
3791 /* tp->lock is held. */
3792 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3793 {
3794         const struct tg3_firmware_hdr *fw_hdr;
3795         int err;
3796
3797         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3798
3799         /* The firmware blob starts with version numbers, followed by the
3800            start address and length. The length field holds the complete
3801            length: length = end_address_of_bss - start_address_of_text.
3802            The remainder is the blob to be loaded contiguously
3803            from the start address. */
3804
3805         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3806                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3807                                     fw_hdr);
3808         if (err)
3809                 return err;
3810
3811         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3812                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3813                                     fw_hdr);
3814         if (err)
3815                 return err;
3816
3817         /* Now startup only the RX cpu. */
3818         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3819                                        be32_to_cpu(fw_hdr->base_addr));
3820         if (err) {
3821                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3822                            "should be %08x\n", __func__,
3823                            tr32(RX_CPU_BASE + CPU_PC),
3824                            be32_to_cpu(fw_hdr->base_addr));
3825                 return -ENODEV;
3826         }
3827
3828         tg3_rxcpu_resume(tp);
3829
3830         return 0;
3831 }
3832
3833 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3834 {
3835         const int iters = 1000;
3836         int i;
3837         u32 val;
3838
3839         /* Wait for the boot code to complete initialization and enter its
3840          * service loop. It is then safe to download service patches.
3841          */
3842         for (i = 0; i < iters; i++) {
3843                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3844                         break;
3845
3846                 udelay(10);
3847         }
3848
3849         if (i == iters) {
3850                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3851                 return -EBUSY;
3852         }
3853
3854         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3855         if (val & 0xff) {
3856                 netdev_warn(tp->dev,
3857                             "Other patches exist. Not downloading EEE patch\n");
3858                 return -EEXIST;
3859         }
3860
3861         return 0;
3862 }
3863
3864 /* tp->lock is held. */
3865 static void tg3_load_57766_firmware(struct tg3 *tp)
3866 {
3867         struct tg3_firmware_hdr *fw_hdr;
3868
3869         if (!tg3_flag(tp, NO_NVRAM))
3870                 return;
3871
3872         if (tg3_validate_rxcpu_state(tp))
3873                 return;
3874
3875         if (!tp->fw)
3876                 return;
3877
3878         /* This firmware blob has a different format from older firmware
3879          * releases, as described below. The main difference is that the
3880          * data is fragmented and written to non-contiguous locations.
3881          *
3882          * At the beginning is a firmware header identical to other
3883          * firmware, consisting of version, base address and length. The
3884          * length here is unused and set to 0xffffffff.
3885          *
3886          * This is followed by a series of firmware fragments, each
3887          * individually identical to older firmware: a firmware header
3888          * followed by the data for that fragment. The version field of
3889          * the individual fragment headers is unused.
3890          */
3891
3892         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3893         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3894                 return;
3895
3896         if (tg3_rxcpu_pause(tp))
3897                 return;
3898
3899         /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3900         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3901
3902         tg3_rxcpu_resume(tp);
3903 }
3904
3905 /* tp->lock is held. */
3906 static int tg3_load_tso_firmware(struct tg3 *tp)
3907 {
3908         const struct tg3_firmware_hdr *fw_hdr;
3909         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3910         int err;
3911
3912         if (!tg3_flag(tp, FW_TSO))
3913                 return 0;
3914
3915         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3916
3917         /* The firmware blob starts with version numbers, followed by the
3918            start address and length. The length field holds the complete
3919            length: length = end_address_of_bss - start_address_of_text.
3920            The remainder is the blob to be loaded contiguously
3921            from the start address. */
3922
3923         cpu_scratch_size = tp->fw_len;
3924
3925         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3926                 cpu_base = RX_CPU_BASE;
3927                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3928         } else {
3929                 cpu_base = TX_CPU_BASE;
3930                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3931                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3932         }
3933
3934         err = tg3_load_firmware_cpu(tp, cpu_base,
3935                                     cpu_scratch_base, cpu_scratch_size,
3936                                     fw_hdr);
3937         if (err)
3938                 return err;
3939
3940         /* Now startup the cpu. */
3941         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3942                                        be32_to_cpu(fw_hdr->base_addr));
3943         if (err) {
3944                 netdev_err(tp->dev,
3945                            "%s failed to set CPU PC: is %08x, should be %08x\n",
3946                            __func__, tr32(cpu_base + CPU_PC),
3947                            be32_to_cpu(fw_hdr->base_addr));
3948                 return -ENODEV;
3949         }
3950
3951         tg3_resume_cpu(tp, cpu_base);
3952         return 0;
3953 }
3954
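/* Program one MAC address slot.  The hardware splits each address across
 * two registers: the high word holds bytes 0-1, the low word bytes 2-5,
 * and slots 4 and up live in the extended-address register block.  For a
 * hypothetical address 00:10:18:aa:bb:cc this writes addr_high =
 * 0x00000010 and addr_low = 0x18aabbcc.
 */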
3955 /* tp->lock is held. */
3956 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3957 {
3958         u32 addr_high, addr_low;
3959
3960         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3961         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3962                     (mac_addr[4] <<  8) | mac_addr[5]);
3963
3964         if (index < 4) {
3965                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3966                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3967         } else {
3968                 index -= 4;
3969                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3970                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3971         }
3972 }
3973
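/* Program the station address into the MAC's filtering slots (all
 * sixteen slots on 5703/5704) and seed the transmit backoff generator
 * from the byte sum of the address.
 */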
3974 /* tp->lock is held. */
3975 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3976 {
3977         u32 addr_high;
3978         int i;
3979
3980         for (i = 0; i < 4; i++) {
3981                 if (i == 1 && skip_mac_1)
3982                         continue;
3983                 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3984         }
3985
3986         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3987             tg3_asic_rev(tp) == ASIC_REV_5704) {
3988                 for (i = 4; i < 16; i++)
3989                         __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3990         }
3991
3992         addr_high = (tp->dev->dev_addr[0] +
3993                      tp->dev->dev_addr[1] +
3994                      tp->dev->dev_addr[2] +
3995                      tp->dev->dev_addr[3] +
3996                      tp->dev->dev_addr[4] +
3997                      tp->dev->dev_addr[5]) &
3998                 TX_BACKOFF_SEED_MASK;
3999         tw32(MAC_TX_BACKOFF_SEED, addr_high);
4000 }
4001
4002 static void tg3_enable_register_access(struct tg3 *tp)
4003 {
4004         /*
4005          * Make sure register accesses (indirect or otherwise) will function
4006          * correctly.
4007          */
4008         pci_write_config_dword(tp->pdev,
4009                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4010 }
4011
4012 static int tg3_power_up(struct tg3 *tp)
4013 {
4014         int err;
4015
4016         tg3_enable_register_access(tp);
4017
4018         err = pci_set_power_state(tp->pdev, PCI_D0);
4019         if (!err) {
4020                 /* Switch out of Vaux if it is a NIC */
4021                 tg3_pwrsrc_switch_to_vmain(tp);
4022         } else {
4023                 netdev_err(tp->dev, "Transition to D0 failed\n");
4024         }
4025
4026         return err;
4027 }
4028
4029 static int tg3_setup_phy(struct tg3 *, bool);
4030
4031 static int tg3_power_down_prepare(struct tg3 *tp)
4032 {
4033         u32 misc_host_ctrl;
4034         bool device_should_wake, do_low_power;
4035
4036         tg3_enable_register_access(tp);
4037
4038         /* Restore the CLKREQ setting. */
4039         if (tg3_flag(tp, CLKREQ_BUG))
4040                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4041                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4042
4043         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4044         tw32(TG3PCI_MISC_HOST_CTRL,
4045              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4046
4047         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4048                              tg3_flag(tp, WOL_ENABLE);
4049
4050         if (tg3_flag(tp, USE_PHYLIB)) {
4051                 do_low_power = false;
4052                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4053                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4054                         struct phy_device *phydev;
4055                         u32 phyid, advertising;
4056
4057                         phydev = tp->mdio_bus->phy_map[tp->phy_addr];
4058
4059                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4060
4061                         tp->link_config.speed = phydev->speed;
4062                         tp->link_config.duplex = phydev->duplex;
4063                         tp->link_config.autoneg = phydev->autoneg;
4064                         tp->link_config.advertising = phydev->advertising;
4065
4066                         advertising = ADVERTISED_TP |
4067                                       ADVERTISED_Pause |
4068                                       ADVERTISED_Autoneg |
4069                                       ADVERTISED_10baseT_Half;
4070
4071                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4072                                 if (tg3_flag(tp, WOL_SPEED_100MB))
4073                                         advertising |=
4074                                                 ADVERTISED_100baseT_Half |
4075                                                 ADVERTISED_100baseT_Full |
4076                                                 ADVERTISED_10baseT_Full;
4077                                 else
4078                                         advertising |= ADVERTISED_10baseT_Full;
4079                         }
4080
4081                         phydev->advertising = advertising;
4082
4083                         phy_start_aneg(phydev);
4084
4085                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4086                         if (phyid != PHY_ID_BCMAC131) {
4087                                 phyid &= PHY_BCM_OUI_MASK;
4088                                 if (phyid == PHY_BCM_OUI_1 ||
4089                                     phyid == PHY_BCM_OUI_2 ||
4090                                     phyid == PHY_BCM_OUI_3)
4091                                         do_low_power = true;
4092                         }
4093                 }
4094         } else {
4095                 do_low_power = true;
4096
4097                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4098                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4099
4100                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4101                         tg3_setup_phy(tp, false);
4102         }
4103
4104         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4105                 u32 val;
4106
4107                 val = tr32(GRC_VCPU_EXT_CTRL);
4108                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4109         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4110                 int i;
4111                 u32 val;
4112
4113                 for (i = 0; i < 200; i++) {
4114                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4115                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4116                                 break;
4117                         msleep(1);
4118                 }
4119         }
4120         if (tg3_flag(tp, WOL_CAP))
4121                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4122                                                      WOL_DRV_STATE_SHUTDOWN |
4123                                                      WOL_DRV_WOL |
4124                                                      WOL_SET_MAGIC_PKT);
4125
4126         if (device_should_wake) {
4127                 u32 mac_mode;
4128
4129                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4130                         if (do_low_power &&
4131                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4132                                 tg3_phy_auxctl_write(tp,
4133                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4134                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4135                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4136                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4137                                 udelay(40);
4138                         }
4139
4140                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4141                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4142                         else if (tp->phy_flags &
4143                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4144                                 if (tp->link_config.active_speed == SPEED_1000)
4145                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4146                                 else
4147                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4148                         } else
4149                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4150
4151                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4152                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4153                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4154                                              SPEED_100 : SPEED_10;
4155                                 if (tg3_5700_link_polarity(tp, speed))
4156                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4157                                 else
4158                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4159                         }
4160                 } else {
4161                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4162                 }
4163
4164                 if (!tg3_flag(tp, 5750_PLUS))
4165                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4166
4167                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4168                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4169                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4170                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4171
4172                 if (tg3_flag(tp, ENABLE_APE))
4173                         mac_mode |= MAC_MODE_APE_TX_EN |
4174                                     MAC_MODE_APE_RX_EN |
4175                                     MAC_MODE_TDE_ENABLE;
4176
4177                 tw32_f(MAC_MODE, mac_mode);
4178                 udelay(100);
4179
4180                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4181                 udelay(10);
4182         }
4183
4184         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4185             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4186              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4187                 u32 base_val;
4188
4189                 base_val = tp->pci_clock_ctrl;
4190                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4191                              CLOCK_CTRL_TXCLK_DISABLE);
4192
4193                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4194                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4195         } else if (tg3_flag(tp, 5780_CLASS) ||
4196                    tg3_flag(tp, CPMU_PRESENT) ||
4197                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4198                 /* do nothing */
4199         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4200                 u32 newbits1, newbits2;
4201
4202                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4203                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4204                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4205                                     CLOCK_CTRL_TXCLK_DISABLE |
4206                                     CLOCK_CTRL_ALTCLK);
4207                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4208                 } else if (tg3_flag(tp, 5705_PLUS)) {
4209                         newbits1 = CLOCK_CTRL_625_CORE;
4210                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4211                 } else {
4212                         newbits1 = CLOCK_CTRL_ALTCLK;
4213                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4214                 }
4215
4216                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4217                             40);
4218
4219                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4220                             40);
4221
4222                 if (!tg3_flag(tp, 5705_PLUS)) {
4223                         u32 newbits3;
4224
4225                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4226                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4227                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4228                                             CLOCK_CTRL_TXCLK_DISABLE |
4229                                             CLOCK_CTRL_44MHZ_CORE);
4230                         } else {
4231                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4232                         }
4233
4234                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4235                                     tp->pci_clock_ctrl | newbits3, 40);
4236                 }
4237         }
4238
4239         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4240                 tg3_power_down_phy(tp, do_low_power);
4241
4242         tg3_frob_aux_power(tp, true);
4243
4244         /* Workaround for unstable PLL clock */
4245         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4246             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4247              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4248                 u32 val = tr32(0x7d00);
4249
4250                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4251                 tw32(0x7d00, val);
4252                 if (!tg3_flag(tp, ENABLE_ASF)) {
4253                         int err;
4254
4255                         err = tg3_nvram_lock(tp);
4256                         tg3_halt_cpu(tp, RX_CPU_BASE);
4257                         if (!err)
4258                                 tg3_nvram_unlock(tp);
4259                 }
4260         }
4261
4262         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4263
4264         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4265
4266         return 0;
4267 }
4268
4269 static void tg3_power_down(struct tg3 *tp)
4270 {
4271         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4272         pci_set_power_state(tp->pdev, PCI_D3hot);
4273 }
4274
4275 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4276 {
4277         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4278         case MII_TG3_AUX_STAT_10HALF:
4279                 *speed = SPEED_10;
4280                 *duplex = DUPLEX_HALF;
4281                 break;
4282
4283         case MII_TG3_AUX_STAT_10FULL:
4284                 *speed = SPEED_10;
4285                 *duplex = DUPLEX_FULL;
4286                 break;
4287
4288         case MII_TG3_AUX_STAT_100HALF:
4289                 *speed = SPEED_100;
4290                 *duplex = DUPLEX_HALF;
4291                 break;
4292
4293         case MII_TG3_AUX_STAT_100FULL:
4294                 *speed = SPEED_100;
4295                 *duplex = DUPLEX_FULL;
4296                 break;
4297
4298         case MII_TG3_AUX_STAT_1000HALF:
4299                 *speed = SPEED_1000;
4300                 *duplex = DUPLEX_HALF;
4301                 break;
4302
4303         case MII_TG3_AUX_STAT_1000FULL:
4304                 *speed = SPEED_1000;
4305                 *duplex = DUPLEX_FULL;
4306                 break;
4307
4308         default:
4309                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4310                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4311                                  SPEED_10;
4312                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4313                                   DUPLEX_HALF;
4314                         break;
4315                 }
4316                 *speed = SPEED_UNKNOWN;
4317                 *duplex = DUPLEX_UNKNOWN;
4318                 break;
4319         }
4320 }
4321
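/* Program the PHY's autoneg advertisements: MII_ADVERTISE carries the
 * 10/100 modes plus flow control, MII_CTRL1000 the gigabit modes (with a
 * forced-master workaround on early 5701 steppings), and EEE-capable PHYs
 * additionally get their clause-45 EEE advertisement written behind the
 * AUXCTL SMDSP toggle, with per-ASIC DSP fixups.
 */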
4322 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4323 {
4324         int err = 0;
4325         u32 val, new_adv;
4326
4327         new_adv = ADVERTISE_CSMA;
4328         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4329         new_adv |= mii_advertise_flowctrl(flowctrl);
4330
4331         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4332         if (err)
4333                 goto done;
4334
4335         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4336                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4337
4338                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4339                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4340                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4341
4342                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4343                 if (err)
4344                         goto done;
4345         }
4346
4347         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4348                 goto done;
4349
4350         tw32(TG3_CPMU_EEE_MODE,
4351              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4352
4353         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4354         if (!err) {
4355                 u32 err2;
4356
4357                 val = 0;
4358                 /* Advertise 100-BaseTX EEE ability */
4359                 if (advertise & ADVERTISED_100baseT_Full)
4360                         val |= MDIO_AN_EEE_ADV_100TX;
4361                 /* Advertise 1000-BaseT EEE ability */
4362                 if (advertise & ADVERTISED_1000baseT_Full)
4363                         val |= MDIO_AN_EEE_ADV_1000T;
4364
4365                 if (!tp->eee.eee_enabled) {
4366                         val = 0;
4367                         tp->eee.advertised = 0;
4368                 } else {
4369                         tp->eee.advertised = advertise &
4370                                              (ADVERTISED_100baseT_Full |
4371                                               ADVERTISED_1000baseT_Full);
4372                 }
4373
4374                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4375                 if (err)
4376                         val = 0;
4377
4378                 switch (tg3_asic_rev(tp)) {
4379                 case ASIC_REV_5717:
4380                 case ASIC_REV_57765:
4381                 case ASIC_REV_57766:
4382                 case ASIC_REV_5719:
4383                         /* If we advertised any EEE modes above... */
4384                         if (val)
4385                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4386                                       MII_TG3_DSP_TAP26_RMRXSTO |
4387                                       MII_TG3_DSP_TAP26_OPCSINPT;
4388                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4389                         /* Fall through */
4390                 case ASIC_REV_5720:
4391                 case ASIC_REV_5762:
4392                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4393                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4394                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4395                 }
4396
4397                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4398                 if (!err)
4399                         err = err2;
4400         }
4401
4402 done:
4403         return err;
4404 }
4405
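/* Begin link bring-up on a copper PHY: either (re)start
 * autonegotiation with the configured advertisement, or force the
 * requested speed/duplex directly through BMCR.
 */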
4406 static void tg3_phy_copper_begin(struct tg3 *tp)
4407 {
4408         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4409             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4410                 u32 adv, fc;
4411
4412                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4413                     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4414                         adv = ADVERTISED_10baseT_Half |
4415                               ADVERTISED_10baseT_Full;
4416                         if (tg3_flag(tp, WOL_SPEED_100MB))
4417                                 adv |= ADVERTISED_100baseT_Half |
4418                                        ADVERTISED_100baseT_Full;
4419                         if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4420                                 if (!(tp->phy_flags &
4421                                       TG3_PHYFLG_DISABLE_1G_HD_ADV))
4422                                         adv |= ADVERTISED_1000baseT_Half;
4423                                 adv |= ADVERTISED_1000baseT_Full;
4424                         }
4425
4426                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4427                 } else {
4428                         adv = tp->link_config.advertising;
4429                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4430                                 adv &= ~(ADVERTISED_1000baseT_Half |
4431                                          ADVERTISED_1000baseT_Full);
4432
4433                         fc = tp->link_config.flowctrl;
4434                 }
4435
4436                 tg3_phy_autoneg_cfg(tp, adv, fc);
4437
4438                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4439                     (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4440                         /* Normally during power down we want to autonegotiate
4441                          * the lowest possible speed for WOL. However, to avoid
4442                          * link flap, we leave it untouched.
4443                          */
4444                         return;
4445                 }
4446
4447                 tg3_writephy(tp, MII_BMCR,
4448                              BMCR_ANENABLE | BMCR_ANRESTART);
4449         } else {
4450                 int i;
4451                 u32 bmcr, orig_bmcr;
4452
4453                 tp->link_config.active_speed = tp->link_config.speed;
4454                 tp->link_config.active_duplex = tp->link_config.duplex;
4455
4456                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4457                         /* With autoneg disabled, the 5714/5715 family only
4458                          * links up when the advertisement register has the
4459                          * configured speed enabled.
4460                          */
4461                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4462                 }
4463
4464                 bmcr = 0;
4465                 switch (tp->link_config.speed) {
4466                 default:
4467                 case SPEED_10:
4468                         break;
4469
4470                 case SPEED_100:
4471                         bmcr |= BMCR_SPEED100;
4472                         break;
4473
4474                 case SPEED_1000:
4475                         bmcr |= BMCR_SPEED1000;
4476                         break;
4477                 }
4478
4479                 if (tp->link_config.duplex == DUPLEX_FULL)
4480                         bmcr |= BMCR_FULLDPLX;
4481
4482                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4483                     (bmcr != orig_bmcr)) {
4484                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4485                         for (i = 0; i < 1500; i++) {
4486                                 u32 tmp;
4487
4488                                 udelay(10);
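                                /* BMSR_LSTATUS is latched low, so the
                                 * register is read twice: the first read
                                 * clears the stale latched value, the
                                 * second returns the current state.
                                 */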
4489                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4490                                     tg3_readphy(tp, MII_BMSR, &tmp))
4491                                         continue;
4492                                 if (!(tmp & BMSR_LSTATUS)) {
4493                                         udelay(40);
4494                                         break;
4495                                 }
4496                         }
4497                         tg3_writephy(tp, MII_BMCR, bmcr);
4498                         udelay(40);
4499                 }
4500         }
4501 }
4502
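/* Read the PHY's current configuration back into tp->link_config so
 * the driver can adopt an existing link setup (for example, one left
 * in place while the device was powered down) instead of forcing a
 * renegotiation.
 */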
4503 static int tg3_phy_pull_config(struct tg3 *tp)
4504 {
4505         int err;
4506         u32 val;
4507
4508         err = tg3_readphy(tp, MII_BMCR, &val);
4509         if (err)
4510                 goto done;
4511
4512         if (!(val & BMCR_ANENABLE)) {
4513                 tp->link_config.autoneg = AUTONEG_DISABLE;
4514                 tp->link_config.advertising = 0;
4515                 tg3_flag_clear(tp, PAUSE_AUTONEG);
4516
4517                 err = -EIO;
4518
4519                 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4520                 case 0:
4521                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4522                                 goto done;
4523
4524                         tp->link_config.speed = SPEED_10;
4525                         break;
4526                 case BMCR_SPEED100:
4527                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4528                                 goto done;
4529
4530                         tp->link_config.speed = SPEED_100;
4531                         break;
4532                 case BMCR_SPEED1000:
4533                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4534                                 tp->link_config.speed = SPEED_1000;
4535                                 break;
4536                         }
4537                         /* Fall through */
4538                 default:
4539                         goto done;
4540                 }
4541
4542                 if (val & BMCR_FULLDPLX)
4543                         tp->link_config.duplex = DUPLEX_FULL;
4544                 else
4545                         tp->link_config.duplex = DUPLEX_HALF;
4546
4547                 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4548
4549                 err = 0;
4550                 goto done;
4551         }
4552
4553         tp->link_config.autoneg = AUTONEG_ENABLE;
4554         tp->link_config.advertising = ADVERTISED_Autoneg;
4555         tg3_flag_set(tp, PAUSE_AUTONEG);
4556
4557         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4558                 u32 adv;
4559
4560                 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4561                 if (err)
4562                         goto done;
4563
4564                 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4565                 tp->link_config.advertising |= adv | ADVERTISED_TP;
4566
4567                 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4568         } else {
4569                 tp->link_config.advertising |= ADVERTISED_FIBRE;
4570         }
4571
4572         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4573                 u32 adv;
4574
4575                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4576                         err = tg3_readphy(tp, MII_CTRL1000, &val);
4577                         if (err)
4578                                 goto done;
4579
4580                         adv = mii_ctrl1000_to_ethtool_adv_t(val);
4581                 } else {
4582                         err = tg3_readphy(tp, MII_ADVERTISE, &val);
4583                         if (err)
4584                                 goto done;
4585
4586                         adv = tg3_decode_flowctrl_1000X(val);
4587                         tp->link_config.flowctrl = adv;
4588
4589                         val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4590                         adv = mii_adv_to_ethtool_adv_x(val);
4591                 }
4592
4593                 tp->link_config.advertising |= adv;
4594         }
4595
4596 done:
4597         return err;
4598 }
4599
4600 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4601 {
4602         int err;
4603
4604         /* Turn off tap power management and set the
4605          * extended packet length bit. */
4606         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4607
4608         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4609         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4610         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4611         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4612         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4613
4614         udelay(40);
4615
4616         return err;
4617 }
4618
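/* Check that the EEE configuration in the PHY still matches what we
 * want to advertise.  Returns true when no EEE reconfiguration is
 * needed.
 */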
4619 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4620 {
4621         struct ethtool_eee eee;
4622
4623         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4624                 return true;
4625
4626         tg3_eee_pull_config(tp, &eee);
4627
4628         if (tp->eee.eee_enabled) {
4629                 if (tp->eee.advertised != eee.advertised ||
4630                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4631                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4632                         return false;
4633         } else {
4634                 /* EEE is disabled but we're advertising */
4635                 if (eee.advertised)
4636                         return false;
4637         }
4638
4639         return true;
4640 }
4641
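/* Verify that MII_ADVERTISE and MII_CTRL1000 still hold the
 * advertisement we programmed; the local advertisement word is also
 * returned through @lcladv for flow-control resolution.
 */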
4642 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4643 {
4644         u32 advmsk, tgtadv, advertising;
4645
4646         advertising = tp->link_config.advertising;
4647         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4648
4649         advmsk = ADVERTISE_ALL;
4650         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4651                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4652                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4653         }
4654
4655         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4656                 return false;
4657
4658         if ((*lcladv & advmsk) != tgtadv)
4659                 return false;
4660
4661         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4662                 u32 tg3_ctrl;
4663
4664                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4665
4666                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4667                         return false;
4668
4669                 if (tgtadv &&
4670                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4671                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4672                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4673                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4674                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4675                 } else {
4676                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4677                 }
4678
4679                 if (tg3_ctrl != tgtadv)
4680                         return false;
4681         }
4682
4683         return true;
4684 }
4685
4686 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
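/* Read the link partner's abilities from MII_STAT1000 and MII_LPA and
 * record them, converted to ethtool form, in tp->link_config.rmt_adv.
 */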
4687 {
4688         u32 lpeth = 0;
4689
4690         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4691                 u32 val;
4692
4693                 if (tg3_readphy(tp, MII_STAT1000, &val))
4694                         return false;
4695
4696                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4697         }
4698
4699         if (tg3_readphy(tp, MII_LPA, rmtadv))
4700                 return false;
4701
4702         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4703         tp->link_config.rmt_adv = lpeth;
4704
4705         return true;
4706 }
4707
4708 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4709 {
4710         if (curr_link_up != tp->link_up) {
4711                 if (curr_link_up) {
4712                         netif_carrier_on(tp->dev);
4713                 } else {
4714                         netif_carrier_off(tp->dev);
4715                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4716                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4717                 }
4718
4719                 tg3_link_report(tp);
4720                 return true;
4721         }
4722
4723         return false;
4724 }
4725
4726 static void tg3_clear_mac_status(struct tg3 *tp)
4727 {
4728         tw32(MAC_EVENT, 0);
4729
4730         tw32_f(MAC_STATUS,
4731                MAC_STATUS_SYNC_CHANGED |
4732                MAC_STATUS_CFG_CHANGED |
4733                MAC_STATUS_MI_COMPLETION |
4734                MAC_STATUS_LNKSTATE_CHANGED);
4735         udelay(40);
4736 }
4737
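/* Program the CPMU EEE (IEEE 802.3az Energy Efficient Ethernet) mode
 * and LPI debounce timers according to tp->eee.
 */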
4738 static void tg3_setup_eee(struct tg3 *tp)
4739 {
4740         u32 val;
4741
4742         val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4743               TG3_CPMU_EEE_LNKIDL_UART_IDL;
4744         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4745                 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4746
4747         tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4748
4749         tw32_f(TG3_CPMU_EEE_CTRL,
4750                TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4751
4752         val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4753               (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4754               TG3_CPMU_EEEMD_LPI_IN_RX |
4755               TG3_CPMU_EEEMD_EEE_ENABLE;
4756
4757         if (tg3_asic_rev(tp) != ASIC_REV_5717)
4758                 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4759
4760         if (tg3_flag(tp, ENABLE_APE))
4761                 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4762
4763         tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4764
4765         tw32_f(TG3_CPMU_EEE_DBTMR1,
4766                TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4767                (tp->eee.tx_lpi_timer & 0xffff));
4768
4769         tw32_f(TG3_CPMU_EEE_DBTMR2,
4770                TG3_CPMU_DBTMR2_APE_TX_2047US |
4771                TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4772 }
4773
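/* Main link setup path for copper PHYs: poll BMSR for link, decode
 * speed/duplex from the Broadcom aux status register, validate the
 * autoneg result and program the MAC mode to match.
 */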
4774 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4775 {
4776         bool current_link_up;
4777         u32 bmsr, val;
4778         u32 lcl_adv, rmt_adv;
4779         u16 current_speed;
4780         u8 current_duplex;
4781         int i, err;
4782
4783         tg3_clear_mac_status(tp);
4784
4785         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4786                 tw32_f(MAC_MI_MODE,
4787                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4788                 udelay(80);
4789         }
4790
4791         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4792
4793         /* Some third-party PHYs need to be reset when the link
4794          * goes down.
4795          */
4796         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4797              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4798              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4799             tp->link_up) {
4800                 tg3_readphy(tp, MII_BMSR, &bmsr);
4801                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4802                     !(bmsr & BMSR_LSTATUS))
4803                         force_reset = true;
4804         }
4805         if (force_reset)
4806                 tg3_phy_reset(tp);
4807
4808         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4809                 tg3_readphy(tp, MII_BMSR, &bmsr);
4810                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4811                     !tg3_flag(tp, INIT_COMPLETE))
4812                         bmsr = 0;
4813
4814                 if (!(bmsr & BMSR_LSTATUS)) {
4815                         err = tg3_init_5401phy_dsp(tp);
4816                         if (err)
4817                                 return err;
4818
4819                         tg3_readphy(tp, MII_BMSR, &bmsr);
4820                         for (i = 0; i < 1000; i++) {
4821                                 udelay(10);
4822                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4823                                     (bmsr & BMSR_LSTATUS)) {
4824                                         udelay(40);
4825                                         break;
4826                                 }
4827                         }
4828
4829                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4830                             TG3_PHY_REV_BCM5401_B0 &&
4831                             !(bmsr & BMSR_LSTATUS) &&
4832                             tp->link_config.active_speed == SPEED_1000) {
4833                                 err = tg3_phy_reset(tp);
4834                                 if (!err)
4835                                         err = tg3_init_5401phy_dsp(tp);
4836                                 if (err)
4837                                         return err;
4838                         }
4839                 }
4840         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4841                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4842                 /* 5701 {A0,B0} CRC bug workaround */
4843                 tg3_writephy(tp, 0x15, 0x0a75);
4844                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4845                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4846                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4847         }
4848
4849         /* Clear pending interrupts... */
4850         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4851         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4852
4853         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4854                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4855         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4856                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4857
4858         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4859             tg3_asic_rev(tp) == ASIC_REV_5701) {
4860                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4861                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4862                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4863                 else
4864                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4865         }
4866
4867         current_link_up = false;
4868         current_speed = SPEED_UNKNOWN;
4869         current_duplex = DUPLEX_UNKNOWN;
4870         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4871         tp->link_config.rmt_adv = 0;
4872
4873         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4874                 err = tg3_phy_auxctl_read(tp,
4875                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4876                                           &val);
4877                 if (!err && !(val & (1 << 10))) {
4878                         tg3_phy_auxctl_write(tp,
4879                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4880                                              val | (1 << 10));
4881                         goto relink;
4882                 }
4883         }
4884
4885         bmsr = 0;
4886         for (i = 0; i < 100; i++) {
4887                 tg3_readphy(tp, MII_BMSR, &bmsr);
4888                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4889                     (bmsr & BMSR_LSTATUS))
4890                         break;
4891                 udelay(40);
4892         }
4893
4894         if (bmsr & BMSR_LSTATUS) {
4895                 u32 aux_stat, bmcr;
4896
4897                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4898                 for (i = 0; i < 2000; i++) {
4899                         udelay(10);
4900                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4901                             aux_stat)
4902                                 break;
4903                 }
4904
4905                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4906                                              &current_speed,
4907                                              &current_duplex);
4908
4909                 bmcr = 0;
4910                 for (i = 0; i < 200; i++) {
4911                         tg3_readphy(tp, MII_BMCR, &bmcr);
4912                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4913                                 continue;
4914                         if (bmcr && bmcr != 0x7fff)
4915                                 break;
4916                         udelay(10);
4917                 }
4918
4919                 lcl_adv = 0;
4920                 rmt_adv = 0;
4921
4922                 tp->link_config.active_speed = current_speed;
4923                 tp->link_config.active_duplex = current_duplex;
4924
4925                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4926                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4927
4928                         if ((bmcr & BMCR_ANENABLE) &&
4929                             eee_config_ok &&
4930                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4931                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4932                                 current_link_up = true;
4933
4934                         /* Changes to EEE settings take effect only after a
4935                          * PHY reset.  If we skipped a reset because Link
4936                          * Flap Avoidance is enabled, do it now.
4937                          */
4938                         if (!eee_config_ok &&
4939                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4940                             !force_reset) {
4941                                 tg3_setup_eee(tp);
4942                                 tg3_phy_reset(tp);
4943                         }
4944                 } else {
4945                         if (!(bmcr & BMCR_ANENABLE) &&
4946                             tp->link_config.speed == current_speed &&
4947                             tp->link_config.duplex == current_duplex) {
4948                                 current_link_up = true;
4949                         }
4950                 }
4951
4952                 if (current_link_up &&
4953                     tp->link_config.active_duplex == DUPLEX_FULL) {
4954                         u32 reg, bit;
4955
4956                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4957                                 reg = MII_TG3_FET_GEN_STAT;
4958                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4959                         } else {
4960                                 reg = MII_TG3_EXT_STAT;
4961                                 bit = MII_TG3_EXT_STAT_MDIX;
4962                         }
4963
4964                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4965                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4966
4967                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4968                 }
4969         }
4970
4971 relink:
4972         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4973                 tg3_phy_copper_begin(tp);
4974
4975                 if (tg3_flag(tp, ROBOSWITCH)) {
4976                         current_link_up = true;
4977                         /* FIXME: use 100 Mbit/s when a BCM5325 switch is used */
4978                         current_speed = SPEED_1000;
4979                         current_duplex = DUPLEX_FULL;
4980                         tp->link_config.active_speed = current_speed;
4981                         tp->link_config.active_duplex = current_duplex;
4982                 }
4983
4984                 tg3_readphy(tp, MII_BMSR, &bmsr);
4985                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4986                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4987                         current_link_up = true;
4988         }
4989
4990         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4991         if (current_link_up) {
4992                 if (tp->link_config.active_speed == SPEED_100 ||
4993                     tp->link_config.active_speed == SPEED_10)
4994                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4995                 else
4996                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4997         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4998                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4999         else
5000                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5001
5002         /* For the 5750 core in the BCM4785 chip to work properly
5003          * in RGMII mode, the LED Control Register must be set up.
5004          */
5005         if (tg3_flag(tp, RGMII_MODE)) {
5006                 u32 led_ctrl = tr32(MAC_LED_CTRL);
5007                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5008
5009                 if (tp->link_config.active_speed == SPEED_10)
5010                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5011                 else if (tp->link_config.active_speed == SPEED_100)
5012                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5013                                      LED_CTRL_100MBPS_ON);
5014                 else if (tp->link_config.active_speed == SPEED_1000)
5015                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5016                                      LED_CTRL_1000MBPS_ON);
5017
5018                 tw32(MAC_LED_CTRL, led_ctrl);
5019                 udelay(40);
5020         }
5021
5022         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5023         if (tp->link_config.active_duplex == DUPLEX_HALF)
5024                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5025
5026         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5027                 if (current_link_up &&
5028                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5029                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5030                 else
5031                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5032         }
5033
5034         /* ??? Without this setting the Netgear GA302T PHY does not
5035          * ??? send/receive packets...
5036          */
5037         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5038             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5039                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5040                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5041                 udelay(80);
5042         }
5043
5044         tw32_f(MAC_MODE, tp->mac_mode);
5045         udelay(40);
5046
5047         tg3_phy_eee_adjust(tp, current_link_up);
5048
5049         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5050                 /* Polled via timer. */
5051                 tw32_f(MAC_EVENT, 0);
5052         } else {
5053                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5054         }
5055         udelay(40);
5056
5057         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5058             current_link_up &&
5059             tp->link_config.active_speed == SPEED_1000 &&
5060             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5061                 udelay(120);
5062                 tw32_f(MAC_STATUS,
5063                      (MAC_STATUS_SYNC_CHANGED |
5064                       MAC_STATUS_CFG_CHANGED));
5065                 udelay(40);
5066                 tg3_write_mem(tp,
5067                               NIC_SRAM_FIRMWARE_MBOX,
5068                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5069         }
5070
5071         /* Prevent send BD corruption by disabling CLKREQ at 10/100 speeds. */
5072         if (tg3_flag(tp, CLKREQ_BUG)) {
5073                 if (tp->link_config.active_speed == SPEED_100 ||
5074                     tp->link_config.active_speed == SPEED_10)
5075                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5076                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5077                 else
5078                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5079                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5080         }
5081
5082         tg3_test_and_report_link_chg(tp, current_link_up);
5083
5084         return 0;
5085 }
5086
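/* Software implementation of IEEE 802.3 clause 37 autonegotiation for
 * 1000BASE-X fiber links, used when the hardware autoneg engine is
 * not in use.  The states below mirror the arbitration states in the
 * standard.
 */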
5087 struct tg3_fiber_aneginfo {
5088         int state;
5089 #define ANEG_STATE_UNKNOWN              0
5090 #define ANEG_STATE_AN_ENABLE            1
5091 #define ANEG_STATE_RESTART_INIT         2
5092 #define ANEG_STATE_RESTART              3
5093 #define ANEG_STATE_DISABLE_LINK_OK      4
5094 #define ANEG_STATE_ABILITY_DETECT_INIT  5
5095 #define ANEG_STATE_ABILITY_DETECT       6
5096 #define ANEG_STATE_ACK_DETECT_INIT      7
5097 #define ANEG_STATE_ACK_DETECT           8
5098 #define ANEG_STATE_COMPLETE_ACK_INIT    9
5099 #define ANEG_STATE_COMPLETE_ACK         10
5100 #define ANEG_STATE_IDLE_DETECT_INIT     11
5101 #define ANEG_STATE_IDLE_DETECT          12
5102 #define ANEG_STATE_LINK_OK              13
5103 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
5104 #define ANEG_STATE_NEXT_PAGE_WAIT       15
5105
5106         u32 flags;
5107 #define MR_AN_ENABLE            0x00000001
5108 #define MR_RESTART_AN           0x00000002
5109 #define MR_AN_COMPLETE          0x00000004
5110 #define MR_PAGE_RX              0x00000008
5111 #define MR_NP_LOADED            0x00000010
5112 #define MR_TOGGLE_TX            0x00000020
5113 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
5114 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
5115 #define MR_LP_ADV_SYM_PAUSE     0x00000100
5116 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
5117 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5118 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5119 #define MR_LP_ADV_NEXT_PAGE     0x00001000
5120 #define MR_TOGGLE_RX            0x00002000
5121 #define MR_NP_RX                0x00004000
5122
5123 #define MR_LINK_OK              0x80000000
5124
5125         unsigned long link_time, cur_time;
5126
5127         u32 ability_match_cfg;
5128         int ability_match_count;
5129
5130         char ability_match, idle_match, ack_match;
5131
5132         u32 txconfig, rxconfig;
5133 #define ANEG_CFG_NP             0x00000080
5134 #define ANEG_CFG_ACK            0x00000040
5135 #define ANEG_CFG_RF2            0x00000020
5136 #define ANEG_CFG_RF1            0x00000010
5137 #define ANEG_CFG_PS2            0x00000001
5138 #define ANEG_CFG_PS1            0x00008000
5139 #define ANEG_CFG_HD             0x00004000
5140 #define ANEG_CFG_FD             0x00002000
5141 #define ANEG_CFG_INVAL          0x00001f06
5142
5143 };
5144 #define ANEG_OK         0
5145 #define ANEG_DONE       1
5146 #define ANEG_TIMER_ENAB 2
5147 #define ANEG_FAILED     -1
5148
5149 #define ANEG_STATE_SETTLE_TIME  10000
5150
5151 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5152                                    struct tg3_fiber_aneginfo *ap)
5153 {
5154         u16 flowctrl;
5155         unsigned long delta;
5156         u32 rx_cfg_reg;
5157         int ret;
5158
5159         if (ap->state == ANEG_STATE_UNKNOWN) {
5160                 ap->rxconfig = 0;
5161                 ap->link_time = 0;
5162                 ap->cur_time = 0;
5163                 ap->ability_match_cfg = 0;
5164                 ap->ability_match_count = 0;
5165                 ap->ability_match = 0;
5166                 ap->idle_match = 0;
5167                 ap->ack_match = 0;
5168         }
5169         ap->cur_time++;
5170
5171         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5172                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5173
5174                 if (rx_cfg_reg != ap->ability_match_cfg) {
5175                         ap->ability_match_cfg = rx_cfg_reg;
5176                         ap->ability_match = 0;
5177                         ap->ability_match_count = 0;
5178                 } else {
5179                         if (++ap->ability_match_count > 1) {
5180                                 ap->ability_match = 1;
5181                                 ap->ability_match_cfg = rx_cfg_reg;
5182                         }
5183                 }
5184                 if (rx_cfg_reg & ANEG_CFG_ACK)
5185                         ap->ack_match = 1;
5186                 else
5187                         ap->ack_match = 0;
5188
5189                 ap->idle_match = 0;
5190         } else {
5191                 ap->idle_match = 1;
5192                 ap->ability_match_cfg = 0;
5193                 ap->ability_match_count = 0;
5194                 ap->ability_match = 0;
5195                 ap->ack_match = 0;
5196
5197                 rx_cfg_reg = 0;
5198         }
5199
5200         ap->rxconfig = rx_cfg_reg;
5201         ret = ANEG_OK;
5202
5203         switch (ap->state) {
5204         case ANEG_STATE_UNKNOWN:
5205                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5206                         ap->state = ANEG_STATE_AN_ENABLE;
5207
5208                 /* Fall through */
5209         case ANEG_STATE_AN_ENABLE:
5210                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5211                 if (ap->flags & MR_AN_ENABLE) {
5212                         ap->link_time = 0;
5213                         ap->cur_time = 0;
5214                         ap->ability_match_cfg = 0;
5215                         ap->ability_match_count = 0;
5216                         ap->ability_match = 0;
5217                         ap->idle_match = 0;
5218                         ap->ack_match = 0;
5219
5220                         ap->state = ANEG_STATE_RESTART_INIT;
5221                 } else {
5222                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
5223                 }
5224                 break;
5225
5226         case ANEG_STATE_RESTART_INIT:
5227                 ap->link_time = ap->cur_time;
5228                 ap->flags &= ~(MR_NP_LOADED);
5229                 ap->txconfig = 0;
5230                 tw32(MAC_TX_AUTO_NEG, 0);
5231                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5232                 tw32_f(MAC_MODE, tp->mac_mode);
5233                 udelay(40);
5234
5235                 ret = ANEG_TIMER_ENAB;
5236                 ap->state = ANEG_STATE_RESTART;
5237
5238                 /* Fall through */
5239         case ANEG_STATE_RESTART:
5240                 delta = ap->cur_time - ap->link_time;
5241                 if (delta > ANEG_STATE_SETTLE_TIME)
5242                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5243                 else
5244                         ret = ANEG_TIMER_ENAB;
5245                 break;
5246
5247         case ANEG_STATE_DISABLE_LINK_OK:
5248                 ret = ANEG_DONE;
5249                 break;
5250
5251         case ANEG_STATE_ABILITY_DETECT_INIT:
5252                 ap->flags &= ~(MR_TOGGLE_TX);
5253                 ap->txconfig = ANEG_CFG_FD;
5254                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5255                 if (flowctrl & ADVERTISE_1000XPAUSE)
5256                         ap->txconfig |= ANEG_CFG_PS1;
5257                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5258                         ap->txconfig |= ANEG_CFG_PS2;
5259                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5260                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5261                 tw32_f(MAC_MODE, tp->mac_mode);
5262                 udelay(40);
5263
5264                 ap->state = ANEG_STATE_ABILITY_DETECT;
5265                 break;
5266
5267         case ANEG_STATE_ABILITY_DETECT:
5268                 if (ap->ability_match != 0 && ap->rxconfig != 0)
5269                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
5270                 break;
5271
5272         case ANEG_STATE_ACK_DETECT_INIT:
5273                 ap->txconfig |= ANEG_CFG_ACK;
5274                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5275                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5276                 tw32_f(MAC_MODE, tp->mac_mode);
5277                 udelay(40);
5278
5279                 ap->state = ANEG_STATE_ACK_DETECT;
5280
5281                 /* Fall through */
5282         case ANEG_STATE_ACK_DETECT:
5283                 if (ap->ack_match != 0) {
5284                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5285                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5286                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5287                         } else {
5288                                 ap->state = ANEG_STATE_AN_ENABLE;
5289                         }
5290                 } else if (ap->ability_match != 0 &&
5291                            ap->rxconfig == 0) {
5292                         ap->state = ANEG_STATE_AN_ENABLE;
5293                 }
5294                 break;
5295
5296         case ANEG_STATE_COMPLETE_ACK_INIT:
5297                 if (ap->rxconfig & ANEG_CFG_INVAL) {
5298                         ret = ANEG_FAILED;
5299                         break;
5300                 }
5301                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5302                                MR_LP_ADV_HALF_DUPLEX |
5303                                MR_LP_ADV_SYM_PAUSE |
5304                                MR_LP_ADV_ASYM_PAUSE |
5305                                MR_LP_ADV_REMOTE_FAULT1 |
5306                                MR_LP_ADV_REMOTE_FAULT2 |
5307                                MR_LP_ADV_NEXT_PAGE |
5308                                MR_TOGGLE_RX |
5309                                MR_NP_RX);
5310                 if (ap->rxconfig & ANEG_CFG_FD)
5311                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5312                 if (ap->rxconfig & ANEG_CFG_HD)
5313                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5314                 if (ap->rxconfig & ANEG_CFG_PS1)
5315                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
5316                 if (ap->rxconfig & ANEG_CFG_PS2)
5317                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5318                 if (ap->rxconfig & ANEG_CFG_RF1)
5319                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5320                 if (ap->rxconfig & ANEG_CFG_RF2)
5321                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5322                 if (ap->rxconfig & ANEG_CFG_NP)
5323                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
5324
5325                 ap->link_time = ap->cur_time;
5326
5327                 ap->flags ^= (MR_TOGGLE_TX);
5328                 if (ap->rxconfig & 0x0008)
5329                         ap->flags |= MR_TOGGLE_RX;
5330                 if (ap->rxconfig & ANEG_CFG_NP)
5331                         ap->flags |= MR_NP_RX;
5332                 ap->flags |= MR_PAGE_RX;
5333
5334                 ap->state = ANEG_STATE_COMPLETE_ACK;
5335                 ret = ANEG_TIMER_ENAB;
5336                 break;
5337
5338         case ANEG_STATE_COMPLETE_ACK:
5339                 if (ap->ability_match != 0 &&
5340                     ap->rxconfig == 0) {
5341                         ap->state = ANEG_STATE_AN_ENABLE;
5342                         break;
5343                 }
5344                 delta = ap->cur_time - ap->link_time;
5345                 if (delta > ANEG_STATE_SETTLE_TIME) {
5346                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5347                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5348                         } else {
5349                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5350                                     !(ap->flags & MR_NP_RX)) {
5351                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5352                                 } else {
5353                                         ret = ANEG_FAILED;
5354                                 }
5355                         }
5356                 }
5357                 break;
5358
5359         case ANEG_STATE_IDLE_DETECT_INIT:
5360                 ap->link_time = ap->cur_time;
5361                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5362                 tw32_f(MAC_MODE, tp->mac_mode);
5363                 udelay(40);
5364
5365                 ap->state = ANEG_STATE_IDLE_DETECT;
5366                 ret = ANEG_TIMER_ENAB;
5367                 break;
5368
5369         case ANEG_STATE_IDLE_DETECT:
5370                 if (ap->ability_match != 0 &&
5371                     ap->rxconfig == 0) {
5372                         ap->state = ANEG_STATE_AN_ENABLE;
5373                         break;
5374                 }
5375                 delta = ap->cur_time - ap->link_time;
5376                 if (delta > ANEG_STATE_SETTLE_TIME) {
5377                         /* XXX another gem from the Broadcom driver :( */
5378                         ap->state = ANEG_STATE_LINK_OK;
5379                 }
5380                 break;
5381
5382         case ANEG_STATE_LINK_OK:
5383                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5384                 ret = ANEG_DONE;
5385                 break;
5386
5387         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5388                 /* ??? unimplemented */
5389                 break;
5390
5391         case ANEG_STATE_NEXT_PAGE_WAIT:
5392                 /* ??? unimplemented */
5393                 break;
5394
5395         default:
5396                 ret = ANEG_FAILED;
5397                 break;
5398         }
5399
5400         return ret;
5401 }
5402
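/* Run software 1000BASE-X autoneg to completion (bounded by the tick
 * limit below).  Returns nonzero on success; the transmitted config
 * word and resolved flags are passed back via @txflags and @rxflags.
 */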
5403 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5404 {
5405         int res = 0;
5406         struct tg3_fiber_aneginfo aninfo;
5407         int status = ANEG_FAILED;
5408         unsigned int tick;
5409         u32 tmp;
5410
5411         tw32_f(MAC_TX_AUTO_NEG, 0);
5412
5413         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5414         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5415         udelay(40);
5416
5417         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5418         udelay(40);
5419
5420         memset(&aninfo, 0, sizeof(aninfo));
5421         aninfo.flags |= MR_AN_ENABLE;
5422         aninfo.state = ANEG_STATE_UNKNOWN;
5423         aninfo.cur_time = 0;
5424         tick = 0;
5425         while (++tick < 195000) {
5426                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5427                 if (status == ANEG_DONE || status == ANEG_FAILED)
5428                         break;
5429
5430                 udelay(1);
5431         }
5432
5433         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5434         tw32_f(MAC_MODE, tp->mac_mode);
5435         udelay(40);
5436
5437         *txflags = aninfo.txconfig;
5438         *rxflags = aninfo.flags;
5439
5440         if (status == ANEG_DONE &&
5441             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5442                              MR_LP_ADV_FULL_DUPLEX)))
5443                 res = 1;
5444
5445         return res;
5446 }
5447
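/* One-time bring-up sequence for the BCM8002 fiber PHY.  The raw
 * register writes below follow a fixed vendor recipe (PLL lock range,
 * soft reset, POR toggle), as noted in the step comments.
 */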
5448 static void tg3_init_bcm8002(struct tg3 *tp)
5449 {
5450         u32 mac_status = tr32(MAC_STATUS);
5451         int i;
5452
5453         /* Reset when initializing for the first time, or when we have a link. */
5454         if (tg3_flag(tp, INIT_COMPLETE) &&
5455             !(mac_status & MAC_STATUS_PCS_SYNCED))
5456                 return;
5457
5458         /* Set PLL lock range. */
5459         tg3_writephy(tp, 0x16, 0x8007);
5460
5461         /* SW reset */
5462         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5463
5464         /* Wait for reset to complete. */
5465         /* XXX schedule_timeout() ... */
5466         for (i = 0; i < 500; i++)
5467                 udelay(10);
5468
5469         /* Config mode; select PMA/Ch 1 regs. */
5470         tg3_writephy(tp, 0x10, 0x8411);
5471
5472         /* Enable auto-lock and comdet, select txclk for tx. */
5473         tg3_writephy(tp, 0x11, 0x0a10);
5474
5475         tg3_writephy(tp, 0x18, 0x00a0);
5476         tg3_writephy(tp, 0x16, 0x41ff);
5477
5478         /* Assert and deassert POR. */
5479         tg3_writephy(tp, 0x13, 0x0400);
5480         udelay(40);
5481         tg3_writephy(tp, 0x13, 0x0000);
5482
5483         tg3_writephy(tp, 0x11, 0x0a50);
5484         udelay(40);
5485         tg3_writephy(tp, 0x11, 0x0a10);
5486
5487         /* Wait for signal to stabilize */
5488         /* XXX schedule_timeout() ... */
5489         for (i = 0; i < 15000; i++)
5490                 udelay(10);
5491
5492         /* Deselect the channel register so we can read the PHYID
5493          * later.
5494          */
5495         tg3_writephy(tp, 0x10, 0x8011);
5496 }
5497
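/* Fiber link setup using the SG_DIG hardware autoneg engine.  On
 * chips that need it, MAC_SERDES_CFG is rewritten around autoneg
 * restarts as a workaround.
 */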
5498 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5499 {
5500         u16 flowctrl;
5501         bool current_link_up;
5502         u32 sg_dig_ctrl, sg_dig_status;
5503         u32 serdes_cfg, expected_sg_dig_ctrl;
5504         int workaround, port_a;
5505
5506         serdes_cfg = 0;
5507         expected_sg_dig_ctrl = 0;
5508         workaround = 0;
5509         port_a = 1;
5510         current_link_up = false;
5511
5512         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5513             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5514                 workaround = 1;
5515                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5516                         port_a = 0;
5517
5518                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5519                 /* preserve bits 20-23 for voltage regulator */
5520                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5521         }
5522
5523         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5524
5525         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5526                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5527                         if (workaround) {
5528                                 u32 val = serdes_cfg;
5529
5530                                 if (port_a)
5531                                         val |= 0xc010000;
5532                                 else
5533                                         val |= 0x4010000;
5534                                 tw32_f(MAC_SERDES_CFG, val);
5535                         }
5536
5537                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5538                 }
5539                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5540                         tg3_setup_flow_control(tp, 0, 0);
5541                         current_link_up = true;
5542                 }
5543                 goto out;
5544         }
5545
5546         /* We want auto-negotiation. */
5547         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5548
5549         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5550         if (flowctrl & ADVERTISE_1000XPAUSE)
5551                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5552         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5553                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5554
5555         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5556                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5557                     tp->serdes_counter &&
5558                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5559                                     MAC_STATUS_RCVD_CFG)) ==
5560                      MAC_STATUS_PCS_SYNCED)) {
5561                         tp->serdes_counter--;
5562                         current_link_up = true;
5563                         goto out;
5564                 }
5565 restart_autoneg:
5566                 if (workaround)
5567                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5568                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5569                 udelay(5);
5570                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5571
5572                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5573                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5574         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5575                                  MAC_STATUS_SIGNAL_DET)) {
5576                 sg_dig_status = tr32(SG_DIG_STATUS);
5577                 mac_status = tr32(MAC_STATUS);
5578
5579                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5580                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5581                         u32 local_adv = 0, remote_adv = 0;
5582
5583                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5584                                 local_adv |= ADVERTISE_1000XPAUSE;
5585                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5586                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5587
5588                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5589                                 remote_adv |= LPA_1000XPAUSE;
5590                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5591                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5592
5593                         tp->link_config.rmt_adv =
5594                                            mii_adv_to_ethtool_adv_x(remote_adv);
5595
5596                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5597                         current_link_up = true;
5598                         tp->serdes_counter = 0;
5599                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5600                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5601                         if (tp->serdes_counter)
5602                                 tp->serdes_counter--;
5603                         else {
5604                                 if (workaround) {
5605                                         u32 val = serdes_cfg;
5606
5607                                         if (port_a)
5608                                                 val |= 0xc010000;
5609                                         else
5610                                                 val |= 0x4010000;
5611
5612                                         tw32_f(MAC_SERDES_CFG, val);
5613                                 }
5614
5615                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5616                                 udelay(40);
5617
5618                                 /* Link parallel detection: link is up only
5619                                  * if we have PCS_SYNC and are not receiving
5620                                  * config code words. */
5621                                 mac_status = tr32(MAC_STATUS);
5622                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5623                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5624                                         tg3_setup_flow_control(tp, 0, 0);
5625                                         current_link_up = true;
5626                                         tp->phy_flags |=
5627                                                 TG3_PHYFLG_PARALLEL_DETECT;
5628                                         tp->serdes_counter =
5629                                                 SERDES_PARALLEL_DET_TIMEOUT;
5630                                 } else
5631                                         goto restart_autoneg;
5632                         }
5633                 }
5634         } else {
5635                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5636                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5637         }
5638
5639 out:
5640         return current_link_up;
5641 }
5642
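/* Fiber link setup without the SG_DIG engine: either run the software
 * autoneg state machine or simply force a 1000FD link up.
 */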
5643 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5644 {
5645         bool current_link_up = false;
5646
5647         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5648                 goto out;
5649
5650         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5651                 u32 txflags, rxflags;
5652                 int i;
5653
5654                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5655                         u32 local_adv = 0, remote_adv = 0;
5656
5657                         if (txflags & ANEG_CFG_PS1)
5658                                 local_adv |= ADVERTISE_1000XPAUSE;
5659                         if (txflags & ANEG_CFG_PS2)
5660                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5661
5662                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5663                                 remote_adv |= LPA_1000XPAUSE;
5664                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5665                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5666
5667                         tp->link_config.rmt_adv =
5668                                            mii_adv_to_ethtool_adv_x(remote_adv);
5669
5670                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5671
5672                         current_link_up = true;
5673                 }
5674                 for (i = 0; i < 30; i++) {
5675                         udelay(20);
5676                         tw32_f(MAC_STATUS,
5677                                (MAC_STATUS_SYNC_CHANGED |
5678                                 MAC_STATUS_CFG_CHANGED));
5679                         udelay(40);
5680                         if ((tr32(MAC_STATUS) &
5681                              (MAC_STATUS_SYNC_CHANGED |
5682                               MAC_STATUS_CFG_CHANGED)) == 0)
5683                                 break;
5684                 }
5685
5686                 mac_status = tr32(MAC_STATUS);
5687                 if (!current_link_up &&
5688                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5689                     !(mac_status & MAC_STATUS_RCVD_CFG))
5690                         current_link_up = true;
5691         } else {
5692                 tg3_setup_flow_control(tp, 0, 0);
5693
5694                 /* Forcing 1000FD link up. */
5695                 current_link_up = true;
5696
5697                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5698                 udelay(40);
5699
5700                 tw32_f(MAC_MODE, tp->mac_mode);
5701                 udelay(40);
5702         }
5703
5704 out:
5705         return current_link_up;
5706 }
5707
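/* Top-level link setup for TBI (fiber) ports: run hardware or
 * software autonegotiation, then reflect the result in the MAC mode
 * and LED registers and report any link change.
 */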
5708 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5709 {
5710         u32 orig_pause_cfg;
5711         u16 orig_active_speed;
5712         u8 orig_active_duplex;
5713         u32 mac_status;
5714         bool current_link_up;
5715         int i;
5716
5717         orig_pause_cfg = tp->link_config.active_flowctrl;
5718         orig_active_speed = tp->link_config.active_speed;
5719         orig_active_duplex = tp->link_config.active_duplex;
5720
5721         if (!tg3_flag(tp, HW_AUTONEG) &&
5722             tp->link_up &&
5723             tg3_flag(tp, INIT_COMPLETE)) {
5724                 mac_status = tr32(MAC_STATUS);
5725                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5726                                MAC_STATUS_SIGNAL_DET |
5727                                MAC_STATUS_CFG_CHANGED |
5728                                MAC_STATUS_RCVD_CFG);
5729                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5730                                    MAC_STATUS_SIGNAL_DET)) {
5731                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5732                                             MAC_STATUS_CFG_CHANGED));
5733                         return 0;
5734                 }
5735         }
5736
5737         tw32_f(MAC_TX_AUTO_NEG, 0);
5738
5739         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5740         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5741         tw32_f(MAC_MODE, tp->mac_mode);
5742         udelay(40);
5743
5744         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5745                 tg3_init_bcm8002(tp);
5746
5747         /* Enable link change events even when polling the serdes. */
5748         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5749         udelay(40);
5750
5751         current_link_up = false;
5752         tp->link_config.rmt_adv = 0;
5753         mac_status = tr32(MAC_STATUS);
5754
5755         if (tg3_flag(tp, HW_AUTONEG))
5756                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5757         else
5758                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5759
5760         tp->napi[0].hw_status->status =
5761                 (SD_STATUS_UPDATED |
5762                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5763
5764         for (i = 0; i < 100; i++) {
5765                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5766                                     MAC_STATUS_CFG_CHANGED));
5767                 udelay(5);
5768                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5769                                          MAC_STATUS_CFG_CHANGED |
5770                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5771                         break;
5772         }
5773
5774         mac_status = tr32(MAC_STATUS);
5775         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5776                 current_link_up = false;
5777                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5778                     tp->serdes_counter == 0) {
5779                         tw32_f(MAC_MODE, (tp->mac_mode |
5780                                           MAC_MODE_SEND_CONFIGS));
5781                         udelay(1);
5782                         tw32_f(MAC_MODE, tp->mac_mode);
5783                 }
5784         }
5785
5786         if (current_link_up) {
5787                 tp->link_config.active_speed = SPEED_1000;
5788                 tp->link_config.active_duplex = DUPLEX_FULL;
5789                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5790                                     LED_CTRL_LNKLED_OVERRIDE |
5791                                     LED_CTRL_1000MBPS_ON));
5792         } else {
5793                 tp->link_config.active_speed = SPEED_UNKNOWN;
5794                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5795                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5796                                     LED_CTRL_LNKLED_OVERRIDE |
5797                                     LED_CTRL_TRAFFIC_OVERRIDE));
5798         }
5799
5800         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5801                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5802                 if (orig_pause_cfg != now_pause_cfg ||
5803                     orig_active_speed != tp->link_config.active_speed ||
5804                     orig_active_duplex != tp->link_config.active_duplex)
5805                         tg3_link_report(tp);
5806         }
5807
5808         return 0;
5809 }
5810
5811 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5812 {
5813         int err = 0;
5814         u32 bmsr, bmcr;
5815         u16 current_speed = SPEED_UNKNOWN;
5816         u8 current_duplex = DUPLEX_UNKNOWN;
5817         bool current_link_up = false;
5818         u32 local_adv, remote_adv, sgsr;
5819
5820         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5821              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5822              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5823              (sgsr & SERDES_TG3_SGMII_MODE)) {
5824
5825                 if (force_reset)
5826                         tg3_phy_reset(tp);
5827
5828                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5829
5830                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5831                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5832                 } else {
5833                         current_link_up = true;
5834                         if (sgsr & SERDES_TG3_SPEED_1000) {
5835                                 current_speed = SPEED_1000;
5836                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5837                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5838                                 current_speed = SPEED_100;
5839                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5840                         } else {
5841                                 current_speed = SPEED_10;
5842                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5843                         }
5844
5845                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5846                                 current_duplex = DUPLEX_FULL;
5847                         else
5848                                 current_duplex = DUPLEX_HALF;
5849                 }
5850
5851                 tw32_f(MAC_MODE, tp->mac_mode);
5852                 udelay(40);
5853
5854                 tg3_clear_mac_status(tp);
5855
5856                 goto fiber_setup_done;
5857         }
5858
5859         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5860         tw32_f(MAC_MODE, tp->mac_mode);
5861         udelay(40);
5862
5863         tg3_clear_mac_status(tp);
5864
5865         if (force_reset)
5866                 tg3_phy_reset(tp);
5867
5868         tp->link_config.rmt_adv = 0;
5869
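        /* BMSR latches link-down events (latched-low, per IEEE 802.3), so it
         * is read twice below: the first read clears any stale latch, the
         * second reflects the current link state.
         */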
5870         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5871         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5872         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5873                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5874                         bmsr |= BMSR_LSTATUS;
5875                 else
5876                         bmsr &= ~BMSR_LSTATUS;
5877         }
5878
5879         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5880
5881         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5882             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5883                 /* do nothing, just check for link up at the end */
5884         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5885                 u32 adv, newadv;
5886
5887                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5888                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5889                                  ADVERTISE_1000XPAUSE |
5890                                  ADVERTISE_1000XPSE_ASYM |
5891                                  ADVERTISE_SLCT);
5892
5893                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5894                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5895
5896                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5897                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5898                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5899                         tg3_writephy(tp, MII_BMCR, bmcr);
5900
5901                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5902                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5903                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5904
5905                         return err;
5906                 }
5907         } else {
5908                 u32 new_bmcr;
5909
5910                 bmcr &= ~BMCR_SPEED1000;
5911                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5912
5913                 if (tp->link_config.duplex == DUPLEX_FULL)
5914                         new_bmcr |= BMCR_FULLDPLX;
5915
5916                 if (new_bmcr != bmcr) {
5917                         /* BMCR_SPEED1000 is a reserved bit that needs
5918                          * to be set on write.
5919                          */
5920                         new_bmcr |= BMCR_SPEED1000;
5921
5922                         /* Force a linkdown */
5923                         if (tp->link_up) {
5924                                 u32 adv;
5925
5926                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5927                                 adv &= ~(ADVERTISE_1000XFULL |
5928                                          ADVERTISE_1000XHALF |
5929                                          ADVERTISE_SLCT);
5930                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5931                                 tg3_writephy(tp, MII_BMCR, bmcr |
5932                                                            BMCR_ANRESTART |
5933                                                            BMCR_ANENABLE);
5934                                 udelay(10);
5935                                 tg3_carrier_off(tp);
5936                         }
5937                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5938                         bmcr = new_bmcr;
5939                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5940                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5941                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5942                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5943                                         bmsr |= BMSR_LSTATUS;
5944                                 else
5945                                         bmsr &= ~BMSR_LSTATUS;
5946                         }
5947                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5948                 }
5949         }
5950
5951         if (bmsr & BMSR_LSTATUS) {
5952                 current_speed = SPEED_1000;
5953                 current_link_up = true;
5954                 if (bmcr & BMCR_FULLDPLX)
5955                         current_duplex = DUPLEX_FULL;
5956                 else
5957                         current_duplex = DUPLEX_HALF;
5958
5959                 local_adv = 0;
5960                 remote_adv = 0;
5961
5962                 if (bmcr & BMCR_ANENABLE) {
5963                         u32 common;
5964
5965                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5966                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5967                         common = local_adv & remote_adv;
5968                         if (common & (ADVERTISE_1000XHALF |
5969                                       ADVERTISE_1000XFULL)) {
5970                                 if (common & ADVERTISE_1000XFULL)
5971                                         current_duplex = DUPLEX_FULL;
5972                                 else
5973                                         current_duplex = DUPLEX_HALF;
5974
5975                                 tp->link_config.rmt_adv =
5976                                            mii_adv_to_ethtool_adv_x(remote_adv);
5977                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5978                                 /* Link is up via parallel detect */
5979                         } else {
5980                                 current_link_up = false;
5981                         }
5982                 }
5983         }
5984
5985 fiber_setup_done:
5986         if (current_link_up && current_duplex == DUPLEX_FULL)
5987                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5988
5989         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5990         if (tp->link_config.active_duplex == DUPLEX_HALF)
5991                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5992
5993         tw32_f(MAC_MODE, tp->mac_mode);
5994         udelay(40);
5995
5996         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5997
5998         tp->link_config.active_speed = current_speed;
5999         tp->link_config.active_duplex = current_duplex;
6000
6001         tg3_test_and_report_link_chg(tp, current_link_up);
6002         return err;
6003 }
6004
6005 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6006 {
6007         if (tp->serdes_counter) {
6008                 /* Give autoneg time to complete. */
6009                 tp->serdes_counter--;
6010                 return;
6011         }
6012
6013         if (!tp->link_up &&
6014             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6015                 u32 bmcr;
6016
6017                 tg3_readphy(tp, MII_BMCR, &bmcr);
6018                 if (bmcr & BMCR_ANENABLE) {
6019                         u32 phy1, phy2;
6020
6021                         /* Select shadow register 0x1f */
6022                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6023                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6024
6025                         /* Select expansion interrupt status register */
6026                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6027                                          MII_TG3_DSP_EXP1_INT_STAT);
6028                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6029                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6030
6031                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6032                                 /* We have signal detect and are not
6033                                  * receiving config code words; the link
6034                                  * is up via parallel detection.
6035                                  */
6036
6037                                 bmcr &= ~BMCR_ANENABLE;
6038                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6039                                 tg3_writephy(tp, MII_BMCR, bmcr);
6040                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6041                         }
6042                 }
6043         } else if (tp->link_up &&
6044                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6045                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6046                 u32 phy2;
6047
6048                 /* Select expansion interrupt status register */
6049                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6050                                  MII_TG3_DSP_EXP1_INT_STAT);
6051                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6052                 if (phy2 & 0x20) {
6053                         u32 bmcr;
6054
6055                         /* Config code words received, turn on autoneg. */
6056                         tg3_readphy(tp, MII_BMCR, &bmcr);
6057                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6058
6059                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6060
6061                 }
6062         }
6063 }
6064
6065 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6066 {
6067         u32 val;
6068         int err;
6069
6070         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6071                 err = tg3_setup_fiber_phy(tp, force_reset);
6072         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6073                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6074         else
6075                 err = tg3_setup_copper_phy(tp, force_reset);
6076
6077         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6078                 u32 scale;
6079
6080                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6081                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6082                         scale = 65;
6083                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6084                         scale = 6;
6085                 else
6086                         scale = 12;
6087
6088                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6089                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6090                 tw32(GRC_MISC_CFG, val);
6091         }
6092
6093         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6094               (6 << TX_LENGTHS_IPG_SHIFT);
6095         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6096             tg3_asic_rev(tp) == ASIC_REV_5762)
6097                 val |= tr32(MAC_TX_LENGTHS) &
6098                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
6099                         TX_LENGTHS_CNT_DWN_VAL_MSK);
6100
6101         if (tp->link_config.active_speed == SPEED_1000 &&
6102             tp->link_config.active_duplex == DUPLEX_HALF)
6103                 tw32(MAC_TX_LENGTHS, val |
6104                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6105         else
6106                 tw32(MAC_TX_LENGTHS, val |
6107                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
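        /* The larger slot time for half-duplex gigabit presumably reflects
         * 802.3 carrier extension (a 4096-bit-time slot) versus the classic
         * 512-bit-time slot that the value 32 encodes; the field's exact
         * units are hardware-defined.
         */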
6108
6109         if (!tg3_flag(tp, 5705_PLUS)) {
6110                 if (tp->link_up) {
6111                         tw32(HOSTCC_STAT_COAL_TICKS,
6112                              tp->coal.stats_block_coalesce_usecs);
6113                 } else {
6114                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
6115                 }
6116         }
6117
6118         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6119                 val = tr32(PCIE_PWR_MGMT_THRESH);
6120                 if (!tp->link_up)
6121                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6122                               tp->pwrmgmt_thresh;
6123                 else
6124                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6125                 tw32(PCIE_PWR_MGMT_THRESH, val);
6126         }
6127
6128         return err;
6129 }
6130
6131 /* tp->lock must be held */
6132 static u64 tg3_refclk_read(struct tg3 *tp)
6133 {
6134         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6135         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6136 }
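
/* Note: the LSB-before-MSB read order above presumably relies on the EAV
 * block latching the full 64-bit counter when the LSB is read; nothing
 * here guards against a carry between the two reads otherwise.
 */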
6137
6138 /* tp->lock must be held */
6139 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6140 {
6141         u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6142
6143         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6144         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6145         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6146         tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6147 }
6148
6149 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6150 static inline void tg3_full_unlock(struct tg3 *tp);
6151 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6152 {
6153         struct tg3 *tp = netdev_priv(dev);
6154
6155         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6156                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6157                                 SOF_TIMESTAMPING_SOFTWARE;
6158
6159         if (tg3_flag(tp, PTP_CAPABLE)) {
6160                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6161                                         SOF_TIMESTAMPING_RX_HARDWARE |
6162                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6163         }
6164
6165         if (tp->ptp_clock)
6166                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6167         else
6168                 info->phc_index = -1;
6169
6170         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6171
6172         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6173                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6174                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6175                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6176         return 0;
6177 }
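
/* These capabilities are what userspace sees via 'ethtool -T <dev>'
 * (ETHTOOL_GET_TS_INFO); PTP daemons use them to choose a timestamping
 * mode the hardware can honor.
 */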
6178
6179 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6180 {
6181         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6182         bool neg_adj = false;
6183         u32 correction = 0;
6184
6185         if (ppb < 0) {
6186                 neg_adj = true;
6187                 ppb = -ppb;
6188         }
6189
6190         /* Frequency adjustment is performed using hardware with a 24 bit
6191          * accumulator and a programmable correction value. On each clock, the
6192          * correction value gets added to the accumulator and when it
6193          * overflows, the time counter is incremented/decremented.
6194          *
6195          * So conversion from ppb to correction value is
6196          *              ppb * (1 << 24) / 1000000000
6197          */
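        /* Worked example with a hypothetical input: ppb = 100000 (100 ppm)
         * gives correction = 100000 * 16777216 / 1000000000 = 1677
         * (truncated), i.e. the accumulator advances an extra ~1677/2^24
         * of a tick per clock.
         */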
6198         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6199                      TG3_EAV_REF_CLK_CORRECT_MASK;
6200
6201         tg3_full_lock(tp, 0);
6202
6203         if (correction)
6204                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6205                      TG3_EAV_REF_CLK_CORRECT_EN |
6206                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6207         else
6208                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6209
6210         tg3_full_unlock(tp);
6211
6212         return 0;
6213 }
6214
6215 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6216 {
6217         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6218
6219         tg3_full_lock(tp, 0);
6220         tp->ptp_adjust += delta;
6221         tg3_full_unlock(tp);
6222
6223         return 0;
6224 }
6225
6226 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6227 {
6228         u64 ns;
6229         u32 remainder;
6230         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6231
6232         tg3_full_lock(tp, 0);
6233         ns = tg3_refclk_read(tp);
6234         ns += tp->ptp_adjust;
6235         tg3_full_unlock(tp);
6236
6237         ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6238         ts->tv_nsec = remainder;
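        /* e.g. ns == 5000000123 splits into tv_sec == 5, tv_nsec == 123 */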
6239
6240         return 0;
6241 }
6242
6243 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6244                            const struct timespec *ts)
6245 {
6246         u64 ns;
6247         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6248
6249         ns = timespec_to_ns(ts);
6250
6251         tg3_full_lock(tp, 0);
6252         tg3_refclk_write(tp, ns);
6253         tp->ptp_adjust = 0;
6254         tg3_full_unlock(tp);
6255
6256         return 0;
6257 }
6258
6259 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6260                           struct ptp_clock_request *rq, int on)
6261 {
6262         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6263         u32 clock_ctl;
6264         int rval = 0;
6265
6266         switch (rq->type) {
6267         case PTP_CLK_REQ_PEROUT:
6268                 if (rq->perout.index != 0)
6269                         return -EINVAL;
6270
6271                 tg3_full_lock(tp, 0);
6272                 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6273                 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6274
6275                 if (on) {
6276                         u64 nsec;
6277
6278                         nsec = rq->perout.start.sec * 1000000000ULL +
6279                                rq->perout.start.nsec;
6280
6281                         if (rq->perout.period.sec || rq->perout.period.nsec) {
6282                                 netdev_warn(tp->dev,
6283                                             "Device supports only a one-shot timesync output, period must be 0\n");
6284                                 rval = -EINVAL;
6285                                 goto err_out;
6286                         }
6287
6288                         if (nsec & (1ULL << 63)) {
6289                                 netdev_warn(tp->dev,
6290                                             "Start value (nsec) is over the limit; it must fit in 63 bits\n");
6291                                 rval = -EINVAL;
6292                                 goto err_out;
6293                         }
6294
6295                         tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6296                         tw32(TG3_EAV_WATCHDOG0_MSB,
6297                              TG3_EAV_WATCHDOG0_EN |
6298                              ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6299
6300                         tw32(TG3_EAV_REF_CLCK_CTL,
6301                              clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6302                 } else {
6303                         tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6304                         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6305                 }
6306
6307 err_out:
6308                 tg3_full_unlock(tp);
6309                 return rval;
6310
6311         default:
6312                 break;
6313         }
6314
6315         return -EOPNOTSUPP;
6316 }
6317
6318 static const struct ptp_clock_info tg3_ptp_caps = {
6319         .owner          = THIS_MODULE,
6320         .name           = "tg3 clock",
6321         .max_adj        = 250000000,
6322         .n_alarm        = 0,
6323         .n_ext_ts       = 0,
6324         .n_per_out      = 1,
6325         .pps            = 0,
6326         .adjfreq        = tg3_ptp_adjfreq,
6327         .adjtime        = tg3_ptp_adjtime,
6328         .gettime        = tg3_ptp_gettime,
6329         .settime        = tg3_ptp_settime,
6330         .enable         = tg3_ptp_enable,
6331 };
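
/* Illustrative userspace sketch (not part of the driver): once registered,
 * this clock appears as a dynamic posix clock, e.g. /dev/ptp0 (the index
 * is not fixed), and can be read like so:
 *
 *      #include <fcntl.h>
 *      #include <stdio.h>
 *      #include <time.h>
 *
 *      #define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | 3)
 *
 *      int main(void)
 *      {
 *              struct timespec ts;
 *              int fd = open("/dev/ptp0", O_RDWR);
 *
 *              if (fd < 0)
 *                      return 1;
 *              clock_gettime(FD_TO_CLOCKID(fd), &ts);
 *              printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
 *              return 0;
 *      }
 */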
6332
6333 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6334                                      struct skb_shared_hwtstamps *timestamp)
6335 {
6336         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6337         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6338                                            tp->ptp_adjust);
6339 }
6340
6341 /* tp->lock must be held */
6342 static void tg3_ptp_init(struct tg3 *tp)
6343 {
6344         if (!tg3_flag(tp, PTP_CAPABLE))
6345                 return;
6346
6347         /* Initialize the hardware clock to the system time. */
6348         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6349         tp->ptp_adjust = 0;
6350         tp->ptp_info = tg3_ptp_caps;
6351 }
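
/* The registration itself (ptp_clock_register(), which fills
 * tp->ptp_clock) happens in the probe path, not here; tg3_ptp_fini()
 * below is its counterpart.
 */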
6352
6353 /* tp->lock must be held */
6354 static void tg3_ptp_resume(struct tg3 *tp)
6355 {
6356         if (!tg3_flag(tp, PTP_CAPABLE))
6357                 return;
6358
6359         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6360         tp->ptp_adjust = 0;
6361 }
6362
6363 static void tg3_ptp_fini(struct tg3 *tp)
6364 {
6365         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6366                 return;
6367
6368         ptp_clock_unregister(tp->ptp_clock);
6369         tp->ptp_clock = NULL;
6370         tp->ptp_adjust = 0;
6371 }
6372
6373 static inline int tg3_irq_sync(struct tg3 *tp)
6374 {
6375         return tp->irq_sync;
6376 }
6377
6378 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6379 {
6380         int i;
6381
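        /* Skew dst by the register offset so each value lands at the same
         * byte offset in the dump buffer that the register occupies in BAR
         * space; tg3_dump_state() can then print the buffer by address.
         */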
6382         dst = (u32 *)((u8 *)dst + off);
6383         for (i = 0; i < len; i += sizeof(u32))
6384                 *dst++ = tr32(off + i);
6385 }
6386
6387 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6388 {
6389         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6390         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6391         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6392         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6393         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6394         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6395         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6396         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6397         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6398         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6399         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6400         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6401         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6402         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6403         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6404         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6405         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6406         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6407         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6408
6409         if (tg3_flag(tp, SUPPORT_MSIX))
6410                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6411
6412         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6413         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6414         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6415         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6416         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6417         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6418         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6419         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6420
6421         if (!tg3_flag(tp, 5705_PLUS)) {
6422                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6423                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6424                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6425         }
6426
6427         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6428         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6429         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6430         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6431         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6432
6433         if (tg3_flag(tp, NVRAM))
6434                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6435 }
6436
6437 static void tg3_dump_state(struct tg3 *tp)
6438 {
6439         int i;
6440         u32 *regs;
6441
6442         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6443         if (!regs)
6444                 return;
6445
6446         if (tg3_flag(tp, PCI_EXPRESS)) {
6447                 /* Read up to but not including private PCI registers */
6448                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6449                         regs[i / sizeof(u32)] = tr32(i);
6450         } else
6451                 tg3_dump_legacy_regs(tp, regs);
6452
6453         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6454                 if (!regs[i + 0] && !regs[i + 1] &&
6455                     !regs[i + 2] && !regs[i + 3])
6456                         continue;
6457
6458                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6459                            i * 4,
6460                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6461         }
6462
6463         kfree(regs);
6464
6465         for (i = 0; i < tp->irq_cnt; i++) {
6466                 struct tg3_napi *tnapi = &tp->napi[i];
6467
6468                 /* SW status block */
6469                 netdev_err(tp->dev,
6470                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6471                            i,
6472                            tnapi->hw_status->status,
6473                            tnapi->hw_status->status_tag,
6474                            tnapi->hw_status->rx_jumbo_consumer,
6475                            tnapi->hw_status->rx_consumer,
6476                            tnapi->hw_status->rx_mini_consumer,
6477                            tnapi->hw_status->idx[0].rx_producer,
6478                            tnapi->hw_status->idx[0].tx_consumer);
6479
6480                 netdev_err(tp->dev,
6481                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6482                            i,
6483                            tnapi->last_tag, tnapi->last_irq_tag,
6484                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6485                            tnapi->rx_rcb_ptr,
6486                            tnapi->prodring.rx_std_prod_idx,
6487                            tnapi->prodring.rx_std_cons_idx,
6488                            tnapi->prodring.rx_jmb_prod_idx,
6489                            tnapi->prodring.rx_jmb_cons_idx);
6490         }
6491 }
6492
6493 /* This is called whenever we suspect that the system chipset is re-
6494  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6495  * is bogus tx completions. We try to recover by setting the
6496  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6497  * in the workqueue.
6498  */
6499 static void tg3_tx_recover(struct tg3 *tp)
6500 {
6501         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6502                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6503
6504         netdev_warn(tp->dev,
6505                     "The system may be re-ordering memory-mapped I/O "
6506                     "cycles to the network device, attempting to recover. "
6507                     "Please report the problem to the driver maintainer "
6508                     "and include system chipset information.\n");
6509
6510         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6511 }
6512
6513 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6514 {
6515         /* Tell compiler to fetch tx indices from memory. */
6516         barrier();
6517         return tnapi->tx_pending -
6518                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6519 }
6520
6521 /* Tigon3 never reports partial packet sends.  So we do not
6522  * need special logic to handle SKBs that have not had all
6523  * of their frags sent yet, like SunGEM does.
6524  */
6525 static void tg3_tx(struct tg3_napi *tnapi)
6526 {
6527         struct tg3 *tp = tnapi->tp;
6528         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6529         u32 sw_idx = tnapi->tx_cons;
6530         struct netdev_queue *txq;
6531         int index = tnapi - tp->napi;
6532         unsigned int pkts_compl = 0, bytes_compl = 0;
6533
6534         if (tg3_flag(tp, ENABLE_TSS))
6535                 index--;
6536
6537         txq = netdev_get_tx_queue(tp->dev, index);
6538
6539         while (sw_idx != hw_idx) {
6540                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6541                 struct sk_buff *skb = ri->skb;
6542                 int i, tx_bug = 0;
6543
6544                 if (unlikely(skb == NULL)) {
6545                         tg3_tx_recover(tp);
6546                         return;
6547                 }
6548
6549                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6550                         struct skb_shared_hwtstamps timestamp;
6551                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6552                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6553
6554                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6555
6556                         skb_tstamp_tx(skb, &timestamp);
6557                 }
6558
6559                 pci_unmap_single(tp->pdev,
6560                                  dma_unmap_addr(ri, mapping),
6561                                  skb_headlen(skb),
6562                                  PCI_DMA_TODEVICE);
6563
6564                 ri->skb = NULL;
6565
6566                 while (ri->fragmented) {
6567                         ri->fragmented = false;
6568                         sw_idx = NEXT_TX(sw_idx);
6569                         ri = &tnapi->tx_buffers[sw_idx];
6570                 }
6571
6572                 sw_idx = NEXT_TX(sw_idx);
6573
6574                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6575                         ri = &tnapi->tx_buffers[sw_idx];
6576                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6577                                 tx_bug = 1;
6578
6579                         pci_unmap_page(tp->pdev,
6580                                        dma_unmap_addr(ri, mapping),
6581                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6582                                        PCI_DMA_TODEVICE);
6583
6584                         while (ri->fragmented) {
6585                                 ri->fragmented = false;
6586                                 sw_idx = NEXT_TX(sw_idx);
6587                                 ri = &tnapi->tx_buffers[sw_idx];
6588                         }
6589
6590                         sw_idx = NEXT_TX(sw_idx);
6591                 }
6592
6593                 pkts_compl++;
6594                 bytes_compl += skb->len;
6595
6596                 dev_kfree_skb(skb);
6597
6598                 if (unlikely(tx_bug)) {
6599                         tg3_tx_recover(tp);
6600                         return;
6601                 }
6602         }
6603
6604         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6605
6606         tnapi->tx_cons = sw_idx;
6607
6608         /* Need to make the tx_cons update visible to tg3_start_xmit()
6609          * before checking for netif_queue_stopped().  Without the
6610          * memory barrier, there is a small possibility that tg3_start_xmit()
6611          * will miss it and cause the queue to be stopped forever.
6612          */
6613         smp_mb();
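        /* This presumably pairs with a barrier on the transmit side, where
         * the queue is stopped and tg3_tx_avail() is then re-checked;
         * without that pairing the wake below could still be missed.
         */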
6614
6615         if (unlikely(netif_tx_queue_stopped(txq) &&
6616                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6617                 __netif_tx_lock(txq, smp_processor_id());
6618                 if (netif_tx_queue_stopped(txq) &&
6619                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6620                         netif_tx_wake_queue(txq);
6621                 __netif_tx_unlock(txq);
6622         }
6623 }
6624
6625 static void tg3_frag_free(bool is_frag, void *data)
6626 {
6627         if (is_frag)
6628                 put_page(virt_to_head_page(data));
6629         else
6630                 kfree(data);
6631 }
6632
6633 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6634 {
6635         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6636                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6637
6638         if (!ri->data)
6639                 return;
6640
6641         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6642                          map_sz, PCI_DMA_FROMDEVICE);
6643         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6644         ri->data = NULL;
6645 }
6646
6647
6648 /* Returns the size of the rx data buffer allocated, or < 0 on error.
6649  *
6650  * We only need to fill in the address because the other members
6651  * of the RX descriptor are invariant; see tg3_init_rings.
6652  *
6653  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6654  * posting buffers we only dirty the first cache line of the RX
6655  * descriptor (containing the address).  Whereas for the RX status
6656  * buffers the cpu only reads the last cacheline of the RX descriptor
6657  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6658  */
6659 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6660                              u32 opaque_key, u32 dest_idx_unmasked,
6661                              unsigned int *frag_size)
6662 {
6663         struct tg3_rx_buffer_desc *desc;
6664         struct ring_info *map;
6665         u8 *data;
6666         dma_addr_t mapping;
6667         int skb_size, data_size, dest_idx;
6668
6669         switch (opaque_key) {
6670         case RXD_OPAQUE_RING_STD:
6671                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6672                 desc = &tpr->rx_std[dest_idx];
6673                 map = &tpr->rx_std_buffers[dest_idx];
6674                 data_size = tp->rx_pkt_map_sz;
6675                 break;
6676
6677         case RXD_OPAQUE_RING_JUMBO:
6678                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6679                 desc = &tpr->rx_jmb[dest_idx].std;
6680                 map = &tpr->rx_jmb_buffers[dest_idx];
6681                 data_size = TG3_RX_JMB_MAP_SZ;
6682                 break;
6683
6684         default:
6685                 return -EINVAL;
6686         }
6687
6688         /* Do not overwrite any of the map or rp information
6689          * until we are sure we can commit to a new buffer.
6690          *
6691          * Callers depend upon this behavior and assume that
6692          * we leave everything unchanged if we fail.
6693          */
6694         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6695                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
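        /* If the payload plus skb_shared_info fits in one page, take it
         * from the per-cpu page-frag allocator so build_skb() can later
         * wrap it without a copy; *frag_size == 0 marks the kmalloc
         * fallback for tg3_frag_free().
         */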
6696         if (skb_size <= PAGE_SIZE) {
6697                 data = netdev_alloc_frag(skb_size);
6698                 *frag_size = skb_size;
6699         } else {
6700                 data = kmalloc(skb_size, GFP_ATOMIC);
6701                 *frag_size = 0;
6702         }
6703         if (!data)
6704                 return -ENOMEM;
6705
6706         mapping = pci_map_single(tp->pdev,
6707                                  data + TG3_RX_OFFSET(tp),
6708                                  data_size,
6709                                  PCI_DMA_FROMDEVICE);
6710         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6711                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6712                 return -EIO;
6713         }
6714
6715         map->data = data;
6716         dma_unmap_addr_set(map, mapping, mapping);
6717
6718         desc->addr_hi = ((u64)mapping >> 32);
6719         desc->addr_lo = ((u64)mapping & 0xffffffff);
6720
6721         return data_size;
6722 }
6723
6724 /* We only need to move over in the address because the other
6725  * members of the RX descriptor are invariant.  See notes above
6726  * tg3_alloc_rx_data for full details.
6727  */
6728 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6729                            struct tg3_rx_prodring_set *dpr,
6730                            u32 opaque_key, int src_idx,
6731                            u32 dest_idx_unmasked)
6732 {
6733         struct tg3 *tp = tnapi->tp;
6734         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6735         struct ring_info *src_map, *dest_map;
6736         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6737         int dest_idx;
6738
6739         switch (opaque_key) {
6740         case RXD_OPAQUE_RING_STD:
6741                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6742                 dest_desc = &dpr->rx_std[dest_idx];
6743                 dest_map = &dpr->rx_std_buffers[dest_idx];
6744                 src_desc = &spr->rx_std[src_idx];
6745                 src_map = &spr->rx_std_buffers[src_idx];
6746                 break;
6747
6748         case RXD_OPAQUE_RING_JUMBO:
6749                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6750                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6751                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6752                 src_desc = &spr->rx_jmb[src_idx].std;
6753                 src_map = &spr->rx_jmb_buffers[src_idx];
6754                 break;
6755
6756         default:
6757                 return;
6758         }
6759
6760         dest_map->data = src_map->data;
6761         dma_unmap_addr_set(dest_map, mapping,
6762                            dma_unmap_addr(src_map, mapping));
6763         dest_desc->addr_hi = src_desc->addr_hi;
6764         dest_desc->addr_lo = src_desc->addr_lo;
6765
6766         /* Ensure that the update to the data pointer happens after the
6767          * physical addresses have been transferred to the new BD location.
6768          */
6769         smp_wmb();
6770
6771         src_map->data = NULL;
6772 }
6773
6774 /* The RX ring scheme is composed of multiple rings which post fresh
6775  * buffers to the chip, and one special ring the chip uses to report
6776  * status back to the host.
6777  *
6778  * The special ring reports the status of received packets to the
6779  * host.  The chip does not write into the original descriptor the
6780  * RX buffer was obtained from.  The chip simply takes the original
6781  * descriptor as provided by the host, updates the status and length
6782  * field, then writes this into the next status ring entry.
6783  *
6784  * Each ring the host uses to post buffers to the chip is described
6785  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6786  * it is first placed into the on-chip ram.  When the packet's length
6787  * is known, it walks down the TG3_BDINFO entries to select the ring.
6788  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6789  * which is within the range of the new packet's length is chosen.
6790  *
6791  * The "separate ring for rx status" scheme may sound odd, but it makes
6792  * sense from a cache coherency perspective.  If only the host writes
6793  * to the buffer post rings, and only the chip writes to the rx status
6794  * rings, then cache lines never move beyond shared-modified state.
6795  * If both the host and chip were to write into the same ring, cache line
6796  * eviction could occur since both entities want it in an exclusive state.
6797  */
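
/* A compressed view of the indices used below:
 *
 *   tpr->rx_std_prod_idx / rx_jmb_prod_idx   host -> chip, buffers posted
 *   *tnapi->rx_rcb_prod_idx                  chip -> host, status written
 *   tnapi->rx_rcb_ptr                        host consumer of the status ring
 *
 * tg3_rx() advances its sw_idx toward the hardware producer index,
 * recycling or replacing buffers as it goes.
 */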
6798 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6799 {
6800         struct tg3 *tp = tnapi->tp;
6801         u32 work_mask, rx_std_posted = 0;
6802         u32 std_prod_idx, jmb_prod_idx;
6803         u32 sw_idx = tnapi->rx_rcb_ptr;
6804         u16 hw_idx;
6805         int received;
6806         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6807
6808         hw_idx = *(tnapi->rx_rcb_prod_idx);
6809         /*
6810          * We need to order the read of hw_idx and the read of
6811          * the opaque cookie.
6812          */
6813         rmb();
6814         work_mask = 0;
6815         received = 0;
6816         std_prod_idx = tpr->rx_std_prod_idx;
6817         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6818         while (sw_idx != hw_idx && budget > 0) {
6819                 struct ring_info *ri;
6820                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6821                 unsigned int len;
6822                 struct sk_buff *skb;
6823                 dma_addr_t dma_addr;
6824                 u32 opaque_key, desc_idx, *post_ptr;
6825                 u8 *data;
6826                 u64 tstamp = 0;
6827
6828                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6829                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6830                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6831                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6832                         dma_addr = dma_unmap_addr(ri, mapping);
6833                         data = ri->data;
6834                         post_ptr = &std_prod_idx;
6835                         rx_std_posted++;
6836                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6837                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6838                         dma_addr = dma_unmap_addr(ri, mapping);
6839                         data = ri->data;
6840                         post_ptr = &jmb_prod_idx;
6841                 } else
6842                         goto next_pkt_nopost;
6843
6844                 work_mask |= opaque_key;
6845
6846                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6847                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6848                 drop_it:
6849                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6850                                        desc_idx, *post_ptr);
6851                 drop_it_no_recycle:
6852                         /* The card keeps track of the other statistics. */
6853                         tp->rx_dropped++;
6854                         goto next_pkt;
6855                 }
6856
6857                 prefetch(data + TG3_RX_OFFSET(tp));
6858                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6859                       ETH_FCS_LEN;
6860
6861                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6862                      RXD_FLAG_PTPSTAT_PTPV1 ||
6863                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6864                      RXD_FLAG_PTPSTAT_PTPV2) {
6865                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6866                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6867                 }
6868
6869                 if (len > TG3_RX_COPY_THRESH(tp)) {
6870                         int skb_size;
6871                         unsigned int frag_size;
6872
6873                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6874                                                     *post_ptr, &frag_size);
6875                         if (skb_size < 0)
6876                                 goto drop_it;
6877
6878                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6879                                          PCI_DMA_FROMDEVICE);
6880
6881                         /* Ensure that the update to the data happens
6882                          * after the usage of the old DMA mapping.
6883                          */
6884                         smp_wmb();
6885
6886                         ri->data = NULL;
6887
6888                         skb = build_skb(data, frag_size);
6889                         if (!skb) {
6890                                 tg3_frag_free(frag_size != 0, data);
6891                                 goto drop_it_no_recycle;
6892                         }
6893                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6894                 } else {
6895                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6896                                        desc_idx, *post_ptr);
6897
6898                         skb = netdev_alloc_skb(tp->dev,
6899                                                len + TG3_RAW_IP_ALIGN);
6900                         if (skb == NULL)
6901                                 goto drop_it_no_recycle;
6902
6903                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6904                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6905                         memcpy(skb->data,
6906                                data + TG3_RX_OFFSET(tp),
6907                                len);
6908                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6909                 }
6910
6911                 skb_put(skb, len);
6912                 if (tstamp)
6913                         tg3_hwclock_to_timestamp(tp, tstamp,
6914                                                  skb_hwtstamps(skb));
6915
6916                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6917                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6918                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6919                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6920                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6921                 else
6922                         skb_checksum_none_assert(skb);
6923
6924                 skb->protocol = eth_type_trans(skb, tp->dev);
6925
6926                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6927                     skb->protocol != htons(ETH_P_8021Q)) {
6928                         dev_kfree_skb(skb);
6929                         goto drop_it_no_recycle;
6930                 }
6931
6932                 if (desc->type_flags & RXD_FLAG_VLAN &&
6933                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6934                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6935                                                desc->err_vlan & RXD_VLAN_MASK);
6936
6937                 napi_gro_receive(&tnapi->napi, skb);
6938
6939                 received++;
6940                 budget--;
6941
6942 next_pkt:
6943                 (*post_ptr)++;
6944
6945                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6946                         tpr->rx_std_prod_idx = std_prod_idx &
6947                                                tp->rx_std_ring_mask;
6948                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6949                                      tpr->rx_std_prod_idx);
6950                         work_mask &= ~RXD_OPAQUE_RING_STD;
6951                         rx_std_posted = 0;
6952                 }
6953 next_pkt_nopost:
6954                 sw_idx++;
6955                 sw_idx &= tp->rx_ret_ring_mask;
6956
6957                 /* Refresh hw_idx to see if there is new work */
6958                 if (sw_idx == hw_idx) {
6959                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6960                         rmb();
6961                 }
6962         }
6963
6964         /* ACK the status ring. */
6965         tnapi->rx_rcb_ptr = sw_idx;
6966         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6967
6968         /* Refill RX ring(s). */
6969         if (!tg3_flag(tp, ENABLE_RSS)) {
6970                 /* Sync BD data before updating mailbox */
6971                 wmb();
6972
6973                 if (work_mask & RXD_OPAQUE_RING_STD) {
6974                         tpr->rx_std_prod_idx = std_prod_idx &
6975                                                tp->rx_std_ring_mask;
6976                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6977                                      tpr->rx_std_prod_idx);
6978                 }
6979                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6980                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6981                                                tp->rx_jmb_ring_mask;
6982                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6983                                      tpr->rx_jmb_prod_idx);
6984                 }
6985                 mmiowb();
6986         } else if (work_mask) {
6987                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6988                  * updated before the producer indices can be updated.
6989                  */
6990                 smp_wmb();
6991
6992                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6993                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6994
6995                 if (tnapi != &tp->napi[1]) {
6996                         tp->rx_refill = true;
6997                         napi_schedule(&tp->napi[1].napi);
6998                 }
6999         }
7000
7001         return received;
7002 }
7003
7004 static void tg3_poll_link(struct tg3 *tp)
7005 {
7006         /* handle link change and other phy events */
7007         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7008                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7009
7010                 if (sblk->status & SD_STATUS_LINK_CHG) {
7011                         sblk->status = SD_STATUS_UPDATED |
7012                                        (sblk->status & ~SD_STATUS_LINK_CHG);
7013                         spin_lock(&tp->lock);
7014                         if (tg3_flag(tp, USE_PHYLIB)) {
7015                                 tw32_f(MAC_STATUS,
7016                                      (MAC_STATUS_SYNC_CHANGED |
7017                                       MAC_STATUS_CFG_CHANGED |
7018                                       MAC_STATUS_MI_COMPLETION |
7019                                       MAC_STATUS_LNKSTATE_CHANGED));
7020                                 udelay(40);
7021                         } else
7022                                 tg3_setup_phy(tp, false);
7023                         spin_unlock(&tp->lock);
7024                 }
7025         }
7026 }
7027
7028 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7029                                 struct tg3_rx_prodring_set *dpr,
7030                                 struct tg3_rx_prodring_set *spr)
7031 {
7032         u32 si, di, cpycnt, src_prod_idx;
7033         int i, err = 0;
7034
7035         while (1) {
7036                 src_prod_idx = spr->rx_std_prod_idx;
7037
7038                 /* Make sure updates to the rx_std_buffers[] entries and the
7039                  * standard producer index are seen in the correct order.
7040                  */
7041                 smp_rmb();
7042
7043                 if (spr->rx_std_cons_idx == src_prod_idx)
7044                         break;
7045
7046                 if (spr->rx_std_cons_idx < src_prod_idx)
7047                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7048                 else
7049                         cpycnt = tp->rx_std_ring_mask + 1 -
7050                                  spr->rx_std_cons_idx;
7051
7052                 cpycnt = min(cpycnt,
7053                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
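                /* cpycnt is first the contiguous run available on the source
                 * ring before it wraps, then clamped so the destination
                 * writes do not wrap either; the outer loop picks up any
                 * remainder on the next pass.
                 */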
7054
7055                 si = spr->rx_std_cons_idx;
7056                 di = dpr->rx_std_prod_idx;
7057
7058                 for (i = di; i < di + cpycnt; i++) {
7059                         if (dpr->rx_std_buffers[i].data) {
7060                                 cpycnt = i - di;
7061                                 err = -ENOSPC;
7062                                 break;
7063                         }
7064                 }
7065
7066                 if (!cpycnt)
7067                         break;
7068
7069                 /* Ensure that updates to the rx_std_buffers ring and the
7070                  * shadowed hardware producer ring from tg3_recycle_rx() are
7071                  * ordered correctly WRT the data check above.
7072                  */
7073                 smp_rmb();
7074
7075                 memcpy(&dpr->rx_std_buffers[di],
7076                        &spr->rx_std_buffers[si],
7077                        cpycnt * sizeof(struct ring_info));
7078
7079                 for (i = 0; i < cpycnt; i++, di++, si++) {
7080                         struct tg3_rx_buffer_desc *sbd, *dbd;
7081                         sbd = &spr->rx_std[si];
7082                         dbd = &dpr->rx_std[di];
7083                         dbd->addr_hi = sbd->addr_hi;
7084                         dbd->addr_lo = sbd->addr_lo;
7085                 }
7086
7087                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7088                                        tp->rx_std_ring_mask;
7089                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7090                                        tp->rx_std_ring_mask;
7091         }
7092
7093         while (1) {
7094                 src_prod_idx = spr->rx_jmb_prod_idx;
7095
7096                 /* Make sure updates to the rx_jmb_buffers[] entries and
7097                  * the jumbo producer index are seen in the correct order.
7098                  */
7099                 smp_rmb();
7100
7101                 if (spr->rx_jmb_cons_idx == src_prod_idx)
7102                         break;
7103
7104                 if (spr->rx_jmb_cons_idx < src_prod_idx)
7105                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7106                 else
7107                         cpycnt = tp->rx_jmb_ring_mask + 1 -
7108                                  spr->rx_jmb_cons_idx;
7109
7110                 cpycnt = min(cpycnt,
7111                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7112
7113                 si = spr->rx_jmb_cons_idx;
7114                 di = dpr->rx_jmb_prod_idx;
7115
7116                 for (i = di; i < di + cpycnt; i++) {
7117                         if (dpr->rx_jmb_buffers[i].data) {
7118                                 cpycnt = i - di;
7119                                 err = -ENOSPC;
7120                                 break;
7121                         }
7122                 }
7123
7124                 if (!cpycnt)
7125                         break;
7126
7127                 /* Ensure that updates to the rx_jmb_buffers ring and the
7128                  * shadowed hardware producer ring from tg3_recycle_skb() are
7129                  * ordered correctly WRT the skb check above.
7130                  */
7131                 smp_rmb();
7132
7133                 memcpy(&dpr->rx_jmb_buffers[di],
7134                        &spr->rx_jmb_buffers[si],
7135                        cpycnt * sizeof(struct ring_info));
7136
7137                 for (i = 0; i < cpycnt; i++, di++, si++) {
7138                         struct tg3_rx_buffer_desc *sbd, *dbd;
7139                         sbd = &spr->rx_jmb[si].std;
7140                         dbd = &dpr->rx_jmb[di].std;
7141                         dbd->addr_hi = sbd->addr_hi;
7142                         dbd->addr_lo = sbd->addr_lo;
7143                 }
7144
7145                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7146                                        tp->rx_jmb_ring_mask;
7147                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7148                                        tp->rx_jmb_ring_mask;
7149         }
7150
7151         return err;
7152 }
7153
7154 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7155 {
7156         struct tg3 *tp = tnapi->tp;
7157
7158         /* run TX completion thread */
7159         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7160                 tg3_tx(tnapi);
7161                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7162                         return work_done;
7163         }
7164
7165         if (!tnapi->rx_rcb_prod_idx)
7166                 return work_done;
7167
7168         /* run RX thread, within the bounds set by NAPI.
7169          * All RX "locking" is done by ensuring outside
7170          * code synchronizes with tg3->napi.poll()
7171          */
7172         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7173                 work_done += tg3_rx(tnapi, budget - work_done);
7174
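             /* With RSS, vector 1 drains every vector's refilled buffers
              * into the vector 0 producer ring (the one the hardware
              * consumes) and then publishes the new producer indices via
              * the RX mailboxes below.
              */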
7175         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7176                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7177                 int i, err = 0;
7178                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7179                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7180
7181                 tp->rx_refill = false;
7182                 for (i = 1; i <= tp->rxq_cnt; i++)
7183                         err |= tg3_rx_prodring_xfer(tp, dpr,
7184                                                     &tp->napi[i].prodring);
7185
7186                 wmb();
7187
7188                 if (std_prod_idx != dpr->rx_std_prod_idx)
7189                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7190                                      dpr->rx_std_prod_idx);
7191
7192                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7193                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7194                                      dpr->rx_jmb_prod_idx);
7195
7196                 mmiowb();
7197
7198                 if (err)
7199                         tw32_f(HOSTCC_MODE, tp->coal_now);
7200         }
7201
7202         return work_done;
7203 }
7204
7205 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7206 {
7207         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7208                 schedule_work(&tp->reset_task);
7209 }
7210
7211 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7212 {
7213         cancel_work_sync(&tp->reset_task);
7214         tg3_flag_clear(tp, RESET_TASK_PENDING);
7215         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7216 }
7217
7218 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7219 {
7220         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7221         struct tg3 *tp = tnapi->tp;
7222         int work_done = 0;
7223         struct tg3_hw_status *sblk = tnapi->hw_status;
7224
7225         while (1) {
7226                 work_done = tg3_poll_work(tnapi, work_done, budget);
7227
7228                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7229                         goto tx_recovery;
7230
7231                 if (unlikely(work_done >= budget))
7232                         break;
7233
7234                 /* tnapi->last_tag is used when reenabling interrupts below
7235                  * to tell the hw how much work has been processed,
7236                  * so we must read it before checking for more work.
7237                  */
7238                 tnapi->last_tag = sblk->status_tag;
7239                 tnapi->last_irq_tag = tnapi->last_tag;
7240                 rmb();
7241
7242                 /* check for RX/TX work to do */
7243                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7244                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7245
7246                         /* This test is not race-free, but looping again
7247                          * here reduces the number of interrupts taken.
7248                          */
7249                         if (tnapi == &tp->napi[1] && tp->rx_refill)
7250                                 continue;
7251
7252                         napi_complete(napi);
7253                         /* Reenable interrupts. */
7254                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7255
7256                         /* This test is synchronized by napi_schedule() and
7257                          * napi_complete(), which together close the race.
7258                          */
7259                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7260                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
7261                                                   HOSTCC_MODE_ENABLE |
7262                                                   tnapi->coal_now);
7263                         }
7264                         mmiowb();
7265                         break;
7266                 }
7267         }
7268
7269         return work_done;
7270
7271 tx_recovery:
7272         /* work_done is guaranteed to be less than budget. */
7273         napi_complete(napi);
7274         tg3_reset_task_schedule(tp);
7275         return work_done;
7276 }
7277
7278 static void tg3_process_error(struct tg3 *tp)
7279 {
7280         u32 val;
7281         bool real_error = false;
7282
7283         if (tg3_flag(tp, ERROR_PROCESSED))
7284                 return;
7285
7286         /* Check Flow Attention register */
7287         val = tr32(HOSTCC_FLOW_ATTN);
7288         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7289                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7290                 real_error = true;
7291         }
7292
7293         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7294                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7295                 real_error = true;
7296         }
7297
7298         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7299                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7300                 real_error = true;
7301         }
7302
7303         if (!real_error)
7304                 return;
7305
7306         tg3_dump_state(tp);
7307
7308         tg3_flag_set(tp, ERROR_PROCESSED);
7309         tg3_reset_task_schedule(tp);
7310 }
7311
7312 static int tg3_poll(struct napi_struct *napi, int budget)
7313 {
7314         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7315         struct tg3 *tp = tnapi->tp;
7316         int work_done = 0;
7317         struct tg3_hw_status *sblk = tnapi->hw_status;
7318
7319         while (1) {
7320                 if (sblk->status & SD_STATUS_ERROR)
7321                         tg3_process_error(tp);
7322
7323                 tg3_poll_link(tp);
7324
7325                 work_done = tg3_poll_work(tnapi, work_done, budget);
7326
7327                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7328                         goto tx_recovery;
7329
7330                 if (unlikely(work_done >= budget))
7331                         break;
7332
7333                 if (tg3_flag(tp, TAGGED_STATUS)) {
7334                         /* tnapi->last_tag is used in tg3_int_reenable() below
7335                          * to tell the hw how much work has been processed,
7336                          * so we must read it before checking for more work.
7337                          */
7338                         tnapi->last_tag = sblk->status_tag;
7339                         tnapi->last_irq_tag = tnapi->last_tag;
7340                         rmb();
7341                 } else
7342                         sblk->status &= ~SD_STATUS_UPDATED;
7343
7344                 if (likely(!tg3_has_work(tnapi))) {
7345                         napi_complete(napi);
7346                         tg3_int_reenable(tnapi);
7347                         break;
7348                 }
7349         }
7350
7351         return work_done;
7352
7353 tx_recovery:
7354         /* work_done is guaranteed to be less than budget. */
7355         napi_complete(napi);
7356         tg3_reset_task_schedule(tp);
7357         return work_done;
7358 }
7359
7360 static void tg3_napi_disable(struct tg3 *tp)
7361 {
7362         int i;
7363
7364         for (i = tp->irq_cnt - 1; i >= 0; i--)
7365                 napi_disable(&tp->napi[i].napi);
7366 }
7367
7368 static void tg3_napi_enable(struct tg3 *tp)
7369 {
7370         int i;
7371
7372         for (i = 0; i < tp->irq_cnt; i++)
7373                 napi_enable(&tp->napi[i].napi);
7374 }
7375
7376 static void tg3_napi_init(struct tg3 *tp)
7377 {
7378         int i;
7379
7380         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7381         for (i = 1; i < tp->irq_cnt; i++)
7382                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7383 }
7384
7385 static void tg3_napi_fini(struct tg3 *tp)
7386 {
7387         int i;
7388
7389         for (i = 0; i < tp->irq_cnt; i++)
7390                 netif_napi_del(&tp->napi[i].napi);
7391 }
7392
7393 static inline void tg3_netif_stop(struct tg3 *tp)
7394 {
7395         tp->dev->trans_start = jiffies; /* prevent tx timeout */
7396         tg3_napi_disable(tp);
7397         netif_carrier_off(tp->dev);
7398         netif_tx_disable(tp->dev);
7399 }
7400
7401 /* tp->lock must be held */
7402 static inline void tg3_netif_start(struct tg3 *tp)
7403 {
7404         tg3_ptp_resume(tp);
7405
7406         /* NOTE: unconditional netif_tx_wake_all_queues is only
7407          * appropriate so long as all callers are assured to
7408          * have free tx slots (such as after tg3_init_hw)
7409          */
7410         netif_tx_wake_all_queues(tp->dev);
7411
7412         if (tp->link_up)
7413                 netif_carrier_on(tp->dev);
7414
7415         tg3_napi_enable(tp);
7416         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7417         tg3_enable_ints(tp);
7418 }
7419
7420 static void tg3_irq_quiesce(struct tg3 *tp)
7421 {
7422         int i;
7423
7424         BUG_ON(tp->irq_sync);
7425
7426         tp->irq_sync = 1;
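             /* Make the irq_sync store visible before synchronize_irq()
              * waits out handlers already in flight; the IRQ handlers bail
              * out early via tg3_irq_sync().
              */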
7427         smp_mb();
7428
7429         for (i = 0; i < tp->irq_cnt; i++)
7430                 synchronize_irq(tp->napi[i].irq_vec);
7431 }
7432
7433 /* Fully shut down all tg3 driver activity elsewhere in the system.
7434  * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
7435  * Most of the time this is only necessary when shutting down the
7436  * device.
7437  */
7438 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7439 {
7440         spin_lock_bh(&tp->lock);
7441         if (irq_sync)
7442                 tg3_irq_quiesce(tp);
7443 }
7444
7445 static inline void tg3_full_unlock(struct tg3 *tp)
7446 {
7447         spin_unlock_bh(&tp->lock);
7448 }
7449
7450 /* One-shot MSI handler - the chip automatically disables the interrupt
7451  * after sending the MSI, so the driver doesn't have to do it.
7452  */
7453 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7454 {
7455         struct tg3_napi *tnapi = dev_id;
7456         struct tg3 *tp = tnapi->tp;
7457
7458         prefetch(tnapi->hw_status);
7459         if (tnapi->rx_rcb)
7460                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7461
7462         if (likely(!tg3_irq_sync(tp)))
7463                 napi_schedule(&tnapi->napi);
7464
7465         return IRQ_HANDLED;
7466 }
7467
7468 /* MSI ISR - No need to check for interrupt sharing and no need to
7469  * flush status block and interrupt mailbox. PCI ordering rules
7470  * guarantee that MSI will arrive after the status block.
7471  */
7472 static irqreturn_t tg3_msi(int irq, void *dev_id)
7473 {
7474         struct tg3_napi *tnapi = dev_id;
7475         struct tg3 *tp = tnapi->tp;
7476
7477         prefetch(tnapi->hw_status);
7478         if (tnapi->rx_rcb)
7479                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7480         /*
7481          * Writing any value to intr-mbox-0 clears PCI INTA# and
7482          * chip-internal interrupt pending events.
7483          * Writing non-zero to intr-mbox-0 additionally tells the
7484          * NIC to stop sending us irqs, engaging "in-intr-handler"
7485          * event coalescing.
7486          */
7487         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7488         if (likely(!tg3_irq_sync(tp)))
7489                 napi_schedule(&tnapi->napi);
7490
7491         return IRQ_RETVAL(1);
7492 }
7493
7494 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7495 {
7496         struct tg3_napi *tnapi = dev_id;
7497         struct tg3 *tp = tnapi->tp;
7498         struct tg3_hw_status *sblk = tnapi->hw_status;
7499         unsigned int handled = 1;
7500
7501         /* In INTx mode, it is possible for the interrupt to arrive at
7502          * the CPU before the status block that was posted prior to the
7503          * interrupt.  Reading the PCI State register will confirm whether
7504          * the interrupt is ours and will flush the status block.
7505          */
7506         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7507                 if (tg3_flag(tp, CHIP_RESETTING) ||
7508                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7509                         handled = 0;
7510                         goto out;
7511                 }
7512         }
7513
7514         /*
7515          * Writing any value to intr-mbox-0 clears PCI INTA# and
7516          * chip-internal interrupt pending events.
7517          * Writing non-zero to intr-mbox-0 additionally tells the
7518          * NIC to stop sending us irqs, engaging "in-intr-handler"
7519          * event coalescing.
7520          *
7521          * Flush the mailbox to de-assert the IRQ immediately to prevent
7522          * spurious interrupts.  The flush impacts performance but
7523          * excessive spurious interrupts can be worse in some cases.
7524          */
7525         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7526         if (tg3_irq_sync(tp))
7527                 goto out;
7528         sblk->status &= ~SD_STATUS_UPDATED;
7529         if (likely(tg3_has_work(tnapi))) {
7530                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7531                 napi_schedule(&tnapi->napi);
7532         } else {
7533                 /* No work, shared interrupt perhaps?  Re-enable
7534                  * interrupts, and flush that PCI write
7535                  */
7536                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7537                                0x00000000);
7538         }
7539 out:
7540         return IRQ_RETVAL(handled);
7541 }
7542
7543 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7544 {
7545         struct tg3_napi *tnapi = dev_id;
7546         struct tg3 *tp = tnapi->tp;
7547         struct tg3_hw_status *sblk = tnapi->hw_status;
7548         unsigned int handled = 1;
7549
7550         /* In INTx mode, it is possible for the interrupt to arrive at
7551          * the CPU before the status block that was posted prior to the
7552          * interrupt.  Reading the PCI State register will confirm whether
7553          * the interrupt is ours and will flush the status block.
7554          */
7555         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7556                 if (tg3_flag(tp, CHIP_RESETTING) ||
7557                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7558                         handled = 0;
7559                         goto out;
7560                 }
7561         }
7562
7563         /*
7564          * Writing any value to intr-mbox-0 clears PCI INTA# and
7565          * chip-internal interrupt pending events.
7566          * Writing non-zero to intr-mbox-0 additionally tells the
7567          * NIC to stop sending us irqs, engaging "in-intr-handler"
7568          * event coalescing.
7569          *
7570          * Flush the mailbox to de-assert the IRQ immediately to prevent
7571          * spurious interrupts.  The flush impacts performance but
7572          * excessive spurious interrupts can be worse in some cases.
7573          */
7574         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7575
7576         /*
7577          * In a shared interrupt configuration, sometimes other devices'
7578          * interrupts will scream.  We record the current status tag here
7579          * so that the above check can report that the screaming interrupts
7580          * are unhandled.  Eventually they will be silenced.
7581          */
7582         tnapi->last_irq_tag = sblk->status_tag;
7583
7584         if (tg3_irq_sync(tp))
7585                 goto out;
7586
7587         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7588
7589         napi_schedule(&tnapi->napi);
7590
7591 out:
7592         return IRQ_RETVAL(handled);
7593 }
7594
7595 /* ISR for the interrupt test; disables chip interrupts once one is seen */
7596 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7597 {
7598         struct tg3_napi *tnapi = dev_id;
7599         struct tg3 *tp = tnapi->tp;
7600         struct tg3_hw_status *sblk = tnapi->hw_status;
7601
7602         if ((sblk->status & SD_STATUS_UPDATED) ||
7603             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7604                 tg3_disable_ints(tp);
7605                 return IRQ_RETVAL(1);
7606         }
7607         return IRQ_RETVAL(0);
7608 }
7609
7610 #ifdef CONFIG_NET_POLL_CONTROLLER
7611 static void tg3_poll_controller(struct net_device *dev)
7612 {
7613         int i;
7614         struct tg3 *tp = netdev_priv(dev);
7615
7616         if (tg3_irq_sync(tp))
7617                 return;
7618
7619         for (i = 0; i < tp->irq_cnt; i++)
7620                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7621 }
7622 #endif
7623
7624 static void tg3_tx_timeout(struct net_device *dev)
7625 {
7626         struct tg3 *tp = netdev_priv(dev);
7627
7628         if (netif_msg_tx_err(tp)) {
7629                 netdev_err(dev, "transmit timed out, resetting\n");
7630                 tg3_dump_state(tp);
7631         }
7632
7633         tg3_reset_task_schedule(tp);
7634 }
7635
7636 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7637 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7638 {
7639         u32 base = (u32) mapping & 0xffffffff;
7640
7641         return base + len + 8 < base;
7642 }
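     /* Worked example: mapping = 0xfffff000 and len = 0x2000 gives
      * base + len + 8 = 0x1008 after 32-bit truncation, which is below
      * base, so the buffer is flagged as crossing a 4GB boundary.  The +8
      * also flags buffers that merely end within 8 bytes of a boundary.
      */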
7643
7644 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7645  * of any 4GB boundaries: 4G, 8G, etc
7646  */
7647 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7648                                            u32 len, u32 mss)
7649 {
7650         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7651                 u32 base = (u32) mapping & 0xffffffff;
7652
7653                 return ((base + len + (mss & 0x3fff)) < base);
7654         }
7655         return 0;
7656 }
7657
7658 /* Test for DMA addresses > 40-bit */
7659 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7660                                           int len)
7661 {
7662 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7663         if (tg3_flag(tp, 40BIT_DMA_BUG))
7664                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7665         return 0;
7666 #else
7667         return 0;
7668 #endif
7669 }
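     /* Only the 64-bit + CONFIG_HIGHMEM configuration compiles the test
      * in; elsewhere mappings beyond 40 bits are evidently not a concern
      * and the helper collapses to 0.
      */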
7670
7671 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7672                                  dma_addr_t mapping, u32 len, u32 flags,
7673                                  u32 mss, u32 vlan)
7674 {
7675         txbd->addr_hi = ((u64) mapping >> 32);
7676         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7677         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7678         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7679 }
7680
7681 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7682                             dma_addr_t map, u32 len, u32 flags,
7683                             u32 mss, u32 vlan)
7684 {
7685         struct tg3 *tp = tnapi->tp;
7686         bool hwbug = false;
7687
7688         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7689                 hwbug = true;
7690
7691         if (tg3_4g_overflow_test(map, len))
7692                 hwbug = true;
7693
7694         if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7695                 hwbug = true;
7696
7697         if (tg3_40bit_overflow_test(tp, map, len))
7698                 hwbug = true;
7699
7700         if (tp->dma_limit) {
7701                 u32 prvidx = *entry;
7702                 u32 tmp_flag = flags & ~TXD_FLAG_END;
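                     /* Split the mapping into dma_limit-sized BDs.  If
                      * peeling off a full chunk would leave a tail of 8
                      * bytes or less, emit only half a chunk now so the
                      * final BD stays above the 8-byte limit tested as
                      * SHORT_DMA_BUG above.
                      */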
7703                 while (len > tp->dma_limit && *budget) {
7704                         u32 frag_len = tp->dma_limit;
7705                         len -= tp->dma_limit;
7706
7707                         /* Avoid the 8-byte DMA problem */
7708                         if (len <= 8) {
7709                                 len += tp->dma_limit / 2;
7710                                 frag_len = tp->dma_limit / 2;
7711                         }
7712
7713                         tnapi->tx_buffers[*entry].fragmented = true;
7714
7715                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7716                                       frag_len, tmp_flag, mss, vlan);
7717                         *budget -= 1;
7718                         prvidx = *entry;
7719                         *entry = NEXT_TX(*entry);
7720
7721                         map += frag_len;
7722                 }
7723
7724                 if (len) {
7725                         if (*budget) {
7726                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7727                                               len, flags, mss, vlan);
7728                                 *budget -= 1;
7729                                 *entry = NEXT_TX(*entry);
7730                         } else {
7731                                 hwbug = true;
7732                                 tnapi->tx_buffers[prvidx].fragmented = false;
7733                         }
7734                 }
7735         } else {
7736                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7737                               len, flags, mss, vlan);
7738                 *entry = NEXT_TX(*entry);
7739         }
7740
7741         return hwbug;
7742 }
7743
7744 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7745 {
7746         int i;
7747         struct sk_buff *skb;
7748         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7749
7750         skb = txb->skb;
7751         txb->skb = NULL;
7752
7753         pci_unmap_single(tnapi->tp->pdev,
7754                          dma_unmap_addr(txb, mapping),
7755                          skb_headlen(skb),
7756                          PCI_DMA_TODEVICE);
7757
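             /* Walk past any extra BDs that tg3_tx_frag_set() emitted when
              * it split this mapping to honor tp->dma_limit; they share
              * the one mapping just unmapped above.
              */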
7758         while (txb->fragmented) {
7759                 txb->fragmented = false;
7760                 entry = NEXT_TX(entry);
7761                 txb = &tnapi->tx_buffers[entry];
7762         }
7763
7764         for (i = 0; i <= last; i++) {
7765                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7766
7767                 entry = NEXT_TX(entry);
7768                 txb = &tnapi->tx_buffers[entry];
7769
7770                 pci_unmap_page(tnapi->tp->pdev,
7771                                dma_unmap_addr(txb, mapping),
7772                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7773
7774                 while (txb->fragmented) {
7775                         txb->fragmented = false;
7776                         entry = NEXT_TX(entry);
7777                         txb = &tnapi->tx_buffers[entry];
7778                 }
7779         }
7780 }
7781
7782 /* Work around 4GB and 40-bit hardware DMA bugs. */
7783 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7784                                        struct sk_buff **pskb,
7785                                        u32 *entry, u32 *budget,
7786                                        u32 base_flags, u32 mss, u32 vlan)
7787 {
7788         struct tg3 *tp = tnapi->tp;
7789         struct sk_buff *new_skb, *skb = *pskb;
7790         dma_addr_t new_addr = 0;
7791         int ret = 0;
7792
7793         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7794                 new_skb = skb_copy(skb, GFP_ATOMIC);
7795         else {
7796                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7797
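                     /* Copy with enough extra headroom that the packet
                      * data can be placed 4-byte aligned; presumably the
                      * 5701 cannot handle TX buffers at odd alignments.
                      */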
7798                 new_skb = skb_copy_expand(skb,
7799                                           skb_headroom(skb) + more_headroom,
7800                                           skb_tailroom(skb), GFP_ATOMIC);
7801         }
7802
7803         if (!new_skb) {
7804                 ret = -1;
7805         } else {
7806                 /* New SKB is guaranteed to be linear. */
7807                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7808                                           PCI_DMA_TODEVICE);
7809                 /* Make sure the mapping succeeded */
7810                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7811                         dev_kfree_skb(new_skb);
7812                         ret = -1;
7813                 } else {
7814                         u32 save_entry = *entry;
7815
7816                         base_flags |= TXD_FLAG_END;
7817
7818                         tnapi->tx_buffers[*entry].skb = new_skb;
7819                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7820                                            mapping, new_addr);
7821
7822                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7823                                             new_skb->len, base_flags,
7824                                             mss, vlan)) {
7825                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7826                                 dev_kfree_skb(new_skb);
7827                                 ret = -1;
7828                         }
7829                 }
7830         }
7831
7832         dev_kfree_skb(skb);
7833         *pskb = new_skb;
7834         return ret;
7835 }
7836
7837 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7838
7839 /* Use GSO to work around a rare TSO bug that may be triggered when the
7840  * TSO header is greater than 80 bytes.
7841  */
7842 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7843 {
7844         struct sk_buff *segs, *nskb;
7845         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7846
7847         /* Estimate the number of fragments in the worst case */
7848         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7849                 netif_stop_queue(tp->dev);
7850
7851                 /* netif_tx_stop_queue() must be done before checking
7852                  * tx index in tg3_tx_avail() below, because in
7853                  * tg3_tx(), we update tx index before checking for
7854                  * netif_tx_queue_stopped().
7855                  */
7856                 smp_mb();
7857                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7858                         return NETDEV_TX_BUSY;
7859
7860                 netif_wake_queue(tp->dev);
7861         }
7862
7863         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7864         if (IS_ERR(segs))
7865                 goto tg3_tso_bug_end;
7866
7867         do {
7868                 nskb = segs;
7869                 segs = segs->next;
7870                 nskb->next = NULL;
7871                 tg3_start_xmit(nskb, tp->dev);
7872         } while (segs);
7873
7874 tg3_tso_bug_end:
7875         dev_kfree_skb(skb);
7876
7877         return NETDEV_TX_OK;
7878 }
7879
7880 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7881  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7882  */
7883 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7884 {
7885         struct tg3 *tp = netdev_priv(dev);
7886         u32 len, entry, base_flags, mss, vlan = 0;
7887         u32 budget;
7888         int i = -1, would_hit_hwbug;
7889         dma_addr_t mapping;
7890         struct tg3_napi *tnapi;
7891         struct netdev_queue *txq;
7892         unsigned int last;
7893
7894         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7895         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7896         if (tg3_flag(tp, ENABLE_TSS))
7897                 tnapi++;
7898
7899         budget = tg3_tx_avail(tnapi);
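             /* budget is the number of free TX descriptors.  Where BDs
              * must be split (tp->dma_limit), tg3_tx_frag_set() draws
              * budget down per BD written, so an over-long chain is
              * flagged and unwound rather than overrunning the ring.
              */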
7900
7901         /* We are running in a BH-disabled context with netif_tx_lock
7902          * and TX reclaim runs via tp->napi.poll inside of a software
7903          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7904          * no IRQ context deadlocks to worry about either.  Rejoice!
7905          */
7906         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7907                 if (!netif_tx_queue_stopped(txq)) {
7908                         netif_tx_stop_queue(txq);
7909
7910                         /* This is a hard error, log it. */
7911                         netdev_err(dev,
7912                                    "BUG! Tx Ring full when queue awake!\n");
7913                 }
7914                 return NETDEV_TX_BUSY;
7915         }
7916
7917         entry = tnapi->tx_prod;
7918         base_flags = 0;
7919         if (skb->ip_summed == CHECKSUM_PARTIAL)
7920                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7921
7922         mss = skb_shinfo(skb)->gso_size;
7923         if (mss) {
7924                 struct iphdr *iph;
7925                 u32 tcp_opt_len, hdr_len;
7926
7927                 if (skb_header_cloned(skb) &&
7928                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7929                         goto drop;
7930
7931                 iph = ip_hdr(skb);
7932                 tcp_opt_len = tcp_optlen(skb);
7933
7934                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7935
7936                 if (!skb_is_gso_v6(skb)) {
7937                         iph->check = 0;
7938                         iph->tot_len = htons(mss + hdr_len);
7939                 }
7940
7941                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7942                     tg3_flag(tp, TSO_BUG))
7943                         return tg3_tso_bug(tp, skb);
7944
7945                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7946                                TXD_FLAG_CPU_POST_DMA);
7947
7948                 if (tg3_flag(tp, HW_TSO_1) ||
7949                     tg3_flag(tp, HW_TSO_2) ||
7950                     tg3_flag(tp, HW_TSO_3)) {
7951                         tcp_hdr(skb)->check = 0;
7952                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7953                 } else
7954                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7955                                                                  iph->daddr, 0,
7956                                                                  IPPROTO_TCP,
7957                                                                  0);
7958
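                     /* Each hardware TSO generation wants the TSO header
                      * length encoded differently: HW_TSO_3 scatters
                      * hdr_len across spare mss and flag bits, HW_TSO_2
                      * packs it into the upper mss bits, and older parts
                      * encode the IP/TCP option length instead.
                      */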
7959                 if (tg3_flag(tp, HW_TSO_3)) {
7960                         mss |= (hdr_len & 0xc) << 12;
7961                         if (hdr_len & 0x10)
7962                                 base_flags |= 0x00000010;
7963                         base_flags |= (hdr_len & 0x3e0) << 5;
7964                 } else if (tg3_flag(tp, HW_TSO_2))
7965                         mss |= hdr_len << 9;
7966                 else if (tg3_flag(tp, HW_TSO_1) ||
7967                          tg3_asic_rev(tp) == ASIC_REV_5705) {
7968                         if (tcp_opt_len || iph->ihl > 5) {
7969                                 int tsflags;
7970
7971                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7972                                 mss |= (tsflags << 11);
7973                         }
7974                 } else {
7975                         if (tcp_opt_len || iph->ihl > 5) {
7976                                 int tsflags;
7977
7978                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7979                                 base_flags |= tsflags << 12;
7980                         }
7981                 }
7982         }
7983
7984         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7985             !mss && skb->len > VLAN_ETH_FRAME_LEN)
7986                 base_flags |= TXD_FLAG_JMB_PKT;
7987
7988         if (vlan_tx_tag_present(skb)) {
7989                 base_flags |= TXD_FLAG_VLAN;
7990                 vlan = vlan_tx_tag_get(skb);
7991         }
7992
7993         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7994             tg3_flag(tp, TX_TSTAMP_EN)) {
7995                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7996                 base_flags |= TXD_FLAG_HWTSTAMP;
7997         }
7998
7999         len = skb_headlen(skb);
8000
8001         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8002         if (pci_dma_mapping_error(tp->pdev, mapping))
8003                 goto drop;
8004
8005
8006         tnapi->tx_buffers[entry].skb = skb;
8007         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8008
8009         would_hit_hwbug = 0;
8010
8011         if (tg3_flag(tp, 5701_DMA_BUG))
8012                 would_hit_hwbug = 1;
8013
8014         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8015                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8016                             mss, vlan)) {
8017                 would_hit_hwbug = 1;
8018         } else if (skb_shinfo(skb)->nr_frags > 0) {
8019                 u32 tmp_mss = mss;
8020
8021                 if (!tg3_flag(tp, HW_TSO_1) &&
8022                     !tg3_flag(tp, HW_TSO_2) &&
8023                     !tg3_flag(tp, HW_TSO_3))
8024                         tmp_mss = 0;
8025
8026                 /* Now loop through additional data
8027                  * fragments, and queue them.
8028                  */
8029                 last = skb_shinfo(skb)->nr_frags - 1;
8030                 for (i = 0; i <= last; i++) {
8031                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8032
8033                         len = skb_frag_size(frag);
8034                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8035                                                    len, DMA_TO_DEVICE);
8036
8037                         tnapi->tx_buffers[entry].skb = NULL;
8038                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8039                                            mapping);
8040                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8041                                 goto dma_error;
8042
8043                         if (!budget ||
8044                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8045                                             len, base_flags |
8046                                             ((i == last) ? TXD_FLAG_END : 0),
8047                                             tmp_mss, vlan)) {
8048                                 would_hit_hwbug = 1;
8049                                 break;
8050                         }
8051                 }
8052         }
8053
8054         if (would_hit_hwbug) {
8055                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8056
8057                 /* If the workaround fails due to memory/mapping
8058                  * failure, silently drop this packet.
8059                  */
8060                 entry = tnapi->tx_prod;
8061                 budget = tg3_tx_avail(tnapi);
8062                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8063                                                 base_flags, mss, vlan))
8064                         goto drop_nofree;
8065         }
8066
8067         skb_tx_timestamp(skb);
8068         netdev_tx_sent_queue(txq, skb->len);
8069
8070         /* Sync BD data before updating mailbox */
8071         wmb();
8072
8073         /* Packets are ready, update Tx producer idx local and on card. */
8074         tw32_tx_mbox(tnapi->prodmbox, entry);
8075
8076         tnapi->tx_prod = entry;
8077         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8078                 netif_tx_stop_queue(txq);
8079
8080                 /* netif_tx_stop_queue() must be done before checking
8081                  * tx index in tg3_tx_avail() below, because in
8082                  * tg3_tx(), we update tx index before checking for
8083                  * netif_tx_queue_stopped().
8084                  */
8085                 smp_mb();
8086                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8087                         netif_tx_wake_queue(txq);
8088         }
8089
8090         mmiowb();
8091         return NETDEV_TX_OK;
8092
8093 dma_error:
8094         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8095         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8096 drop:
8097         dev_kfree_skb(skb);
8098 drop_nofree:
8099         tp->tx_dropped++;
8100         return NETDEV_TX_OK;
8101 }
8102
8103 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8104 {
8105         if (enable) {
8106                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8107                                   MAC_MODE_PORT_MODE_MASK);
8108
8109                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8110
8111                 if (!tg3_flag(tp, 5705_PLUS))
8112                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8113
8114                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8115                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8116                 else
8117                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8118         } else {
8119                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8120
8121                 if (tg3_flag(tp, 5705_PLUS) ||
8122                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8123                     tg3_asic_rev(tp) == ASIC_REV_5700)
8124                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8125         }
8126
8127         tw32(MAC_MODE, tp->mac_mode);
8128         udelay(40);
8129 }
8130
8131 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8132 {
8133         u32 val, bmcr, mac_mode, ptest = 0;
8134
8135         tg3_phy_toggle_apd(tp, false);
8136         tg3_phy_toggle_automdix(tp, false);
8137
8138         if (extlpbk && tg3_phy_set_extloopbk(tp))
8139                 return -EIO;
8140
8141         bmcr = BMCR_FULLDPLX;
8142         switch (speed) {
8143         case SPEED_10:
8144                 break;
8145         case SPEED_100:
8146                 bmcr |= BMCR_SPEED100;
8147                 break;
8148         case SPEED_1000:
8149         default:
8150                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8151                         speed = SPEED_100;
8152                         bmcr |= BMCR_SPEED100;
8153                 } else {
8154                         speed = SPEED_1000;
8155                         bmcr |= BMCR_SPEED1000;
8156                 }
8157         }
8158
8159         if (extlpbk) {
8160                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8161                         tg3_readphy(tp, MII_CTRL1000, &val);
8162                         val |= CTL1000_AS_MASTER |
8163                                CTL1000_ENABLE_MASTER;
8164                         tg3_writephy(tp, MII_CTRL1000, val);
8165                 } else {
8166                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8167                                 MII_TG3_FET_PTEST_TRIM_2;
8168                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8169                 }
8170         } else
8171                 bmcr |= BMCR_LOOPBACK;
8172
8173         tg3_writephy(tp, MII_BMCR, bmcr);
8174
8175         /* The write needs to be flushed for the FETs */
8176         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8177                 tg3_readphy(tp, MII_BMCR, &bmcr);
8178
8179         udelay(40);
8180
8181         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8182             tg3_asic_rev(tp) == ASIC_REV_5785) {
8183                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8184                              MII_TG3_FET_PTEST_FRC_TX_LINK |
8185                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
8186
8187                 /* The write needs to be flushed for the AC131 */
8188                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8189         }
8190
8191         /* Reset to prevent losing 1st rx packet intermittently */
8192         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8193             tg3_flag(tp, 5780_CLASS)) {
8194                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8195                 udelay(10);
8196                 tw32_f(MAC_RX_MODE, tp->rx_mode);
8197         }
8198
8199         mac_mode = tp->mac_mode &
8200                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8201         if (speed == SPEED_1000)
8202                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8203         else
8204                 mac_mode |= MAC_MODE_PORT_MODE_MII;
8205
8206         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8207                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8208
8209                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8210                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8211                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8212                         mac_mode |= MAC_MODE_LINK_POLARITY;
8213
8214                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8215                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8216         }
8217
8218         tw32(MAC_MODE, mac_mode);
8219         udelay(40);
8220
8221         return 0;
8222 }
8223
8224 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8225 {
8226         struct tg3 *tp = netdev_priv(dev);
8227
8228         if (features & NETIF_F_LOOPBACK) {
8229                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8230                         return;
8231
8232                 spin_lock_bh(&tp->lock);
8233                 tg3_mac_loopback(tp, true);
8234                 netif_carrier_on(tp->dev);
8235                 spin_unlock_bh(&tp->lock);
8236                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8237         } else {
8238                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8239                         return;
8240
8241                 spin_lock_bh(&tp->lock);
8242                 tg3_mac_loopback(tp, false);
8243                 /* Force link status check */
8244                 tg3_setup_phy(tp, true);
8245                 spin_unlock_bh(&tp->lock);
8246                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8247         }
8248 }
8249
8250 static netdev_features_t tg3_fix_features(struct net_device *dev,
8251         netdev_features_t features)
8252 {
8253         struct tg3 *tp = netdev_priv(dev);
8254
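             /* 5780-class parts evidently cannot do TSO together with
              * jumbo MTUs, so all TSO features are masked out once the
              * MTU exceeds the standard Ethernet payload.
              */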
8255         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8256                 features &= ~NETIF_F_ALL_TSO;
8257
8258         return features;
8259 }
8260
8261 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8262 {
8263         netdev_features_t changed = dev->features ^ features;
8264
8265         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8266                 tg3_set_loopback(dev, features);
8267
8268         return 0;
8269 }
8270
8271 static void tg3_rx_prodring_free(struct tg3 *tp,
8272                                  struct tg3_rx_prodring_set *tpr)
8273 {
8274         int i;
8275
8276         if (tpr != &tp->napi[0].prodring) {
8277                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8278                      i = (i + 1) & tp->rx_std_ring_mask)
8279                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8280                                         tp->rx_pkt_map_sz);
8281
8282                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8283                         for (i = tpr->rx_jmb_cons_idx;
8284                              i != tpr->rx_jmb_prod_idx;
8285                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8286                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8287                                                 TG3_RX_JMB_MAP_SZ);
8288                         }
8289                 }
8290
8291                 return;
8292         }
8293
8294         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8295                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8296                                 tp->rx_pkt_map_sz);
8297
8298         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8299                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8300                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8301                                         TG3_RX_JMB_MAP_SZ);
8302         }
8303 }
8304
8305 /* Initialize rx rings for packet processing.
8306  *
8307  * The chip has been shut down and the driver detached from
8308  * the networking core, so no interrupts or new tx packets will
8309  * end up in the driver.  tp->{tx,}lock are held and thus
8310  * we may not sleep.
8311  */
8312 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8313                                  struct tg3_rx_prodring_set *tpr)
8314 {
8315         u32 i, rx_pkt_dma_sz;
8316
8317         tpr->rx_std_cons_idx = 0;
8318         tpr->rx_std_prod_idx = 0;
8319         tpr->rx_jmb_cons_idx = 0;
8320         tpr->rx_jmb_prod_idx = 0;
8321
8322         if (tpr != &tp->napi[0].prodring) {
8323                 memset(&tpr->rx_std_buffers[0], 0,
8324                        TG3_RX_STD_BUFF_RING_SIZE(tp));
8325                 if (tpr->rx_jmb_buffers)
8326                         memset(&tpr->rx_jmb_buffers[0], 0,
8327                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
8328                 goto done;
8329         }
8330
8331         /* Zero out all descriptors. */
8332         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8333
8334         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8335         if (tg3_flag(tp, 5780_CLASS) &&
8336             tp->dev->mtu > ETH_DATA_LEN)
8337                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8338         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8339
8340         /* Initialize invariants of the rings; we only set this
8341          * stuff once.  This works because the card does not
8342          * write into the rx buffer posting rings.
8343          */
8344         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8345                 struct tg3_rx_buffer_desc *rxd;
8346
8347                 rxd = &tpr->rx_std[i];
8348                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8349                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8350                 rxd->opaque = (RXD_OPAQUE_RING_STD |
8351                                (i << RXD_OPAQUE_INDEX_SHIFT));
8352         }
8353
8354         /* Now allocate fresh SKBs for each rx ring. */
8355         for (i = 0; i < tp->rx_pending; i++) {
8356                 unsigned int frag_size;
8357
8358                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8359                                       &frag_size) < 0) {
8360                         netdev_warn(tp->dev,
8361                                     "Using a smaller RX standard ring. Only "
8362                                     "%d out of %d buffers were allocated "
8363                                     "successfully\n", i, tp->rx_pending);
8364                         if (i == 0)
8365                                 goto initfail;
8366                         tp->rx_pending = i;
8367                         break;
8368                 }
8369         }
8370
8371         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8372                 goto done;
8373
8374         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8375
8376         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8377                 goto done;
8378
8379         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8380                 struct tg3_rx_buffer_desc *rxd;
8381
8382                 rxd = &tpr->rx_jmb[i].std;
8383                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8384                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8385                                   RXD_FLAG_JUMBO;
8386                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8387                        (i << RXD_OPAQUE_INDEX_SHIFT));
8388         }
8389
8390         for (i = 0; i < tp->rx_jumbo_pending; i++) {
8391                 unsigned int frag_size;
8392
8393                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8394                                       &frag_size) < 0) {
8395                         netdev_warn(tp->dev,
8396                                     "Using a smaller RX jumbo ring. Only %d "
8397                                     "out of %d buffers were allocated "
8398                                     "successfully\n", i, tp->rx_jumbo_pending);
8399                         if (i == 0)
8400                                 goto initfail;
8401                         tp->rx_jumbo_pending = i;
8402                         break;
8403                 }
8404         }
8405
8406 done:
8407         return 0;
8408
8409 initfail:
8410         tg3_rx_prodring_free(tp, tpr);
8411         return -ENOMEM;
8412 }
8413
8414 static void tg3_rx_prodring_fini(struct tg3 *tp,
8415                                  struct tg3_rx_prodring_set *tpr)
8416 {
8417         kfree(tpr->rx_std_buffers);
8418         tpr->rx_std_buffers = NULL;
8419         kfree(tpr->rx_jmb_buffers);
8420         tpr->rx_jmb_buffers = NULL;
8421         if (tpr->rx_std) {
8422                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8423                                   tpr->rx_std, tpr->rx_std_mapping);
8424                 tpr->rx_std = NULL;
8425         }
8426         if (tpr->rx_jmb) {
8427                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8428                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8429                 tpr->rx_jmb = NULL;
8430         }
8431 }
8432
8433 static int tg3_rx_prodring_init(struct tg3 *tp,
8434                                 struct tg3_rx_prodring_set *tpr)
8435 {
8436         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8437                                       GFP_KERNEL);
8438         if (!tpr->rx_std_buffers)
8439                 return -ENOMEM;
8440
8441         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8442                                          TG3_RX_STD_RING_BYTES(tp),
8443                                          &tpr->rx_std_mapping,
8444                                          GFP_KERNEL);
8445         if (!tpr->rx_std)
8446                 goto err_out;
8447
8448         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8449                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8450                                               GFP_KERNEL);
8451                 if (!tpr->rx_jmb_buffers)
8452                         goto err_out;
8453
8454                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8455                                                  TG3_RX_JMB_RING_BYTES(tp),
8456                                                  &tpr->rx_jmb_mapping,
8457                                                  GFP_KERNEL);
8458                 if (!tpr->rx_jmb)
8459                         goto err_out;
8460         }
8461
8462         return 0;
8463
8464 err_out:
8465         tg3_rx_prodring_fini(tp, tpr);
8466         return -ENOMEM;
8467 }
8468
8469 /* Free up pending packets in all rx/tx rings.
8470  *
8471  * The chip has been shut down and the driver detached from
8472  * the networking core, so no interrupts or new tx packets will
8473  * end up in the driver.  tp->{tx,}lock is not held and we are not
8474  * in an interrupt context and thus may sleep.
8475  */
8476 static void tg3_free_rings(struct tg3 *tp)
8477 {
8478         int i, j;
8479
8480         for (j = 0; j < tp->irq_cnt; j++) {
8481                 struct tg3_napi *tnapi = &tp->napi[j];
8482
8483                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8484
8485                 if (!tnapi->tx_buffers)
8486                         continue;
8487
8488                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8489                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8490
8491                         if (!skb)
8492                                 continue;
8493
8494                         tg3_tx_skb_unmap(tnapi, i,
8495                                          skb_shinfo(skb)->nr_frags - 1);
8496
8497                         dev_kfree_skb_any(skb);
8498                 }
8499                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8500         }
8501 }
8502
8503 /* Initialize tx/rx rings for packet processing.
8504  *
8505  * The chip has been shut down and the driver detached from
8506  * the networking core, so no interrupts or new tx packets will
8507  * end up in the driver.  tp->{tx,}lock are held and thus
8508  * we may not sleep.
8509  */
8510 static int tg3_init_rings(struct tg3 *tp)
8511 {
8512         int i;
8513
8514         /* Free up all the SKBs. */
8515         tg3_free_rings(tp);
8516
8517         for (i = 0; i < tp->irq_cnt; i++) {
8518                 struct tg3_napi *tnapi = &tp->napi[i];
8519
8520                 tnapi->last_tag = 0;
8521                 tnapi->last_irq_tag = 0;
8522                 tnapi->hw_status->status = 0;
8523                 tnapi->hw_status->status_tag = 0;
8524                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8525
8526                 tnapi->tx_prod = 0;
8527                 tnapi->tx_cons = 0;
8528                 if (tnapi->tx_ring)
8529                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8530
8531                 tnapi->rx_rcb_ptr = 0;
8532                 if (tnapi->rx_rcb)
8533                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8534
8535                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8536                         tg3_free_rings(tp);
8537                         return -ENOMEM;
8538                 }
8539         }
8540
8541         return 0;
8542 }
8543
8544 static void tg3_mem_tx_release(struct tg3 *tp)
8545 {
8546         int i;
8547
8548         for (i = 0; i < tp->irq_max; i++) {
8549                 struct tg3_napi *tnapi = &tp->napi[i];
8550
8551                 if (tnapi->tx_ring) {
8552                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8553                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8554                         tnapi->tx_ring = NULL;
8555                 }
8556
8557                 kfree(tnapi->tx_buffers);
8558                 tnapi->tx_buffers = NULL;
8559         }
8560 }
8561
8562 static int tg3_mem_tx_acquire(struct tg3 *tp)
8563 {
8564         int i;
8565         struct tg3_napi *tnapi = &tp->napi[0];
8566
8567         /* If multivector TSS is enabled, vector 0 does not handle
8568          * tx interrupts.  Don't allocate any resources for it.
8569          */
8570         if (tg3_flag(tp, ENABLE_TSS))
8571                 tnapi++;
8572
8573         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8574                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8575                                             TG3_TX_RING_SIZE, GFP_KERNEL);
8576                 if (!tnapi->tx_buffers)
8577                         goto err_out;
8578
8579                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8580                                                     TG3_TX_RING_BYTES,
8581                                                     &tnapi->tx_desc_mapping,
8582                                                     GFP_KERNEL);
8583                 if (!tnapi->tx_ring)
8584                         goto err_out;
8585         }
8586
8587         return 0;
8588
8589 err_out:
8590         tg3_mem_tx_release(tp);
8591         return -ENOMEM;
8592 }
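
/* A minimal sketch of an overflow-safe variant of the tx_buffers
 * allocation above (editor's sketch, not in the driver): kcalloc()
 * zeroes the buffer like kzalloc() but also checks the count * size
 * multiplication for overflow.  Equivalent here because
 * TG3_TX_RING_SIZE is a small constant:
 *
 *	tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
 *				    sizeof(struct tg3_tx_ring_info),
 *				    GFP_KERNEL);
 */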
8593
8594 static void tg3_mem_rx_release(struct tg3 *tp)
8595 {
8596         int i;
8597
8598         for (i = 0; i < tp->irq_max; i++) {
8599                 struct tg3_napi *tnapi = &tp->napi[i];
8600
8601                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8602
8603                 if (!tnapi->rx_rcb)
8604                         continue;
8605
8606                 dma_free_coherent(&tp->pdev->dev,
8607                                   TG3_RX_RCB_RING_BYTES(tp),
8608                                   tnapi->rx_rcb,
8609                                   tnapi->rx_rcb_mapping);
8610                 tnapi->rx_rcb = NULL;
8611         }
8612 }
8613
8614 static int tg3_mem_rx_acquire(struct tg3 *tp)
8615 {
8616         unsigned int i, limit;
8617
8618         limit = tp->rxq_cnt;
8619
8620         /* If RSS is enabled, we need a (dummy) producer ring
8621          * set on vector zero.  This is the true hw prodring.
8622          */
8623         if (tg3_flag(tp, ENABLE_RSS))
8624                 limit++;
8625
8626         for (i = 0; i < limit; i++) {
8627                 struct tg3_napi *tnapi = &tp->napi[i];
8628
8629                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8630                         goto err_out;
8631
8632                 /* If multivector RSS is enabled, vector 0
8633                  * does not handle rx or tx interrupts.
8634                  * Don't allocate any resources for it.
8635                  */
8636                 if (!i && tg3_flag(tp, ENABLE_RSS))
8637                         continue;
8638
8639                 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8640                                                     TG3_RX_RCB_RING_BYTES(tp),
8641                                                     &tnapi->rx_rcb_mapping,
8642                                                     GFP_KERNEL);
8643                 if (!tnapi->rx_rcb)
8644                         goto err_out;
8645         }
8646
8647         return 0;
8648
8649 err_out:
8650         tg3_mem_rx_release(tp);
8651         return -ENOMEM;
8652 }
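
/* dma_zalloc_coherent() above is the zeroing form of
 * dma_alloc_coherent() (the returned buffer comes back cleared), so
 * the rx return ring starts out with no stale descriptors in it.
 */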
8653
8654 /*
8655  * Must not be invoked with interrupt sources disabled and
8656  * the hardware shut down.
8657  */
8658 static void tg3_free_consistent(struct tg3 *tp)
8659 {
8660         int i;
8661
8662         for (i = 0; i < tp->irq_cnt; i++) {
8663                 struct tg3_napi *tnapi = &tp->napi[i];
8664
8665                 if (tnapi->hw_status) {
8666                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8667                                           tnapi->hw_status,
8668                                           tnapi->status_mapping);
8669                         tnapi->hw_status = NULL;
8670                 }
8671         }
8672
8673         tg3_mem_rx_release(tp);
8674         tg3_mem_tx_release(tp);
8675
8676         if (tp->hw_stats) {
8677                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8678                                   tp->hw_stats, tp->stats_mapping);
8679                 tp->hw_stats = NULL;
8680         }
8681 }
8682
8683 /*
8684  * Must not be invoked with interrupt sources disabled and
8685  * the hardware shut down.  Can sleep.
8686  */
8687 static int tg3_alloc_consistent(struct tg3 *tp)
8688 {
8689         int i;
8690
8691         tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8692                                            sizeof(struct tg3_hw_stats),
8693                                            &tp->stats_mapping, GFP_KERNEL);
8694         if (!tp->hw_stats)
8695                 goto err_out;
8696
8697         for (i = 0; i < tp->irq_cnt; i++) {
8698                 struct tg3_napi *tnapi = &tp->napi[i];
8699                 struct tg3_hw_status *sblk;
8700
8701                 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8702                                                        TG3_HW_STATUS_SIZE,
8703                                                        &tnapi->status_mapping,
8704                                                        GFP_KERNEL);
8705                 if (!tnapi->hw_status)
8706                         goto err_out;
8707
8708                 sblk = tnapi->hw_status;
8709
8710                 if (tg3_flag(tp, ENABLE_RSS)) {
8711                         u16 *prodptr = NULL;
8712
8713                         /*
8714                          * When RSS is enabled, the status block format changes
8715                          * slightly.  The "rx_jumbo_consumer", "reserved",
8716                          * and "rx_mini_consumer" members get mapped to the
8717                          * other three rx return ring producer indexes.
8718                          */
8719                         switch (i) {
8720                         case 1:
8721                                 prodptr = &sblk->idx[0].rx_producer;
8722                                 break;
8723                         case 2:
8724                                 prodptr = &sblk->rx_jumbo_consumer;
8725                                 break;
8726                         case 3:
8727                                 prodptr = &sblk->reserved;
8728                                 break;
8729                         case 4:
8730                                 prodptr = &sblk->rx_mini_consumer;
8731                                 break;
8732                         }
8733                         tnapi->rx_rcb_prod_idx = prodptr;
8734                 } else {
8735                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8736                 }
8737         }
8738
8739         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8740                 goto err_out;
8741
8742         return 0;
8743
8744 err_out:
8745         tg3_free_consistent(tp);
8746         return -ENOMEM;
8747 }
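
/* Summary of the RSS repurposing above: the producer index for return
 * ring 1 stays in idx[0].rx_producer, while rings 2-4 borrow the
 * rx_jumbo_consumer, reserved, and rx_mini_consumer fields
 * respectively; those three keep their legacy names but hold producer
 * indices whenever RSS is enabled.
 */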
8748
8749 #define MAX_WAIT_CNT 1000
8750
8751 /* To stop a block, clear the enable bit and poll until it
8752  * clears.  tp->lock is held.
8753  */
8754 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8755 {
8756         unsigned int i;
8757         u32 val;
8758
8759         if (tg3_flag(tp, 5705_PLUS)) {
8760                 switch (ofs) {
8761                 case RCVLSC_MODE:
8762                 case DMAC_MODE:
8763                 case MBFREE_MODE:
8764                 case BUFMGR_MODE:
8765                 case MEMARB_MODE:
8766                         /* We can't enable/disable these bits on the
8767                          * 5705/5750, so just report success.
8768                          */
8769                         return 0;
8770
8771                 default:
8772                         break;
8773                 }
8774         }
8775
8776         val = tr32(ofs);
8777         val &= ~enable_bit;
8778         tw32_f(ofs, val);
8779
8780         for (i = 0; i < MAX_WAIT_CNT; i++) {
8781                 if (pci_channel_offline(tp->pdev)) {
8782                         dev_err(&tp->pdev->dev,
8783                                 "tg3_stop_block device offline, "
8784                                 "ofs=%lx enable_bit=%x\n",
8785                                 ofs, enable_bit);
8786                         return -ENODEV;
8787                 }
8788
8789                 udelay(100);
8790                 val = tr32(ofs);
8791                 if ((val & enable_bit) == 0)
8792                         break;
8793         }
8794
8795         if (i == MAX_WAIT_CNT && !silent) {
8796                 dev_err(&tp->pdev->dev,
8797                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8798                         ofs, enable_bit);
8799                 return -ENODEV;
8800         }
8801
8802         return 0;
8803 }
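
/* Worst case, the loop above spins MAX_WAIT_CNT (1000) times with a
 * 100 us delay per iteration, i.e. roughly 100 ms per block, before
 * tg3_stop_block() gives up and returns -ENODEV.
 */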
8804
8805 /* tp->lock is held. */
8806 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8807 {
8808         int i, err;
8809
8810         tg3_disable_ints(tp);
8811
8812         if (pci_channel_offline(tp->pdev)) {
8813                 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8814                 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8815                 err = -ENODEV;
8816                 goto err_no_dev;
8817         }
8818
8819         tp->rx_mode &= ~RX_MODE_ENABLE;
8820         tw32_f(MAC_RX_MODE, tp->rx_mode);
8821         udelay(10);
8822
8823         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8824         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8825         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8826         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8827         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8828         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8829
8830         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8831         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8832         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8833         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8834         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8835         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8836         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8837
8838         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8839         tw32_f(MAC_MODE, tp->mac_mode);
8840         udelay(40);
8841
8842         tp->tx_mode &= ~TX_MODE_ENABLE;
8843         tw32_f(MAC_TX_MODE, tp->tx_mode);
8844
8845         for (i = 0; i < MAX_WAIT_CNT; i++) {
8846                 udelay(100);
8847                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8848                         break;
8849         }
8850         if (i >= MAX_WAIT_CNT) {
8851                 dev_err(&tp->pdev->dev,
8852                         "%s timed out, TX_MODE_ENABLE will not clear "
8853                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8854                 err |= -ENODEV;
8855         }
8856
8857         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8858         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8859         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8860
8861         tw32(FTQ_RESET, 0xffffffff);
8862         tw32(FTQ_RESET, 0x00000000);
8863
8864         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8865         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8866
8867 err_no_dev:
8868         for (i = 0; i < tp->irq_cnt; i++) {
8869                 struct tg3_napi *tnapi = &tp->napi[i];
8870                 if (tnapi->hw_status)
8871                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8872         }
8873
8874         return err;
8875 }
8876
8877 /* Save PCI command register before chip reset */
8878 static void tg3_save_pci_state(struct tg3 *tp)
8879 {
8880         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8881 }
8882
8883 /* Restore PCI state after chip reset */
8884 static void tg3_restore_pci_state(struct tg3 *tp)
8885 {
8886         u32 val;
8887
8888         /* Re-enable indirect register accesses. */
8889         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8890                                tp->misc_host_ctrl);
8891
8892         /* Set MAX PCI retry to zero. */
8893         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8894         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8895             tg3_flag(tp, PCIX_MODE))
8896                 val |= PCISTATE_RETRY_SAME_DMA;
8897         /* Allow reads and writes to the APE register and memory space. */
8898         if (tg3_flag(tp, ENABLE_APE))
8899                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8900                        PCISTATE_ALLOW_APE_SHMEM_WR |
8901                        PCISTATE_ALLOW_APE_PSPACE_WR;
8902         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8903
8904         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8905
8906         if (!tg3_flag(tp, PCI_EXPRESS)) {
8907                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8908                                       tp->pci_cacheline_sz);
8909                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8910                                       tp->pci_lat_timer);
8911         }
8912
8913         /* Make sure PCI-X relaxed ordering bit is clear. */
8914         if (tg3_flag(tp, PCIX_MODE)) {
8915                 u16 pcix_cmd;
8916
8917                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8918                                      &pcix_cmd);
8919                 pcix_cmd &= ~PCI_X_CMD_ERO;
8920                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8921                                       pcix_cmd);
8922         }
8923
8924         if (tg3_flag(tp, 5780_CLASS)) {
8925
8926                 /* Chip reset on 5780 will reset MSI enable bit,
8927                  * so we need to restore it.
8928                  */
8929                 if (tg3_flag(tp, USING_MSI)) {
8930                         u16 ctrl;
8931
8932                         pci_read_config_word(tp->pdev,
8933                                              tp->msi_cap + PCI_MSI_FLAGS,
8934                                              &ctrl);
8935                         pci_write_config_word(tp->pdev,
8936                                               tp->msi_cap + PCI_MSI_FLAGS,
8937                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8938                         val = tr32(MSGINT_MODE);
8939                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8940                 }
8941         }
8942 }
8943
8944 static void tg3_override_clk(struct tg3 *tp)
8945 {
8946         u32 val;
8947
8948         switch (tg3_asic_rev(tp)) {
8949         case ASIC_REV_5717:
8950                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
8951                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
8952                      TG3_CPMU_MAC_ORIDE_ENABLE);
8953                 break;
8954
8955         case ASIC_REV_5719:
8956         case ASIC_REV_5720:
8957                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8958                 break;
8959
8960         default:
8961                 return;
8962         }
8963 }
8964
8965 static void tg3_restore_clk(struct tg3 *tp)
8966 {
8967         u32 val;
8968
8969         switch (tg3_asic_rev(tp)) {
8970         case ASIC_REV_5717:
8971                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
8972                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
8973                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
8974                 break;
8975
8976         case ASIC_REV_5719:
8977         case ASIC_REV_5720:
8978                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8979                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8980                 break;
8981
8982         default:
8983                 return;
8984         }
8985 }
8986
8987 /* tp->lock is held. */
8988 static int tg3_chip_reset(struct tg3 *tp)
8989 {
8990         u32 val;
8991         void (*write_op)(struct tg3 *, u32, u32);
8992         int i, err;
8993
8994         if (!pci_device_is_present(tp->pdev))
8995                 return -ENODEV;
8996
8997         tg3_nvram_lock(tp);
8998
8999         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9000
9001         /* No matching tg3_nvram_unlock() after this because
9002          * chip reset below will undo the nvram lock.
9003          */
9004         tp->nvram_lock_cnt = 0;
9005
9006         /* GRC_MISC_CFG core clock reset will clear the memory
9007          * enable bit in PCI register 4 and the MSI enable bit
9008          * on some chips, so we save relevant registers here.
9009          */
9010         tg3_save_pci_state(tp);
9011
9012         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9013             tg3_flag(tp, 5755_PLUS))
9014                 tw32(GRC_FASTBOOT_PC, 0);
9015
9016         /*
9017          * We must avoid the readl() that normally takes place.
9018          * It locks up machines, causes machine checks, and other
9019          * fun things.  So we temporarily disable the 5701
9020          * hardware workaround while we do the reset.
9021          */
9022         write_op = tp->write32;
9023         if (write_op == tg3_write_flush_reg32)
9024                 tp->write32 = tg3_write32;
9025
9026         /* Prevent the irq handler from reading or writing PCI registers
9027          * during chip reset when the memory enable bit in the PCI command
9028          * register may be cleared.  The chip does not generate interrupt
9029          * register may be cleared.  The chip does not generate interrupts
9030          * sharing or irqpoll.
9031          */
9032         tg3_flag_set(tp, CHIP_RESETTING);
9033         for (i = 0; i < tp->irq_cnt; i++) {
9034                 struct tg3_napi *tnapi = &tp->napi[i];
9035                 if (tnapi->hw_status) {
9036                         tnapi->hw_status->status = 0;
9037                         tnapi->hw_status->status_tag = 0;
9038                 }
9039                 tnapi->last_tag = 0;
9040                 tnapi->last_irq_tag = 0;
9041         }
9042         smp_mb();
9043
9044         for (i = 0; i < tp->irq_cnt; i++)
9045                 synchronize_irq(tp->napi[i].irq_vec);
9046
9047         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9048                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9049                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9050         }
9051
9052         /* do the reset */
9053         val = GRC_MISC_CFG_CORECLK_RESET;
9054
9055         if (tg3_flag(tp, PCI_EXPRESS)) {
9056                 /* Force PCIe 1.0a mode */
9057                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9058                     !tg3_flag(tp, 57765_PLUS) &&
9059                     tr32(TG3_PCIE_PHY_TSTCTL) ==
9060                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9061                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9062
9063                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9064                         tw32(GRC_MISC_CFG, (1 << 29));
9065                         val |= (1 << 29);
9066                 }
9067         }
9068
9069         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9070                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9071                 tw32(GRC_VCPU_EXT_CTRL,
9072                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9073         }
9074
9075         /* Set the clock to the highest frequency to avoid timeouts. With link
9076          * aware mode, the clock speed could be slow and the bootcode may not
9077          * complete within the expected time. Override the clock to allow the
9078          * bootcode to finish sooner and then restore it.
9079          */
9080         tg3_override_clk(tp);
9081
9082         /* Manage GPHY power for all CPMU-absent PCIe devices. */
9083         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9084                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9085
9086         tw32(GRC_MISC_CFG, val);
9087
9088         /* restore 5701 hardware bug workaround write method */
9089         tp->write32 = write_op;
9090
9091         /* Unfortunately, we have to delay before the PCI read back.
9092          * Some 575X chips will not even respond to a PCI cfg access
9093          * when the reset command is given to the chip.
9094          *
9095          * How do these hardware designers expect things to work
9096          * properly if the PCI write is posted for a long period
9097          * of time?  It is always necessary to have some method by
9098          * which a register read back can occur to push the write
9099          * out which does the reset.
9100          *
9101          * For most tg3 variants the trick below has worked.
9102          * Ho hum...
9103          */
9104         udelay(120);
9105
9106         /* Flush PCI posted writes.  The normal MMIO registers
9107          * are inaccessible at this time so this is the only
9108          * way to do this reliably (actually, this is no longer
9109          * the case; see above).  I tried to use indirect
9110          * register read/write but this upset some 5701 variants.
9111          */
9112         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9113
9114         udelay(120);
9115
9116         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9117                 u16 val16;
9118
9119                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9120                         int j;
9121                         u32 cfg_val;
9122
9123                         /* Wait for link training to complete.  */
9124                         for (j = 0; j < 5000; j++)
9125                                 udelay(100);
9126
9127                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9128                         pci_write_config_dword(tp->pdev, 0xc4,
9129                                                cfg_val | (1 << 15));
9130                 }
9131
9132                 /* Clear the "no snoop" and "relaxed ordering" bits. */
9133                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9134                 /*
9135                  * Older PCIe devices only support the 128 byte
9136                  * MPS setting.  Enforce the restriction.
9137                  */
9138                 if (!tg3_flag(tp, CPMU_PRESENT))
9139                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9140                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9141
9142                 /* Clear error status */
9143                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9144                                       PCI_EXP_DEVSTA_CED |
9145                                       PCI_EXP_DEVSTA_NFED |
9146                                       PCI_EXP_DEVSTA_FED |
9147                                       PCI_EXP_DEVSTA_URD);
9148         }
9149
9150         tg3_restore_pci_state(tp);
9151
9152         tg3_flag_clear(tp, CHIP_RESETTING);
9153         tg3_flag_clear(tp, ERROR_PROCESSED);
9154
9155         val = 0;
9156         if (tg3_flag(tp, 5780_CLASS))
9157                 val = tr32(MEMARB_MODE);
9158         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9159
9160         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9161                 tg3_stop_fw(tp);
9162                 tw32(0x5000, 0x400);
9163         }
9164
9165         if (tg3_flag(tp, IS_SSB_CORE)) {
9166                 /*
9167                  * BCM4785: In order to avoid repercussions from using
9168                  * potentially defective internal ROM, stop the Rx RISC CPU,
9169                  * which is not required for operation.
9170                  */
9171                 tg3_stop_fw(tp);
9172                 tg3_halt_cpu(tp, RX_CPU_BASE);
9173         }
9174
9175         err = tg3_poll_fw(tp);
9176         if (err)
9177                 return err;
9178
9179         tw32(GRC_MODE, tp->grc_mode);
9180
9181         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9182                 val = tr32(0xc4);
9183
9184                 tw32(0xc4, val | (1 << 15));
9185         }
9186
9187         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9188             tg3_asic_rev(tp) == ASIC_REV_5705) {
9189                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9190                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9191                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9192                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9193         }
9194
9195         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9196                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9197                 val = tp->mac_mode;
9198         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9199                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9200                 val = tp->mac_mode;
9201         } else
9202                 val = 0;
9203
9204         tw32_f(MAC_MODE, val);
9205         udelay(40);
9206
9207         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9208
9209         tg3_mdio_start(tp);
9210
9211         if (tg3_flag(tp, PCI_EXPRESS) &&
9212             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9213             tg3_asic_rev(tp) != ASIC_REV_5785 &&
9214             !tg3_flag(tp, 57765_PLUS)) {
9215                 val = tr32(0x7c00);
9216
9217                 tw32(0x7c00, val | (1 << 25));
9218         }
9219
9220         tg3_restore_clk(tp);
9221
9222         /* Reprobe ASF enable state.  */
9223         tg3_flag_clear(tp, ENABLE_ASF);
9224         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9225                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9226
9227         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9228         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9229         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9230                 u32 nic_cfg;
9231
9232                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9233                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9234                         tg3_flag_set(tp, ENABLE_ASF);
9235                         tp->last_event_jiffies = jiffies;
9236                         if (tg3_flag(tp, 5750_PLUS))
9237                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9238
9239                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9240                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9241                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9242                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9243                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9244                 }
9245         }
9246
9247         return 0;
9248 }
9249
9250 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9251 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9252 static void __tg3_set_rx_mode(struct net_device *);
9253
9254 /* tp->lock is held. */
9255 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9256 {
9257         int err;
9258
9259         tg3_stop_fw(tp);
9260
9261         tg3_write_sig_pre_reset(tp, kind);
9262
9263         tg3_abort_hw(tp, silent);
9264         err = tg3_chip_reset(tp);
9265
9266         __tg3_set_mac_addr(tp, false);
9267
9268         tg3_write_sig_legacy(tp, kind);
9269         tg3_write_sig_post_reset(tp, kind);
9270
9271         if (tp->hw_stats) {
9272                 /* Save the stats across chip resets... */
9273                 tg3_get_nstats(tp, &tp->net_stats_prev);
9274                 tg3_get_estats(tp, &tp->estats_prev);
9275
9276                 /* And make sure the next sample is new data */
9277                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9278         }
9279
9280         return err;
9281 }
9282
9283 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9284 {
9285         struct tg3 *tp = netdev_priv(dev);
9286         struct sockaddr *addr = p;
9287         int err = 0;
9288         bool skip_mac_1 = false;
9289
9290         if (!is_valid_ether_addr(addr->sa_data))
9291                 return -EADDRNOTAVAIL;
9292
9293         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9294
9295         if (!netif_running(dev))
9296                 return 0;
9297
9298         if (tg3_flag(tp, ENABLE_ASF)) {
9299                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9300
9301                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9302                 addr0_low = tr32(MAC_ADDR_0_LOW);
9303                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9304                 addr1_low = tr32(MAC_ADDR_1_LOW);
9305
9306                 /* Skip MAC addr 1 if ASF is using it. */
9307                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9308                     !(addr1_high == 0 && addr1_low == 0))
9309                         skip_mac_1 = true;
9310         }
9311         spin_lock_bh(&tp->lock);
9312         __tg3_set_mac_addr(tp, skip_mac_1);
9313         __tg3_set_rx_mode(dev);
9314         spin_unlock_bh(&tp->lock);
9315
9316         return err;
9317 }
9318
9319 /* tp->lock is held. */
9320 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9321                            dma_addr_t mapping, u32 maxlen_flags,
9322                            u32 nic_addr)
9323 {
9324         tg3_write_mem(tp,
9325                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9326                       ((u64) mapping >> 32));
9327         tg3_write_mem(tp,
9328                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9329                       ((u64) mapping & 0xffffffff));
9330         tg3_write_mem(tp,
9331                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9332                        maxlen_flags);
9333
9334         if (!tg3_flag(tp, 5705_PLUS))
9335                 tg3_write_mem(tp,
9336                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9337                               nic_addr);
9338 }
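
/* Layout written by tg3_set_bdinfo(): each ring control block in NIC
 * SRAM holds the 64-bit host DMA address of the ring (high word
 * first), a combined maxlen/flags word, and, on pre-5705 chips only,
 * the NIC-local address of the ring.
 */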
9339
9340
9341 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9342 {
9343         int i = 0;
9344
9345         if (!tg3_flag(tp, ENABLE_TSS)) {
9346                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9347                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9348                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9349         } else {
9350                 tw32(HOSTCC_TXCOL_TICKS, 0);
9351                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9352                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9353
9354                 for (; i < tp->txq_cnt; i++) {
9355                         u32 reg;
9356
9357                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9358                         tw32(reg, ec->tx_coalesce_usecs);
9359                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9360                         tw32(reg, ec->tx_max_coalesced_frames);
9361                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9362                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9363                 }
9364         }
9365
9366         for (; i < tp->irq_max - 1; i++) {
9367                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9368                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9369                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9370         }
9371 }
9372
9373 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9374 {
9375         int i = 0;
9376         u32 limit = tp->rxq_cnt;
9377
9378         if (!tg3_flag(tp, ENABLE_RSS)) {
9379                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9380                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9381                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9382                 limit--;
9383         } else {
9384                 tw32(HOSTCC_RXCOL_TICKS, 0);
9385                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9386                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9387         }
9388
9389         for (; i < limit; i++) {
9390                 u32 reg;
9391
9392                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9393                 tw32(reg, ec->rx_coalesce_usecs);
9394                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9395                 tw32(reg, ec->rx_max_coalesced_frames);
9396                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9397                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9398         }
9399
9400         for (; i < tp->irq_max - 1; i++) {
9401                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9402                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9403                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9404         }
9405 }
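
/* Both routines above rely on the per-vector coalescing registers
 * being laid out at a fixed 0x18 (24) byte stride: the block for
 * MSI-X vector n (n >= 1) starts at *_VEC1 + (n - 1) * 0x18.  The
 * trailing loops zero the registers of any unused vectors so stale
 * values cannot linger there.
 */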
9406
9407 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9408 {
9409         tg3_coal_tx_init(tp, ec);
9410         tg3_coal_rx_init(tp, ec);
9411
9412         if (!tg3_flag(tp, 5705_PLUS)) {
9413                 u32 val = ec->stats_block_coalesce_usecs;
9414
9415                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9416                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9417
9418                 if (!tp->link_up)
9419                         val = 0;
9420
9421                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9422         }
9423 }
9424
9425 /* tp->lock is held. */
9426 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9427 {
9428         u32 txrcb, limit;
9429
9430         /* Disable all transmit rings but the first. */
9431         if (!tg3_flag(tp, 5705_PLUS))
9432                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9433         else if (tg3_flag(tp, 5717_PLUS))
9434                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9435         else if (tg3_flag(tp, 57765_CLASS) ||
9436                  tg3_asic_rev(tp) == ASIC_REV_5762)
9437                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9438         else
9439                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9440
9441         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9442              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9443                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9444                               BDINFO_FLAGS_DISABLED);
9445 }
9446
9447 /* tp->lock is held. */
9448 static void tg3_tx_rcbs_init(struct tg3 *tp)
9449 {
9450         int i = 0;
9451         u32 txrcb = NIC_SRAM_SEND_RCB;
9452
9453         if (tg3_flag(tp, ENABLE_TSS))
9454                 i++;
9455
9456         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9457                 struct tg3_napi *tnapi = &tp->napi[i];
9458
9459                 if (!tnapi->tx_ring)
9460                         continue;
9461
9462                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9463                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9464                                NIC_SRAM_TX_BUFFER_DESC);
9465         }
9466 }
9467
9468 /* tp->lock is held. */
9469 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9470 {
9471         u32 rxrcb, limit;
9472
9473         /* Disable all receive return rings but the first. */
9474         if (tg3_flag(tp, 5717_PLUS))
9475                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9476         else if (!tg3_flag(tp, 5705_PLUS))
9477                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9478         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9479                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9480                  tg3_flag(tp, 57765_CLASS))
9481                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9482         else
9483                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9484
9485         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9486              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9487                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9488                               BDINFO_FLAGS_DISABLED);
9489 }
9490
9491 /* tp->lock is held. */
9492 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9493 {
9494         int i = 0;
9495         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9496
9497         if (tg3_flag(tp, ENABLE_RSS))
9498                 i++;
9499
9500         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9501                 struct tg3_napi *tnapi = &tp->napi[i];
9502
9503                 if (!tnapi->rx_rcb)
9504                         continue;
9505
9506                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9507                                (tp->rx_ret_ring_mask + 1) <<
9508                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9509         }
9510 }
9511
9512 /* tp->lock is held. */
9513 static void tg3_rings_reset(struct tg3 *tp)
9514 {
9515         int i;
9516         u32 stblk;
9517         struct tg3_napi *tnapi = &tp->napi[0];
9518
9519         tg3_tx_rcbs_disable(tp);
9520
9521         tg3_rx_ret_rcbs_disable(tp);
9522
9523         /* Disable interrupts */
9524         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9525         tp->napi[0].chk_msi_cnt = 0;
9526         tp->napi[0].last_rx_cons = 0;
9527         tp->napi[0].last_tx_cons = 0;
9528
9529         /* Zero mailbox registers. */
9530         if (tg3_flag(tp, SUPPORT_MSIX)) {
9531                 for (i = 1; i < tp->irq_max; i++) {
9532                         tp->napi[i].tx_prod = 0;
9533                         tp->napi[i].tx_cons = 0;
9534                         if (tg3_flag(tp, ENABLE_TSS))
9535                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
9536                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
9537                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9538                         tp->napi[i].chk_msi_cnt = 0;
9539                         tp->napi[i].last_rx_cons = 0;
9540                         tp->napi[i].last_tx_cons = 0;
9541                 }
9542                 if (!tg3_flag(tp, ENABLE_TSS))
9543                         tw32_mailbox(tp->napi[0].prodmbox, 0);
9544         } else {
9545                 tp->napi[0].tx_prod = 0;
9546                 tp->napi[0].tx_cons = 0;
9547                 tw32_mailbox(tp->napi[0].prodmbox, 0);
9548                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9549         }
9550
9551         /* Make sure the NIC-based send BD rings are disabled. */
9552         if (!tg3_flag(tp, 5705_PLUS)) {
9553                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9554                 for (i = 0; i < 16; i++)
9555                         tw32_tx_mbox(mbox + i * 8, 0);
9556         }
9557
9558         /* Clear status block in ram. */
9559         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9560
9561         /* Set status block DMA address */
9562         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9563              ((u64) tnapi->status_mapping >> 32));
9564         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9565              ((u64) tnapi->status_mapping & 0xffffffff));
9566
9567         stblk = HOSTCC_STATBLCK_RING1;
9568
9569         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9570                 u64 mapping = (u64)tnapi->status_mapping;
9571                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9572                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9573                 stblk += 8;
9574
9575                 /* Clear status block in ram. */
9576                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9577         }
9578
9579         tg3_tx_rcbs_init(tp);
9580         tg3_rx_ret_rcbs_init(tp);
9581 }
9582
9583 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9584 {
9585         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9586
9587         if (!tg3_flag(tp, 5750_PLUS) ||
9588             tg3_flag(tp, 5780_CLASS) ||
9589             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9590             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9591             tg3_flag(tp, 57765_PLUS))
9592                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9593         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9594                  tg3_asic_rev(tp) == ASIC_REV_5787)
9595                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9596         else
9597                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9598
9599         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9600         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9601
9602         val = min(nic_rep_thresh, host_rep_thresh);
9603         tw32(RCVBDI_STD_THRESH, val);
9604
9605         if (tg3_flag(tp, 57765_PLUS))
9606                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9607
9608         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9609                 return;
9610
9611         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9612
9613         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9614
9615         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9616         tw32(RCVBDI_JUMBO_THRESH, val);
9617
9618         if (tg3_flag(tp, 57765_PLUS))
9619                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9620 }
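
/* Worked example with hypothetical values: rx_pending == 200 gives
 * host_rep_thresh = max(200 / 8, 1) = 25; if the chip's BD cache
 * held 64 entries and rx_std_max_post did not limit it,
 * nic_rep_thresh = min(64 / 2, rx_std_max_post) = 32, and
 * RCVBDI_STD_THRESH would be programmed with min(32, 25) = 25.
 */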
9621
9622 static inline u32 calc_crc(unsigned char *buf, int len)
9623 {
9624         u32 reg;
9625         u32 tmp;
9626         int j, k;
9627
9628         reg = 0xffffffff;
9629
9630         for (j = 0; j < len; j++) {
9631                 reg ^= buf[j];
9632
9633                 for (k = 0; k < 8; k++) {
9634                         tmp = reg & 0x01;
9635
9636                         reg >>= 1;
9637
9638                         if (tmp)
9639                                 reg ^= 0xedb88320;
9640                 }
9641         }
9642
9643         return ~reg;
9644 }
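
/* calc_crc() is the standard bit-reflected CRC-32: LSB-first shifts
 * with the reversed polynomial 0xedb88320 and a final inversion of
 * the register.  __tg3_set_rx_mode() below consumes only seven bits
 * of the result (~crc & 0x7f) to pick one of 128 hash-filter bits.
 */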
9645
9646 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9647 {
9648         /* accept or reject all multicast frames */
9649         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9650         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9651         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9652         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9653 }
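
/* The four hash registers form one 128-bit multicast filter:
 * all-ones accepts every multicast frame, all-zeroes rejects them
 * all.
 */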
9654
9655 static void __tg3_set_rx_mode(struct net_device *dev)
9656 {
9657         struct tg3 *tp = netdev_priv(dev);
9658         u32 rx_mode;
9659
9660         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9661                                   RX_MODE_KEEP_VLAN_TAG);
9662
9663 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9664         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9665          * flag clear.
9666          */
9667         if (!tg3_flag(tp, ENABLE_ASF))
9668                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9669 #endif
9670
9671         if (dev->flags & IFF_PROMISC) {
9672                 /* Promiscuous mode. */
9673                 rx_mode |= RX_MODE_PROMISC;
9674         } else if (dev->flags & IFF_ALLMULTI) {
9675                 /* Accept all multicast. */
9676                 tg3_set_multi(tp, 1);
9677         } else if (netdev_mc_empty(dev)) {
9678                 /* Reject all multicast. */
9679                 tg3_set_multi(tp, 0);
9680         } else {
9681                 /* Accept one or more multicast(s). */
9682                 struct netdev_hw_addr *ha;
9683                 u32 mc_filter[4] = { 0, };
9684                 u32 regidx;
9685                 u32 bit;
9686                 u32 crc;
9687
9688                 netdev_for_each_mc_addr(ha, dev) {
9689                         crc = calc_crc(ha->addr, ETH_ALEN);
9690                         bit = ~crc & 0x7f;
9691                         regidx = (bit & 0x60) >> 5;
9692                         bit &= 0x1f;
9693                         mc_filter[regidx] |= (1 << bit);
9694                 }
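
                /* Worked example with a hypothetical CRC of
                 * 0xffffff80: bit = ~crc & 0x7f = 0x7f, regidx =
                 * (0x7f & 0x60) >> 5 = 3, then bit &= 0x1f = 31, so
                 * bit 31 of mc_filter[3] (MAC_HASH_REG_3) is set.
                 */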
9695
9696                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9697                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9698                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9699                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9700         }
9701
9702         if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9703                 rx_mode |= RX_MODE_PROMISC;
9704         } else if (!(dev->flags & IFF_PROMISC)) {
9705                 /* Add all entries to the MAC addr filter list */
9706                 int i = 0;
9707                 struct netdev_hw_addr *ha;
9708
9709                 netdev_for_each_uc_addr(ha, dev) {
9710                         __tg3_set_one_mac_addr(tp, ha->addr,
9711                                                i + TG3_UCAST_ADDR_IDX(tp));
9712                         i++;
9713                 }
9714         }
9715
9716         if (rx_mode != tp->rx_mode) {
9717                 tp->rx_mode = rx_mode;
9718                 tw32_f(MAC_RX_MODE, rx_mode);
9719                 udelay(10);
9720         }
9721 }
9722
9723 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9724 {
9725         int i;
9726
9727         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9728                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9729 }
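
/* ethtool_rxfh_indir_default(i, qcnt) spreads table slots round-robin
 * across the rx queues (slot i maps to queue i % qcnt), so each of
 * the qcnt queues owns an equal share of the indirection table.
 */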
9730
9731 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9732 {
9733         int i;
9734
9735         if (!tg3_flag(tp, SUPPORT_MSIX))
9736                 return;
9737
9738         if (tp->rxq_cnt == 1) {
9739                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9740                 return;
9741         }
9742
9743         /* Validate table against current IRQ count */
9744         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9745                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9746                         break;
9747         }
9748
9749         if (i != TG3_RSS_INDIR_TBL_SIZE)
9750                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9751 }
9752
9753 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9754 {
9755         int i = 0;
9756         u32 reg = MAC_RSS_INDIR_TBL_0;
9757
9758         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9759                 u32 val = tp->rss_ind_tbl[i];
9760                 i++;
9761                 for (; i % 8; i++) {
9762                         val <<= 4;
9763                         val |= tp->rss_ind_tbl[i];
9764                 }
9765                 tw32(reg, val);
9766                 reg += 4;
9767         }
9768 }
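
/* Packing above: eight 4-bit table entries per 32-bit register, the
 * first entry in the most significant nibble.  E.g. if
 * rss_ind_tbl[0..7] were {0, 1, 2, 3, 4, 5, 6, 7}, the value written
 * to MAC_RSS_INDIR_TBL_0 would be 0x01234567.
 */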
9769
9770 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9771 {
9772         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9773                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9774         else
9775                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9776 }
9777
9778 /* tp->lock is held. */
9779 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9780 {
9781         u32 val, rdmac_mode;
9782         int i, err, limit;
9783         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9784
9785         tg3_disable_ints(tp);
9786
9787         tg3_stop_fw(tp);
9788
9789         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9790
9791         if (tg3_flag(tp, INIT_COMPLETE))
9792                 tg3_abort_hw(tp, 1);
9793
9794         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9795             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9796                 tg3_phy_pull_config(tp);
9797                 tg3_eee_pull_config(tp, NULL);
9798                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9799         }
9800
9801         /* Enable MAC control of LPI */
9802         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9803                 tg3_setup_eee(tp);
9804
9805         if (reset_phy)
9806                 tg3_phy_reset(tp);
9807
9808         err = tg3_chip_reset(tp);
9809         if (err)
9810                 return err;
9811
9812         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9813
9814         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9815                 val = tr32(TG3_CPMU_CTRL);
9816                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9817                 tw32(TG3_CPMU_CTRL, val);
9818
9819                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9820                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9821                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9822                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9823
9824                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9825                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9826                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9827                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9828
9829                 val = tr32(TG3_CPMU_HST_ACC);
9830                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9831                 val |= CPMU_HST_ACC_MACCLK_6_25;
9832                 tw32(TG3_CPMU_HST_ACC, val);
9833         }
9834
9835         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9836                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9837                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9838                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9839                 tw32(PCIE_PWR_MGMT_THRESH, val);
9840
9841                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9842                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9843
9844                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9845
9846                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9847                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9848         }
9849
9850         if (tg3_flag(tp, L1PLLPD_EN)) {
9851                 u32 grc_mode = tr32(GRC_MODE);
9852
9853                 /* Access the lower 1K of PL PCIE block registers. */
9854                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9855                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9856
9857                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9858                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9859                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9860
9861                 tw32(GRC_MODE, grc_mode);
9862         }
9863
9864         if (tg3_flag(tp, 57765_CLASS)) {
9865                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9866                         u32 grc_mode = tr32(GRC_MODE);
9867
9868                         /* Access the lower 1K of PL PCIE block registers. */
9869                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9870                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9871
9872                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9873                                    TG3_PCIE_PL_LO_PHYCTL5);
9874                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9875                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9876
9877                         tw32(GRC_MODE, grc_mode);
9878                 }
9879
9880                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9881                         u32 grc_mode;
9882
9883                         /* Fix transmit hangs */
9884                         val = tr32(TG3_CPMU_PADRNG_CTL);
9885                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9886                         tw32(TG3_CPMU_PADRNG_CTL, val);
9887
9888                         grc_mode = tr32(GRC_MODE);
9889
9890                         /* Access the lower 1K of DL PCIE block registers. */
9891                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9892                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9893
9894                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9895                                    TG3_PCIE_DL_LO_FTSMAX);
9896                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9897                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9898                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9899
9900                         tw32(GRC_MODE, grc_mode);
9901                 }
9902
9903                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9904                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9905                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9906                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9907         }
9908
9909         /* This works around an issue with Athlon chipsets on
9910          * B3 tigon3 silicon.  This bit has no effect on any
9911          * other revision.  But do not set this on PCI Express
9912          * chips and don't even touch the clocks if the CPMU is present.
9913          */
9914         if (!tg3_flag(tp, CPMU_PRESENT)) {
9915                 if (!tg3_flag(tp, PCI_EXPRESS))
9916                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9917                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9918         }
9919
9920         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9921             tg3_flag(tp, PCIX_MODE)) {
9922                 val = tr32(TG3PCI_PCISTATE);
9923                 val |= PCISTATE_RETRY_SAME_DMA;
9924                 tw32(TG3PCI_PCISTATE, val);
9925         }
9926
9927         if (tg3_flag(tp, ENABLE_APE)) {
9928                 /* Allow reads and writes to the
9929                  * APE register and memory space.
9930                  */
9931                 val = tr32(TG3PCI_PCISTATE);
9932                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9933                        PCISTATE_ALLOW_APE_SHMEM_WR |
9934                        PCISTATE_ALLOW_APE_PSPACE_WR;
9935                 tw32(TG3PCI_PCISTATE, val);
9936         }
9937
9938         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9939                 /* Enable some hw fixes.  */
9940                 val = tr32(TG3PCI_MSI_DATA);
9941                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9942                 tw32(TG3PCI_MSI_DATA, val);
9943         }
9944
9945         /* Descriptor ring init may make accesses to the
9946          * NIC SRAM area to set up the TX descriptors, so we
9947          * can only do this after the hardware has been
9948          * successfully reset.
9949          */
9950         err = tg3_init_rings(tp);
9951         if (err)
9952                 return err;
9953
9954         if (tg3_flag(tp, 57765_PLUS)) {
9955                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9956                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9957                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9958                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9959                 if (!tg3_flag(tp, 57765_CLASS) &&
9960                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
9961                     tg3_asic_rev(tp) != ASIC_REV_5762)
9962                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9963                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9964         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9965                    tg3_asic_rev(tp) != ASIC_REV_5761) {
9966                 /* This value is determined during the probe-time DMA
9967                  * engine test, tg3_test_dma.
9968                  */
9969                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9970         }
9971
9972         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9973                           GRC_MODE_4X_NIC_SEND_RINGS |
9974                           GRC_MODE_NO_TX_PHDR_CSUM |
9975                           GRC_MODE_NO_RX_PHDR_CSUM);
9976         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9977
9978         /* Pseudo-header checksum is done by hardware logic and not
9979          * the offload processors, so make the chip do the pseudo-
9980          * header checksums on receive.  For transmit it is more
9981          * convenient to do the pseudo-header checksum in software
9982          * as Linux does that on transmit for us in all cases.
9983          */
9984         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9985
9986         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9987         if (tp->rxptpctl)
9988                 tw32(TG3_RX_PTP_CTL,
9989                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9990
9991         if (tg3_flag(tp, PTP_CAPABLE))
9992                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9993
9994         tw32(GRC_MODE, tp->grc_mode | val);
9995
9996         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
9997         val = tr32(GRC_MISC_CFG);
9998         val &= ~0xff;
9999         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10000         tw32(GRC_MISC_CFG, val);
10001
10002         /* Initialize MBUF/DESC pool. */
10003         if (tg3_flag(tp, 5750_PLUS)) {
10004                 /* Do nothing.  */
10005         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10006                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10007                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10008                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10009                 else
10010                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10011                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10012                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10013         } else if (tg3_flag(tp, TSO_CAPABLE)) {
10014                 int fw_len;
10015
10016                 fw_len = tp->fw_len;
10017                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10018                 tw32(BUFMGR_MB_POOL_ADDR,
10019                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10020                 tw32(BUFMGR_MB_POOL_SIZE,
10021                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10022         }
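        /* For reference: the rounding above bumps fw_len up to the next
         * 128-byte boundary using the standard power-of-two idiom
         * (x + (a - 1)) & ~(a - 1).  A quick worked example, values
         * illustrative:
         *
         *      fw_len = 0x1234;
         *      fw_len = (fw_len + 0x7f) & ~0x7f;
         *             // 0x1234 + 0x7f = 0x12b3; 0x12b3 & ~0x7f = 0x1280
         *
         * which is equivalent to the kernel's ALIGN(fw_len, 0x80).
         */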
10023
10024         if (tp->dev->mtu <= ETH_DATA_LEN) {
10025                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10026                      tp->bufmgr_config.mbuf_read_dma_low_water);
10027                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10028                      tp->bufmgr_config.mbuf_mac_rx_low_water);
10029                 tw32(BUFMGR_MB_HIGH_WATER,
10030                      tp->bufmgr_config.mbuf_high_water);
10031         } else {
10032                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10033                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10034                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10035                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10036                 tw32(BUFMGR_MB_HIGH_WATER,
10037                      tp->bufmgr_config.mbuf_high_water_jumbo);
10038         }
10039         tw32(BUFMGR_DMA_LOW_WATER,
10040              tp->bufmgr_config.dma_low_water);
10041         tw32(BUFMGR_DMA_HIGH_WATER,
10042              tp->bufmgr_config.dma_high_water);
10043
10044         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10045         if (tg3_asic_rev(tp) == ASIC_REV_5719)
10046                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10047         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10048             tg3_asic_rev(tp) == ASIC_REV_5762 ||
10049             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10050             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10051                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10052         tw32(BUFMGR_MODE, val);
10053         for (i = 0; i < 2000; i++) {
10054                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10055                         break;
10056                 udelay(10);
10057         }
10058         if (i >= 2000) {
10059                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10060                 return -ENODEV;
10061         }
10062
10063         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10064                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10065
10066         tg3_setup_rxbd_thresholds(tp);
10067
10068         /* Initialize TG3_BDINFO's at:
10069          *  RCVDBDI_STD_BD:     standard eth size rx ring
10070          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10071          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10072          *
10073          * like so:
10074          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10075          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10076          *                              ring attribute flags
10077          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10078          *
10079          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10080          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10081          *
10082          * The size of each ring is fixed in the firmware, but the location is
10083          * configurable.
10084          */
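        /* Putting the layout above together: a minimal sketch of how a
         * single ring control block would be programmed.  This is
         * illustrative only (bdinfo_sketch is a hypothetical helper);
         * the std/jumbo writes below expand the same stores by hand.
         *
         *      static void bdinfo_sketch(struct tg3 *tp, u32 bdinfo,
         *                                dma_addr_t mapping, u32 maxlen,
         *                                u32 flags, u32 nic_addr)
         *      {
         *              tw32(bdinfo + TG3_BDINFO_HOST_ADDR +
         *                   TG3_64BIT_REG_HIGH, ((u64) mapping >> 32));
         *              tw32(bdinfo + TG3_BDINFO_HOST_ADDR +
         *                   TG3_64BIT_REG_LOW, ((u64) mapping & 0xffffffff));
         *              tw32(bdinfo + TG3_BDINFO_MAXLEN_FLAGS,
         *                   (maxlen << BDINFO_FLAGS_MAXLEN_SHIFT) | flags);
         *              tw32(bdinfo + TG3_BDINFO_NIC_ADDR, nic_addr);
         *      }
         */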
10085         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10086              ((u64) tpr->rx_std_mapping >> 32));
10087         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10088              ((u64) tpr->rx_std_mapping & 0xffffffff));
10089         if (!tg3_flag(tp, 5717_PLUS))
10090                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10091                      NIC_SRAM_RX_BUFFER_DESC);
10092
10093         /* Disable the mini ring */
10094         if (!tg3_flag(tp, 5705_PLUS))
10095                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10096                      BDINFO_FLAGS_DISABLED);
10097
10098         /* Program the jumbo buffer descriptor ring control
10099          * blocks on those devices that have them.
10100          */
10101         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10102             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10103
10104                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10105                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10106                              ((u64) tpr->rx_jmb_mapping >> 32));
10107                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10108                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10109                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10110                               BDINFO_FLAGS_MAXLEN_SHIFT;
10111                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10112                              val | BDINFO_FLAGS_USE_EXT_RECV);
10113                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10114                             tg3_flag(tp, 57765_CLASS) ||
10115                             tg3_asic_rev(tp) == ASIC_REV_5762)
10116                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10117                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10118                 } else {
10119                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10120                              BDINFO_FLAGS_DISABLED);
10121                 }
10122
10123                 if (tg3_flag(tp, 57765_PLUS)) {
10124                         val = TG3_RX_STD_RING_SIZE(tp);
10125                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10126                         val |= (TG3_RX_STD_DMA_SZ << 2);
10127                 } else
10128                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10129         } else
10130                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10131
10132         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10133
10134         tpr->rx_std_prod_idx = tp->rx_pending;
10135         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10136
10137         tpr->rx_jmb_prod_idx =
10138                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10139         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10140
10141         tg3_rings_reset(tp);
10142
10143         /* Initialize MAC address and backoff seed. */
10144         __tg3_set_mac_addr(tp, false);
10145
10146         /* MTU + ethernet header + FCS + optional VLAN tag */
10147         tw32(MAC_RX_MTU_SIZE,
10148              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10149
10150         /* The slot time is changed by tg3_setup_phy if we
10151          * run at gigabit with half duplex.
10152          */
10153         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10154               (6 << TX_LENGTHS_IPG_SHIFT) |
10155               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10156
10157         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10158             tg3_asic_rev(tp) == ASIC_REV_5762)
10159                 val |= tr32(MAC_TX_LENGTHS) &
10160                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10161                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10162
10163         tw32(MAC_TX_LENGTHS, val);
10164
10165         /* Receive rules. */
10166         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10167         tw32(RCVLPC_CONFIG, 0x0181);
10168
10169         /* Calculate the RDMAC_MODE setting early; we need it to
10170          * determine the RCVLPC_STATE_ENABLE mask.
10171          */
10172         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10173                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10174                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10175                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10176                       RDMAC_MODE_LNGREAD_ENAB);
10177
10178         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10179                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10180
10181         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10182             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10183             tg3_asic_rev(tp) == ASIC_REV_57780)
10184                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10185                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10186                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10187
10188         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10189             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10190                 if (tg3_flag(tp, TSO_CAPABLE) &&
10191                     tg3_asic_rev(tp) == ASIC_REV_5705) {
10192                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10193                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10194                            !tg3_flag(tp, IS_5788)) {
10195                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10196                 }
10197         }
10198
10199         if (tg3_flag(tp, PCI_EXPRESS))
10200                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10201
10202         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10203                 tp->dma_limit = 0;
10204                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10205                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10206                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10207                 }
10208         }
10209
10210         if (tg3_flag(tp, HW_TSO_1) ||
10211             tg3_flag(tp, HW_TSO_2) ||
10212             tg3_flag(tp, HW_TSO_3))
10213                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10214
10215         if (tg3_flag(tp, 57765_PLUS) ||
10216             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10217             tg3_asic_rev(tp) == ASIC_REV_57780)
10218                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10219
10220         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10221             tg3_asic_rev(tp) == ASIC_REV_5762)
10222                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10223
10224         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10225             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10226             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10227             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10228             tg3_flag(tp, 57765_PLUS)) {
10229                 u32 tgtreg;
10230
10231                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10232                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10233                 else
10234                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10235
10236                 val = tr32(tgtreg);
10237                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10238                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10239                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10240                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10241                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10242                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10243                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10244                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10245                 }
10246                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10247         }
10248
10249         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10250             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10251             tg3_asic_rev(tp) == ASIC_REV_5762) {
10252                 u32 tgtreg;
10253
10254                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10255                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10256                 else
10257                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10258
10259                 val = tr32(tgtreg);
10260                 tw32(tgtreg, val |
10261                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10262                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10263         }
10264
10265         /* Receive/send statistics. */
10266         if (tg3_flag(tp, 5750_PLUS)) {
10267                 val = tr32(RCVLPC_STATS_ENABLE);
10268                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10269                 tw32(RCVLPC_STATS_ENABLE, val);
10270         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10271                    tg3_flag(tp, TSO_CAPABLE)) {
10272                 val = tr32(RCVLPC_STATS_ENABLE);
10273                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10274                 tw32(RCVLPC_STATS_ENABLE, val);
10275         } else {
10276                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10277         }
10278         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10279         tw32(SNDDATAI_STATSENAB, 0xffffff);
10280         tw32(SNDDATAI_STATSCTRL,
10281              (SNDDATAI_SCTRL_ENABLE |
10282               SNDDATAI_SCTRL_FASTUPD));
10283
10284         /* Set up the host coalescing engine. */
10285         tw32(HOSTCC_MODE, 0);
10286         for (i = 0; i < 2000; i++) {
10287                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10288                         break;
10289                 udelay(10);
10290         }
10291
10292         __tg3_set_coalesce(tp, &tp->coal);
10293
10294         if (!tg3_flag(tp, 5705_PLUS)) {
10295                 /* Status/statistics block address.  See tg3_timer,
10296                  * the tg3_periodic_fetch_stats call there, and
10297                  * tg3_get_stats to see how this works for 5705/5750 chips.
10298                  */
10299                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10300                      ((u64) tp->stats_mapping >> 32));
10301                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10302                      ((u64) tp->stats_mapping & 0xffffffff));
10303                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10304
10305                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10306
10307                 /* Clear statistics and status block memory areas */
10308                 for (i = NIC_SRAM_STATS_BLK;
10309                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10310                      i += sizeof(u32)) {
10311                         tg3_write_mem(tp, i, 0);
10312                         udelay(40);
10313                 }
10314         }
10315
10316         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10317
10318         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10319         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10320         if (!tg3_flag(tp, 5705_PLUS))
10321                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10322
10323         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10324                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10325                 /* reset to prevent losing 1st rx packet intermittently */
10326                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10327                 udelay(10);
10328         }
10329
10330         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10331                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10332                         MAC_MODE_FHDE_ENABLE;
10333         if (tg3_flag(tp, ENABLE_APE))
10334                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10335         if (!tg3_flag(tp, 5705_PLUS) &&
10336             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10337             tg3_asic_rev(tp) != ASIC_REV_5700)
10338                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10339         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10340         udelay(40);
10341
10342         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10343          * If TG3_FLAG_IS_NIC is zero, we should read the
10344          * register to preserve the GPIO settings for LOMs. The GPIOs,
10345          * whether used as inputs or outputs, are set by boot code after
10346          * reset.
10347          */
10348         if (!tg3_flag(tp, IS_NIC)) {
10349                 u32 gpio_mask;
10350
10351                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10352                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10353                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10354
10355                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10356                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10357                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10358
10359                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10360                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10361
10362                 tp->grc_local_ctrl &= ~gpio_mask;
10363                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10364
10365                 /* GPIO1 must be driven high for eeprom write protect */
10366                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10367                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10368                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10369         }
10370         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10371         udelay(100);
10372
10373         if (tg3_flag(tp, USING_MSIX)) {
10374                 val = tr32(MSGINT_MODE);
10375                 val |= MSGINT_MODE_ENABLE;
10376                 if (tp->irq_cnt > 1)
10377                         val |= MSGINT_MODE_MULTIVEC_EN;
10378                 if (!tg3_flag(tp, 1SHOT_MSI))
10379                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10380                 tw32(MSGINT_MODE, val);
10381         }
10382
10383         if (!tg3_flag(tp, 5705_PLUS)) {
10384                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10385                 udelay(40);
10386         }
10387
10388         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10389                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10390                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10391                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10392                WDMAC_MODE_LNGREAD_ENAB);
10393
10394         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10395             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10396                 if (tg3_flag(tp, TSO_CAPABLE) &&
10397                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10398                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10399                         /* nothing */
10400                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10401                            !tg3_flag(tp, IS_5788)) {
10402                         val |= WDMAC_MODE_RX_ACCEL;
10403                 }
10404         }
10405
10406         /* Enable host coalescing bug fix */
10407         if (tg3_flag(tp, 5755_PLUS))
10408                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10409
10410         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10411                 val |= WDMAC_MODE_BURST_ALL_DATA;
10412
10413         tw32_f(WDMAC_MODE, val);
10414         udelay(40);
10415
10416         if (tg3_flag(tp, PCIX_MODE)) {
10417                 u16 pcix_cmd;
10418
10419                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10420                                      &pcix_cmd);
10421                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10422                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10423                         pcix_cmd |= PCI_X_CMD_READ_2K;
10424                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10425                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10426                         pcix_cmd |= PCI_X_CMD_READ_2K;
10427                 }
10428                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10429                                       pcix_cmd);
10430         }
10431
10432         tw32_f(RDMAC_MODE, rdmac_mode);
10433         udelay(40);
10434
10435         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10436             tg3_asic_rev(tp) == ASIC_REV_5720) {
10437                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10438                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10439                                 break;
10440                 }
10441                 if (i < TG3_NUM_RDMA_CHANNELS) {
10442                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10443                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10444                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10445                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10446                 }
10447         }
10448
10449         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10450         if (!tg3_flag(tp, 5705_PLUS))
10451                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10452
10453         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10454                 tw32(SNDDATAC_MODE,
10455                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10456         else
10457                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10458
10459         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10460         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10461         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10462         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10463                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10464         tw32(RCVDBDI_MODE, val);
10465         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10466         if (tg3_flag(tp, HW_TSO_1) ||
10467             tg3_flag(tp, HW_TSO_2) ||
10468             tg3_flag(tp, HW_TSO_3))
10469                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10470         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10471         if (tg3_flag(tp, ENABLE_TSS))
10472                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10473         tw32(SNDBDI_MODE, val);
10474         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10475
10476         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10477                 err = tg3_load_5701_a0_firmware_fix(tp);
10478                 if (err)
10479                         return err;
10480         }
10481
10482         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10483                 /* Ignore any errors from the firmware download.  If the
10484                  * download fails, the device will operate with EEE disabled.
10485                  */
10486                 tg3_load_57766_firmware(tp);
10487         }
10488
10489         if (tg3_flag(tp, TSO_CAPABLE)) {
10490                 err = tg3_load_tso_firmware(tp);
10491                 if (err)
10492                         return err;
10493         }
10494
10495         tp->tx_mode = TX_MODE_ENABLE;
10496
10497         if (tg3_flag(tp, 5755_PLUS) ||
10498             tg3_asic_rev(tp) == ASIC_REV_5906)
10499                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10500
10501         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10502             tg3_asic_rev(tp) == ASIC_REV_5762) {
10503                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10504                 tp->tx_mode &= ~val;
10505                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10506         }
10507
10508         tw32_f(MAC_TX_MODE, tp->tx_mode);
10509         udelay(100);
10510
10511         if (tg3_flag(tp, ENABLE_RSS)) {
10512                 tg3_rss_write_indir_tbl(tp);
10513
10514                 /* Set up the "secret" hash key. */
10515                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10516                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10517                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10518                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10519                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10520                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10521                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10522                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10523                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10524                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10525         }
10526
10527         tp->rx_mode = RX_MODE_ENABLE;
10528         if (tg3_flag(tp, 5755_PLUS))
10529                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10530
10531         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10532                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10533
10534         if (tg3_flag(tp, ENABLE_RSS))
10535                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10536                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10537                                RX_MODE_RSS_IPV6_HASH_EN |
10538                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10539                                RX_MODE_RSS_IPV4_HASH_EN |
10540                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10541
10542         tw32_f(MAC_RX_MODE, tp->rx_mode);
10543         udelay(10);
10544
10545         tw32(MAC_LED_CTRL, tp->led_ctrl);
10546
10547         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10548         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10549                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10550                 udelay(10);
10551         }
10552         tw32_f(MAC_RX_MODE, tp->rx_mode);
10553         udelay(10);
10554
10555         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10556                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10557                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10558                         /* Set drive transmission level to 1.2V  */
10559                         /* only if the signal pre-emphasis bit is not set  */
10560                         val = tr32(MAC_SERDES_CFG);
10561                         val &= 0xfffff000;
10562                         val |= 0x880;
10563                         tw32(MAC_SERDES_CFG, val);
10564                 }
10565                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10566                         tw32(MAC_SERDES_CFG, 0x616000);
10567         }
10568
10569         /* Prevent chip from dropping frames when flow control
10570          * is enabled.
10571          */
10572         if (tg3_flag(tp, 57765_CLASS))
10573                 val = 1;
10574         else
10575                 val = 2;
10576         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10577
10578         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10579             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10580                 /* Use hardware link auto-negotiation */
10581                 tg3_flag_set(tp, HW_AUTONEG);
10582         }
10583
10584         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10585             tg3_asic_rev(tp) == ASIC_REV_5714) {
10586                 u32 tmp;
10587
10588                 tmp = tr32(SERDES_RX_CTRL);
10589                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10590                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10591                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10592                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10593         }
10594
10595         if (!tg3_flag(tp, USE_PHYLIB)) {
10596                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10597                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10598
10599                 err = tg3_setup_phy(tp, false);
10600                 if (err)
10601                         return err;
10602
10603                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10604                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10605                         u32 tmp;
10606
10607                         /* Clear CRC stats. */
10608                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10609                                 tg3_writephy(tp, MII_TG3_TEST1,
10610                                              tmp | MII_TG3_TEST1_CRC_EN);
10611                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10612                         }
10613                 }
10614         }
10615
10616         __tg3_set_rx_mode(tp->dev);
10617
10618         /* Initialize receive rules. */
10619         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10620         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10621         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10622         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10623
10624         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10625                 limit = 8;
10626         else
10627                 limit = 16;
10628         if (tg3_flag(tp, ENABLE_ASF))
10629                 limit -= 4;
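        /* Each case below intentionally falls through to the next, so
         * rule/value pairs 4 through limit - 1 are all cleared.  Rules 0
         * and 1 are programmed above; 2 and 3 are deliberately left
         * alone (note the commented-out writes).  An equivalent loop is
         * sketched after the switch.
         */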
10630         switch (limit) {
10631         case 16:
10632                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10633         case 15:
10634                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10635         case 14:
10636                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10637         case 13:
10638                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10639         case 12:
10640                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10641         case 11:
10642                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10643         case 10:
10644                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10645         case 9:
10646                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10647         case 8:
10648                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10649         case 7:
10650                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10651         case 6:
10652                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10653         case 5:
10654                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10655         case 4:
10656                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10657         case 3:
10658                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10659         case 2:
10660         case 1:
10661
10662         default:
10663                 break;
10664         }
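        /* As promised above, the switch collapses to a loop of this
         * shape, assuming the rule/value register pairs sit at a fixed
         * 8-byte stride (as the MAC_RCV_RULE_n / MAC_RCV_VALUE_n
         * offsets suggest).  Illustrative rewrite, not the driver's
         * code:
         *
         *      u32 n;
         *
         *      for (n = 4; n < limit; n++) {
         *              tw32(MAC_RCV_RULE_0 + n * 8, 0);
         *              tw32(MAC_RCV_VALUE_0 + n * 8, 0);
         *      }
         */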
10665
10666         if (tg3_flag(tp, ENABLE_APE))
10667                 /* Write our heartbeat update interval to APE. */
10668                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10669                                 APE_HOST_HEARTBEAT_INT_DISABLE);
10670
10671         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10672
10673         return 0;
10674 }
10675
10676 /* Called at device open time to get the chip ready for
10677  * packet processing.  Invoked with tp->lock held.
10678  */
10679 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10680 {
10681         /* Chip may have been just powered on. If so, the boot code may still
10682          * be running initialization. Wait for it to finish to avoid races in
10683          * accessing the hardware.
10684          */
10685         tg3_enable_register_access(tp);
10686         tg3_poll_fw(tp);
10687
10688         tg3_switch_clocks(tp);
10689
10690         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10691
10692         return tg3_reset_hw(tp, reset_phy);
10693 }
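/* A rough sketch of the boot-code wait tg3_poll_fw() performs above;
 * the mailbox names match tg3.h, but the loop bound is illustrative:
 *
 *      u32 val;
 *      int i;
 *
 *      for (i = 0; i < 100000; i++) {
 *              tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 *              if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
 *                      break;
 *              udelay(10);
 *      }
 *
 * The boot code writes the one's complement of the magic value once it
 * has finished initializing, so seeing it means the hardware is safe
 * to touch.
 */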
10694
10695 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10696 {
10697         int i;
10698
10699         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10700                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10701
10702                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10703                 off += len;
10704
10705                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10706                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10707                         memset(ocir, 0, TG3_OCIR_LEN);
10708         }
10709 }
10710
10711 /* sysfs attributes for hwmon */
10712 static ssize_t tg3_show_temp(struct device *dev,
10713                              struct device_attribute *devattr, char *buf)
10714 {
10715         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10716         struct tg3 *tp = dev_get_drvdata(dev);
10717         u32 temperature;
10718
10719         spin_lock_bh(&tp->lock);
10720         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10721                                 sizeof(temperature));
10722         spin_unlock_bh(&tp->lock);
10723         return sprintf(buf, "%u\n", temperature * 1000); /* millidegrees */
10724 }
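/* Once tg3_hwmon_open() below registers the device, the sensor is read
 * through the standard hwmon sysfs ABI, e.g. (path and value
 * illustrative):
 *
 *      $ cat /sys/class/hwmon/hwmon0/temp1_input
 *      45000
 *
 * i.e. 45 degrees Celsius, since temp1_input reports millidegrees.
 */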
10725
10726
10727 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10728                           TG3_TEMP_SENSOR_OFFSET);
10729 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10730                           TG3_TEMP_CAUTION_OFFSET);
10731 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10732                           TG3_TEMP_MAX_OFFSET);
10733
10734 static struct attribute *tg3_attrs[] = {
10735         &sensor_dev_attr_temp1_input.dev_attr.attr,
10736         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10737         &sensor_dev_attr_temp1_max.dev_attr.attr,
10738         NULL
10739 };
10740 ATTRIBUTE_GROUPS(tg3);
10741
10742 static void tg3_hwmon_close(struct tg3 *tp)
10743 {
10744         if (tp->hwmon_dev) {
10745                 hwmon_device_unregister(tp->hwmon_dev);
10746                 tp->hwmon_dev = NULL;
10747         }
10748 }
10749
10750 static void tg3_hwmon_open(struct tg3 *tp)
10751 {
10752         int i;
10753         u32 size = 0;
10754         struct pci_dev *pdev = tp->pdev;
10755         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10756
10757         tg3_sd_scan_scratchpad(tp, ocirs);
10758
10759         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10760                 if (!ocirs[i].src_data_length)
10761                         continue;
10762
10763                 size += ocirs[i].src_hdr_length;
10764                 size += ocirs[i].src_data_length;
10765         }
10766
10767         if (!size)
10768                 return;
10769
10770         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10771                                                           tp, tg3_groups);
10772         if (IS_ERR(tp->hwmon_dev)) {
10773                 tp->hwmon_dev = NULL;
10774                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10775         }
10776 }
10777
10778
10779 #define TG3_STAT_ADD32(PSTAT, REG) \
10780 do {    u32 __val = tr32(REG); \
10781         (PSTAT)->low += __val; \
10782         if ((PSTAT)->low < __val) \
10783                 (PSTAT)->high += 1; \
10784 } while (0)
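/* TG3_STAT_ADD32 folds a 32-bit hardware sample into a split 64-bit
 * counter: if the low word is smaller than the sample after the add,
 * the addition wrapped past 2^32 and a carry goes into the high word.
 * The same logic as a plain function, with an illustrative type
 * standing in for the driver's tg3_stat64_t:
 *
 *      struct stat64 { u32 high, low; };
 *
 *      static void stat_add32(struct stat64 *st, u32 sample)
 *      {
 *              st->low += sample;
 *              if (st->low < sample)   // wrapped; carry into high word
 *                      st->high += 1;
 *      }
 */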
10785
10786 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10787 {
10788         struct tg3_hw_stats *sp = tp->hw_stats;
10789
10790         if (!tp->link_up)
10791                 return;
10792
10793         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10794         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10795         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10796         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10797         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10798         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10799         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10800         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10801         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10802         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10803         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10804         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10805         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10806         if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10807                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10808                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10809                 u32 val;
10810
10811                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10812                 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10813                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10814                 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10815         }
10816
10817         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10818         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10819         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10820         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10821         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10822         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10823         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10824         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10825         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10826         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10827         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10828         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10829         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10830         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10831
10832         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10833         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10834             tg3_asic_rev(tp) != ASIC_REV_5762 &&
10835             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10836             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10837                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10838         } else {
10839                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10840                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10841                 if (val) {
10842                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10843                         sp->rx_discards.low += val;
10844                         if (sp->rx_discards.low < val)
10845                                 sp->rx_discards.high += 1;
10846                 }
10847                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10848         }
10849         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10850 }
10851
10852 static void tg3_chk_missed_msi(struct tg3 *tp)
10853 {
10854         u32 i;
10855
10856         for (i = 0; i < tp->irq_cnt; i++) {
10857                 struct tg3_napi *tnapi = &tp->napi[i];
10858
10859                 if (tg3_has_work(tnapi)) {
10860                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10861                             tnapi->last_tx_cons == tnapi->tx_cons) {
10862                                 if (tnapi->chk_msi_cnt < 1) {
10863                                         tnapi->chk_msi_cnt++;
10864                                         return;
10865                                 }
10866                                 tg3_msi(0, tnapi);
10867                         }
10868                 }
10869                 tnapi->chk_msi_cnt = 0;
10870                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10871                 tnapi->last_tx_cons = tnapi->tx_cons;
10872         }
10873 }
10874
10875 static void tg3_timer(unsigned long __opaque)
10876 {
10877         struct tg3 *tp = (struct tg3 *) __opaque;
10878
10879         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10880                 goto restart_timer;
10881
10882         spin_lock(&tp->lock);
10883
10884         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10885             tg3_flag(tp, 57765_CLASS))
10886                 tg3_chk_missed_msi(tp);
10887
10888         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10889                 /* BCM4785: Flush posted writes from GbE to host memory. */
10890                 tr32(HOSTCC_MODE);
10891         }
10892
10893         if (!tg3_flag(tp, TAGGED_STATUS)) {
10894                 /* All of this is needed because, when using non-tagged
10895                  * IRQ status, the mailbox/status_block protocol the chip
10896                  * uses with the CPU is race prone.
10897                  */
10898                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10899                         tw32(GRC_LOCAL_CTRL,
10900                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10901                 } else {
10902                         tw32(HOSTCC_MODE, tp->coalesce_mode |
10903                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10904                 }
10905
10906                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10907                         spin_unlock(&tp->lock);
10908                         tg3_reset_task_schedule(tp);
10909                         goto restart_timer;
10910                 }
10911         }
10912
10913         /* This part only runs once per second. */
10914         if (!--tp->timer_counter) {
10915                 if (tg3_flag(tp, 5705_PLUS))
10916                         tg3_periodic_fetch_stats(tp);
10917
10918                 if (tp->setlpicnt && !--tp->setlpicnt)
10919                         tg3_phy_eee_enable(tp);
10920
10921                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10922                         u32 mac_stat;
10923                         int phy_event;
10924
10925                         mac_stat = tr32(MAC_STATUS);
10926
10927                         phy_event = 0;
10928                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10929                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10930                                         phy_event = 1;
10931                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10932                                 phy_event = 1;
10933
10934                         if (phy_event)
10935                                 tg3_setup_phy(tp, false);
10936                 } else if (tg3_flag(tp, POLL_SERDES)) {
10937                         u32 mac_stat = tr32(MAC_STATUS);
10938                         int need_setup = 0;
10939
10940                         if (tp->link_up &&
10941                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10942                                 need_setup = 1;
10943                         }
10944                         if (!tp->link_up &&
10945                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
10946                                          MAC_STATUS_SIGNAL_DET))) {
10947                                 need_setup = 1;
10948                         }
10949                         if (need_setup) {
10950                                 if (!tp->serdes_counter) {
10951                                         tw32_f(MAC_MODE,
10952                                              (tp->mac_mode &
10953                                               ~MAC_MODE_PORT_MODE_MASK));
10954                                         udelay(40);
10955                                         tw32_f(MAC_MODE, tp->mac_mode);
10956                                         udelay(40);
10957                                 }
10958                                 tg3_setup_phy(tp, false);
10959                         }
10960                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10961                            tg3_flag(tp, 5780_CLASS)) {
10962                         tg3_serdes_parallel_detect(tp);
10963                 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
10964                         u32 cpmu = tr32(TG3_CPMU_STATUS);
10965                         bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
10966                                          TG3_CPMU_STATUS_LINK_MASK);
10967
10968                         if (link_up != tp->link_up)
10969                                 tg3_setup_phy(tp, false);
10970                 }
10971
10972                 tp->timer_counter = tp->timer_multiplier;
10973         }
10974
10975         /* Heartbeat is only sent once every 2 seconds.
10976          *
10977          * The heartbeat is to tell the ASF firmware that the host
10978          * driver is still alive.  In the event that the OS crashes,
10979          * ASF needs to reset the hardware to free up the FIFO space
10980          * that may be filled with rx packets destined for the host.
10981          * If the FIFO is full, ASF will no longer function properly.
10982          *
10983          * Unintended resets have been reported on real-time kernels
10984          * where the timer doesn't run on time.  Netpoll will have the
10985          * same problem.
10986          *
10987          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10988          * to check the ring condition when the heartbeat is expiring
10989          * before doing the reset.  This will prevent most unintended
10990          * resets.
10991          */
10992         if (!--tp->asf_counter) {
10993                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10994                         tg3_wait_for_event_ack(tp);
10995
10996                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10997                                       FWCMD_NICDRV_ALIVE3);
10998                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10999                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11000                                       TG3_FW_UPDATE_TIMEOUT_SEC);
11001
11002                         tg3_generate_fw_event(tp);
11003                 }
11004                 tp->asf_counter = tp->asf_multiplier;
11005         }
11006
11007         spin_unlock(&tp->lock);
11008
11009 restart_timer:
11010         tp->timer.expires = jiffies + tp->timer_offset;
11011         add_timer(&tp->timer);
11012 }
11013
11014 static void tg3_timer_init(struct tg3 *tp)
11015 {
11016         if (tg3_flag(tp, TAGGED_STATUS) &&
11017             tg3_asic_rev(tp) != ASIC_REV_5717 &&
11018             !tg3_flag(tp, 57765_CLASS))
11019                 tp->timer_offset = HZ;
11020         else
11021                 tp->timer_offset = HZ / 10;
11022
11023         BUG_ON(tp->timer_offset > HZ);
11024
11025         tp->timer_multiplier = (HZ / tp->timer_offset);
11026         tp->asf_multiplier = (HZ / tp->timer_offset) *
11027                              TG3_FW_UPDATE_FREQ_SEC;
11028
11029         init_timer(&tp->timer);
11030         tp->timer.data = (unsigned long) tp;
11031         tp->timer.function = tg3_timer;
11032 }
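/* Worked example of the arithmetic above, assuming HZ == 1000:
 *
 *      tagged status:     timer_offset = HZ      -> timer fires 1x/sec,
 *                         timer_multiplier = 1   -> stats every tick
 *      non-tagged status: timer_offset = HZ / 10 -> timer fires 10x/sec,
 *                         timer_multiplier = 10  -> stats every 10th tick
 *
 * Either way the once-per-second work in tg3_timer() runs at the same
 * wall-clock rate; only the polling granularity differs.  The ASF
 * heartbeat interval scales the same way via asf_multiplier.
 */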
11033
11034 static void tg3_timer_start(struct tg3 *tp)
11035 {
11036         tp->asf_counter   = tp->asf_multiplier;
11037         tp->timer_counter = tp->timer_multiplier;
11038
11039         tp->timer.expires = jiffies + tp->timer_offset;
11040         add_timer(&tp->timer);
11041 }
11042
11043 static void tg3_timer_stop(struct tg3 *tp)
11044 {
11045         del_timer_sync(&tp->timer);
11046 }
11047
11048 /* Restart hardware after configuration changes, self-test, etc.
11049  * Invoked with tp->lock held.
11050  */
11051 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11052         __releases(tp->lock)
11053         __acquires(tp->lock)
11054 {
11055         int err;
11056
11057         err = tg3_init_hw(tp, reset_phy);
11058         if (err) {
11059                 netdev_err(tp->dev,
11060                            "Failed to re-initialize device, aborting\n");
11061                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11062                 tg3_full_unlock(tp);
11063                 tg3_timer_stop(tp);
11064                 tp->irq_sync = 0;
11065                 tg3_napi_enable(tp);
11066                 dev_close(tp->dev);
11067                 tg3_full_lock(tp, 0);
11068         }
11069         return err;
11070 }
11071
11072 static void tg3_reset_task(struct work_struct *work)
11073 {
11074         struct tg3 *tp = container_of(work, struct tg3, reset_task);
11075         int err;
11076
11077         tg3_full_lock(tp, 0);
11078
11079         if (!netif_running(tp->dev)) {
11080                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11081                 tg3_full_unlock(tp);
11082                 return;
11083         }
11084
11085         tg3_full_unlock(tp);
11086
11087         tg3_phy_stop(tp);
11088
11089         tg3_netif_stop(tp);
11090
11091         tg3_full_lock(tp, 1);
11092
11093         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11094                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11095                 tp->write32_rx_mbox = tg3_write_flush_reg32;
11096                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11097                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11098         }
11099
11100         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11101         err = tg3_init_hw(tp, true);
11102         if (err)
11103                 goto out;
11104
11105         tg3_netif_start(tp);
11106
11107 out:
11108         tg3_full_unlock(tp);
11109
11110         if (!err)
11111                 tg3_phy_start(tp);
11112
11113         tg3_flag_clear(tp, RESET_TASK_PENDING);
11114 }
11115
11116 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11117 {
11118         irq_handler_t fn;
11119         unsigned long flags;
11120         char *name;
11121         struct tg3_napi *tnapi = &tp->napi[irq_num];
11122
11123         if (tp->irq_cnt == 1)
11124                 name = tp->dev->name;
11125         else {
11126                 name = &tnapi->irq_lbl[0];
11127                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11128                         snprintf(name, IFNAMSIZ,
11129                                  "%s-txrx-%d", tp->dev->name, irq_num);
11130                 else if (tnapi->tx_buffers)
11131                         snprintf(name, IFNAMSIZ,
11132                                  "%s-tx-%d", tp->dev->name, irq_num);
11133                 else if (tnapi->rx_rcb)
11134                         snprintf(name, IFNAMSIZ,
11135                                  "%s-rx-%d", tp->dev->name, irq_num);
11136                 else
11137                         snprintf(name, IFNAMSIZ,
11138                                  "%s-%d", tp->dev->name, irq_num);
11139                 name[IFNAMSIZ-1] = 0;
11140         }
11141
11142         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11143                 fn = tg3_msi;
11144                 if (tg3_flag(tp, 1SHOT_MSI))
11145                         fn = tg3_msi_1shot;
11146                 flags = 0;
11147         } else {
11148                 fn = tg3_interrupt;
11149                 if (tg3_flag(tp, TAGGED_STATUS))
11150                         fn = tg3_interrupt_tagged;
11151                 flags = IRQF_SHARED;
11152         }
11153
11154         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11155 }
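/* With multiple MSI-X vectors, the names built above appear in
 * /proc/interrupts one line per vector, e.g. (device name and ring mix
 * illustrative):
 *
 *      eth0-0        vector 0: link and error events only
 *      eth0-txrx-1   vector 1: combined tx/rx ring
 *      eth0-rx-2     vector 2: rx-only ring
 */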
11156
11157 static int tg3_test_interrupt(struct tg3 *tp)
11158 {
11159         struct tg3_napi *tnapi = &tp->napi[0];
11160         struct net_device *dev = tp->dev;
11161         int err, i, intr_ok = 0;
11162         u32 val;
11163
11164         if (!netif_running(dev))
11165                 return -ENODEV;
11166
11167         tg3_disable_ints(tp);
11168
11169         free_irq(tnapi->irq_vec, tnapi);
11170
11171         /*
11172          * Turn off MSI one-shot mode.  Otherwise this test has no
11173          * observable way to know whether the interrupt was delivered.
11174          */
11175         if (tg3_flag(tp, 57765_PLUS)) {
11176                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11177                 tw32(MSGINT_MODE, val);
11178         }
11179
11180         err = request_irq(tnapi->irq_vec, tg3_test_isr,
11181                           IRQF_SHARED, dev->name, tnapi);
11182         if (err)
11183                 return err;
11184
11185         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11186         tg3_enable_ints(tp);
11187
11188         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11189                tnapi->coal_now);
11190
11191         for (i = 0; i < 5; i++) {
11192                 u32 int_mbox, misc_host_ctrl;
11193
11194                 int_mbox = tr32_mailbox(tnapi->int_mbox);
11195                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11196
11197                 if ((int_mbox != 0) ||
11198                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11199                         intr_ok = 1;
11200                         break;
11201                 }
11202
11203                 if (tg3_flag(tp, 57765_PLUS) &&
11204                     tnapi->hw_status->status_tag != tnapi->last_tag)
11205                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11206
11207                 msleep(10);
11208         }
11209
11210         tg3_disable_ints(tp);
11211
11212         free_irq(tnapi->irq_vec, tnapi);
11213
11214         err = tg3_request_irq(tp, 0);
11215
11216         if (err)
11217                 return err;
11218
11219         if (intr_ok) {
11220                 /* Re-enable MSI one-shot mode. */
11221                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11222                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11223                         tw32(MSGINT_MODE, val);
11224                 }
11225                 return 0;
11226         }
11227
11228         return -EIO;
11229 }
11230
11231 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11232  * INTx mode is successfully restored.
11233  */
11234 static int tg3_test_msi(struct tg3 *tp)
11235 {
11236         int err;
11237         u16 pci_cmd;
11238
11239         if (!tg3_flag(tp, USING_MSI))
11240                 return 0;
11241
11242         /* Turn off SERR reporting in case MSI terminates with Master
11243          * Abort.
11244          */
11245         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11246         pci_write_config_word(tp->pdev, PCI_COMMAND,
11247                               pci_cmd & ~PCI_COMMAND_SERR);
11248
11249         err = tg3_test_interrupt(tp);
11250
11251         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11252
11253         if (!err)
11254                 return 0;
11255
11256         /* other failures */
11257         if (err != -EIO)
11258                 return err;
11259
11260         /* MSI test failed, go back to INTx mode */
11261         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11262                     "to INTx mode. Please report this failure to the PCI "
11263                     "maintainer and include system chipset information\n");
11264
11265         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11266
11267         pci_disable_msi(tp->pdev);
11268
11269         tg3_flag_clear(tp, USING_MSI);
11270         tp->napi[0].irq_vec = tp->pdev->irq;
11271
11272         err = tg3_request_irq(tp, 0);
11273         if (err)
11274                 return err;
11275
11276         /* Need to reset the chip because the MSI cycle may have terminated
11277          * with Master Abort.
11278          */
11279         tg3_full_lock(tp, 1);
11280
11281         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11282         err = tg3_init_hw(tp, true);
11283
11284         tg3_full_unlock(tp);
11285
11286         if (err)
11287                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11288
11289         return err;
11290 }
11291
11292 static int tg3_request_firmware(struct tg3 *tp)
11293 {
11294         const struct tg3_firmware_hdr *fw_hdr;
11295
11296         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11297                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11298                            tp->fw_needed);
11299                 return -ENOENT;
11300         }
11301
11302         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11303
11304         /* Firmware blob starts with version numbers, followed by
11305          * the start address and the _full_ length including BSS sections
11306          * (which must be longer than the actual data, of course).
11307          */
11308
11309         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11310         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11311                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11312                            tp->fw_len, tp->fw_needed);
11313                 release_firmware(tp->fw);
11314                 tp->fw = NULL;
11315                 return -EINVAL;
11316         }
11317
11318         /* We no longer need firmware; we have it. */
11319         tp->fw_needed = NULL;
11320         return 0;
11321 }
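
/* For reference, the header fw_hdr points at above is the three-word
 * structure from tg3.h; a sketch of the layout that the "bogus
 * length" check relies on (the field notes are ours):
 *
 *	struct tg3_firmware_hdr {
 *		__be32 version;		(packed major/minor version)
 *		__be32 base_addr;	(load address in NIC-local memory)
 *		__be32 len;		(data + BSS, so >= payload size)
 *	};
 *
 * A blob of total size S carries S - TG3_FW_HDR_LEN bytes of payload,
 * so any image whose advertised len is smaller than that is rejected.
 */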
11322
11323 static u32 tg3_irq_count(struct tg3 *tp)
11324 {
11325         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11326
11327         if (irq_cnt > 1) {
11328                 /* We want as many rx rings enabled as there are cpus.
11329                  * In multiqueue MSI-X mode, the first MSI-X vector
11330                  * only deals with link interrupts, etc, so we add
11331                  * one to the number of vectors we are requesting.
11332                  */
11333                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11334         }
11335
11336         return irq_cnt;
11337 }
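
/* Worked example of the count above: with the default rxq_cnt == 4 on
 * a four-CPU box and txq_cnt == 1, max(4, 1) == 4 > 1, so we request
 * 4 + 1 == 5 vectors (the extra one is vector 0, which services only
 * link interrupts), clamped to tp->irq_max.  With a single queue no
 * extra vector is added and vector 0 handles link and traffic alike.
 */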
11338
11339 static bool tg3_enable_msix(struct tg3 *tp)
11340 {
11341         int i, rc;
11342         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11343
11344         tp->txq_cnt = tp->txq_req;
11345         tp->rxq_cnt = tp->rxq_req;
11346         if (!tp->rxq_cnt)
11347                 tp->rxq_cnt = netif_get_num_default_rss_queues();
11348         if (tp->rxq_cnt > tp->rxq_max)
11349                 tp->rxq_cnt = tp->rxq_max;
11350
11351         /* Disable multiple TX rings by default.  Simple round-robin hardware
11352          * scheduling of the TX rings can cause starvation of rings with
11353          * small packets when other rings have TSO or jumbo packets.
11354          */
11355         if (!tp->txq_req)
11356                 tp->txq_cnt = 1;
11357
11358         tp->irq_cnt = tg3_irq_count(tp);
11359
11360         for (i = 0; i < tp->irq_max; i++) {
11361                 msix_ent[i].entry  = i;
11362                 msix_ent[i].vector = 0;
11363         }
11364
11365         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11366         if (rc < 0) {
11367                 return false;
11368         } else if (rc != 0) {
11369                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
11370                         return false;
11371                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11372                               tp->irq_cnt, rc);
11373                 tp->irq_cnt = rc;
11374                 tp->rxq_cnt = max(rc - 1, 1);
11375                 if (tp->txq_cnt)
11376                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11377         }
11378
11379         for (i = 0; i < tp->irq_max; i++)
11380                 tp->napi[i].irq_vec = msix_ent[i].vector;
11381
11382         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11383                 pci_disable_msix(tp->pdev);
11384                 return false;
11385         }
11386
11387         if (tp->irq_cnt == 1)
11388                 return true;
11389
11390         tg3_flag_set(tp, ENABLE_RSS);
11391
11392         if (tp->txq_cnt > 1)
11393                 tg3_flag_set(tp, ENABLE_TSS);
11394
11395         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11396
11397         return true;
11398 }
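
/* A note on the retry dance above: this era's pci_enable_msix()
 * returns a positive count when fewer vectors are available than were
 * requested, and the driver then re-requests exactly that many.  On
 * kernels providing pci_enable_msix_range(), the same logic collapses
 * to the following sketch (an assumption about later APIs, not what
 * this file uses):
 *
 *	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
 *	if (rc < 0)
 *		return false;	(no vectors at all)
 *	(rc is now the granted count, 1 <= rc <= tp->irq_cnt)
 */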
11399
11400 static void tg3_ints_init(struct tg3 *tp)
11401 {
11402         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11403             !tg3_flag(tp, TAGGED_STATUS)) {
11404                 /* All MSI supporting chips should support tagged
11405                  * status.  Warn and fall back to INTx otherwise.
11406                  */
11407                 netdev_warn(tp->dev,
11408                             "MSI without TAGGED_STATUS? Not using MSI\n");
11409                 goto defcfg;
11410         }
11411
11412         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11413                 tg3_flag_set(tp, USING_MSIX);
11414         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11415                 tg3_flag_set(tp, USING_MSI);
11416
11417         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11418                 u32 msi_mode = tr32(MSGINT_MODE);
11419                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11420                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11421                 if (!tg3_flag(tp, 1SHOT_MSI))
11422                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11423                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11424         }
11425 defcfg:
11426         if (!tg3_flag(tp, USING_MSIX)) {
11427                 tp->irq_cnt = 1;
11428                 tp->napi[0].irq_vec = tp->pdev->irq;
11429         }
11430
11431         if (tp->irq_cnt == 1) {
11432                 tp->txq_cnt = 1;
11433                 tp->rxq_cnt = 1;
11434                 netif_set_real_num_tx_queues(tp->dev, 1);
11435                 netif_set_real_num_rx_queues(tp->dev, 1);
11436         }
11437 }
11438
11439 static void tg3_ints_fini(struct tg3 *tp)
11440 {
11441         if (tg3_flag(tp, USING_MSIX))
11442                 pci_disable_msix(tp->pdev);
11443         else if (tg3_flag(tp, USING_MSI))
11444                 pci_disable_msi(tp->pdev);
11445         tg3_flag_clear(tp, USING_MSI);
11446         tg3_flag_clear(tp, USING_MSIX);
11447         tg3_flag_clear(tp, ENABLE_RSS);
11448         tg3_flag_clear(tp, ENABLE_TSS);
11449 }
11450
11451 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11452                      bool init)
11453 {
11454         struct net_device *dev = tp->dev;
11455         int i, err;
11456
11457         /*
11458          * Setup interrupts first so we know how
11459          * many NAPI resources to allocate
11460          */
11461         tg3_ints_init(tp);
11462
11463         tg3_rss_check_indir_tbl(tp);
11464
11465         /* The placement of this call is tied
11466          * to the setup and use of Host TX descriptors.
11467          */
11468         err = tg3_alloc_consistent(tp);
11469         if (err)
11470                 goto out_ints_fini;
11471
11472         tg3_napi_init(tp);
11473
11474         tg3_napi_enable(tp);
11475
11476         for (i = 0; i < tp->irq_cnt; i++) {
11477                 struct tg3_napi *tnapi = &tp->napi[i];
11478                 err = tg3_request_irq(tp, i);
11479                 if (err) {
11480                         for (i--; i >= 0; i--) {
11481                                 tnapi = &tp->napi[i];
11482                                 free_irq(tnapi->irq_vec, tnapi);
11483                         }
11484                         goto out_napi_fini;
11485                 }
11486         }
11487
11488         tg3_full_lock(tp, 0);
11489
11490         if (init)
11491                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11492
11493         err = tg3_init_hw(tp, reset_phy);
11494         if (err) {
11495                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11496                 tg3_free_rings(tp);
11497         }
11498
11499         tg3_full_unlock(tp);
11500
11501         if (err)
11502                 goto out_free_irq;
11503
11504         if (test_irq && tg3_flag(tp, USING_MSI)) {
11505                 err = tg3_test_msi(tp);
11506
11507                 if (err) {
11508                         tg3_full_lock(tp, 0);
11509                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11510                         tg3_free_rings(tp);
11511                         tg3_full_unlock(tp);
11512
11513                         goto out_napi_fini;
11514                 }
11515
11516                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11517                         u32 val = tr32(PCIE_TRANSACTION_CFG);
11518
11519                         tw32(PCIE_TRANSACTION_CFG,
11520                              val | PCIE_TRANS_CFG_1SHOT_MSI);
11521                 }
11522         }
11523
11524         tg3_phy_start(tp);
11525
11526         tg3_hwmon_open(tp);
11527
11528         tg3_full_lock(tp, 0);
11529
11530         tg3_timer_start(tp);
11531         tg3_flag_set(tp, INIT_COMPLETE);
11532         tg3_enable_ints(tp);
11533
11534         if (init)
11535                 tg3_ptp_init(tp);
11536         else
11537                 tg3_ptp_resume(tp);
11538
11539
11540         tg3_full_unlock(tp);
11541
11542         netif_tx_start_all_queues(dev);
11543
11544         /*
11545          * Reset the loopback feature if it was turned on while the device
11546          * was down, to make sure that it's configured properly now.
11547          */
11548         if (dev->features & NETIF_F_LOOPBACK)
11549                 tg3_set_loopback(dev, dev->features);
11550
11551         return 0;
11552
11553 out_free_irq:
11554         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11555                 struct tg3_napi *tnapi = &tp->napi[i];
11556                 free_irq(tnapi->irq_vec, tnapi);
11557         }
11558
11559 out_napi_fini:
11560         tg3_napi_disable(tp);
11561         tg3_napi_fini(tp);
11562         tg3_free_consistent(tp);
11563
11564 out_ints_fini:
11565         tg3_ints_fini(tp);
11566
11567         return err;
11568 }
11569
11570 static void tg3_stop(struct tg3 *tp)
11571 {
11572         int i;
11573
11574         tg3_reset_task_cancel(tp);
11575         tg3_netif_stop(tp);
11576
11577         tg3_timer_stop(tp);
11578
11579         tg3_hwmon_close(tp);
11580
11581         tg3_phy_stop(tp);
11582
11583         tg3_full_lock(tp, 1);
11584
11585         tg3_disable_ints(tp);
11586
11587         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11588         tg3_free_rings(tp);
11589         tg3_flag_clear(tp, INIT_COMPLETE);
11590
11591         tg3_full_unlock(tp);
11592
11593         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11594                 struct tg3_napi *tnapi = &tp->napi[i];
11595                 free_irq(tnapi->irq_vec, tnapi);
11596         }
11597
11598         tg3_ints_fini(tp);
11599
11600         tg3_napi_fini(tp);
11601
11602         tg3_free_consistent(tp);
11603 }
11604
11605 static int tg3_open(struct net_device *dev)
11606 {
11607         struct tg3 *tp = netdev_priv(dev);
11608         int err;
11609
11610         if (tp->fw_needed) {
11611                 err = tg3_request_firmware(tp);
11612                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11613                         if (err) {
11614                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11615                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11616                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11617                                 netdev_warn(tp->dev, "EEE capability restored\n");
11618                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11619                         }
11620                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11621                         if (err)
11622                                 return err;
11623                 } else if (err) {
11624                         netdev_warn(tp->dev, "TSO capability disabled\n");
11625                         tg3_flag_clear(tp, TSO_CAPABLE);
11626                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11627                         netdev_notice(tp->dev, "TSO capability restored\n");
11628                         tg3_flag_set(tp, TSO_CAPABLE);
11629                 }
11630         }
11631
11632         tg3_carrier_off(tp);
11633
11634         err = tg3_power_up(tp);
11635         if (err)
11636                 return err;
11637
11638         tg3_full_lock(tp, 0);
11639
11640         tg3_disable_ints(tp);
11641         tg3_flag_clear(tp, INIT_COMPLETE);
11642
11643         tg3_full_unlock(tp);
11644
11645         err = tg3_start(tp,
11646                         !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11647                         true, true);
11648         if (err) {
11649                 tg3_frob_aux_power(tp, false);
11650                 pci_set_power_state(tp->pdev, PCI_D3hot);
11651         }
11652
11653         if (tg3_flag(tp, PTP_CAPABLE)) {
11654                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11655                                                    &tp->pdev->dev);
11656                 if (IS_ERR(tp->ptp_clock))
11657                         tp->ptp_clock = NULL;
11658         }
11659
11660         return err;
11661 }
11662
11663 static int tg3_close(struct net_device *dev)
11664 {
11665         struct tg3 *tp = netdev_priv(dev);
11666
11667         tg3_ptp_fini(tp);
11668
11669         tg3_stop(tp);
11670
11671         /* Clear stats across close / open calls */
11672         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11673         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11674
11675         if (pci_device_is_present(tp->pdev)) {
11676                 tg3_power_down_prepare(tp);
11677
11678                 tg3_carrier_off(tp);
11679         }
11680         return 0;
11681 }
11682
11683 static inline u64 get_stat64(tg3_stat64_t *val)
11684 {
11685         return ((u64)val->high << 32) | ((u64)val->low);
11686 }
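
/* e.g. a counter read back as { .high = 0x1, .low = 0x2 } yields
 * 0x0000000100000002; the low word alone would wrap every 2^32
 * events, hence the 64-bit splice.
 */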
11687
11688 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11689 {
11690         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11691
11692         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11693             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11694              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11695                 u32 val;
11696
11697                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11698                         tg3_writephy(tp, MII_TG3_TEST1,
11699                                      val | MII_TG3_TEST1_CRC_EN);
11700                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11701                 } else
11702                         val = 0;
11703
11704                 tp->phy_crc_errors += val;
11705
11706                 return tp->phy_crc_errors;
11707         }
11708
11709         return get_stat64(&hw_stats->rx_fcs_errors);
11710 }
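
/* On 5700/5701 with a copper PHY the CRC error counter lives in the
 * PHY and apparently clears on read (hence the accumulation into
 * tp->phy_crc_errors); all other chips report the MAC's
 * rx_fcs_errors statistic directly.
 */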
11711
11712 #define ESTAT_ADD(member) \
11713         estats->member =        old_estats->member + \
11714                                 get_stat64(&hw_stats->member)
11715
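/* ESTAT_ADD(rx_octets), for instance, expands to
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *				get_stat64(&hw_stats->rx_octets);
 *
 * so the live hardware counters (zeroed by a chip reset) are added to
 * the totals snapshotted in tp->estats_prev, keeping the ethtool
 * statistics monotonic across resets and close/open cycles.
 */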
11716 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11717 {
11718         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11719         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11720
11721         ESTAT_ADD(rx_octets);
11722         ESTAT_ADD(rx_fragments);
11723         ESTAT_ADD(rx_ucast_packets);
11724         ESTAT_ADD(rx_mcast_packets);
11725         ESTAT_ADD(rx_bcast_packets);
11726         ESTAT_ADD(rx_fcs_errors);
11727         ESTAT_ADD(rx_align_errors);
11728         ESTAT_ADD(rx_xon_pause_rcvd);
11729         ESTAT_ADD(rx_xoff_pause_rcvd);
11730         ESTAT_ADD(rx_mac_ctrl_rcvd);
11731         ESTAT_ADD(rx_xoff_entered);
11732         ESTAT_ADD(rx_frame_too_long_errors);
11733         ESTAT_ADD(rx_jabbers);
11734         ESTAT_ADD(rx_undersize_packets);
11735         ESTAT_ADD(rx_in_length_errors);
11736         ESTAT_ADD(rx_out_length_errors);
11737         ESTAT_ADD(rx_64_or_less_octet_packets);
11738         ESTAT_ADD(rx_65_to_127_octet_packets);
11739         ESTAT_ADD(rx_128_to_255_octet_packets);
11740         ESTAT_ADD(rx_256_to_511_octet_packets);
11741         ESTAT_ADD(rx_512_to_1023_octet_packets);
11742         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11743         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11744         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11745         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11746         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11747
11748         ESTAT_ADD(tx_octets);
11749         ESTAT_ADD(tx_collisions);
11750         ESTAT_ADD(tx_xon_sent);
11751         ESTAT_ADD(tx_xoff_sent);
11752         ESTAT_ADD(tx_flow_control);
11753         ESTAT_ADD(tx_mac_errors);
11754         ESTAT_ADD(tx_single_collisions);
11755         ESTAT_ADD(tx_mult_collisions);
11756         ESTAT_ADD(tx_deferred);
11757         ESTAT_ADD(tx_excessive_collisions);
11758         ESTAT_ADD(tx_late_collisions);
11759         ESTAT_ADD(tx_collide_2times);
11760         ESTAT_ADD(tx_collide_3times);
11761         ESTAT_ADD(tx_collide_4times);
11762         ESTAT_ADD(tx_collide_5times);
11763         ESTAT_ADD(tx_collide_6times);
11764         ESTAT_ADD(tx_collide_7times);
11765         ESTAT_ADD(tx_collide_8times);
11766         ESTAT_ADD(tx_collide_9times);
11767         ESTAT_ADD(tx_collide_10times);
11768         ESTAT_ADD(tx_collide_11times);
11769         ESTAT_ADD(tx_collide_12times);
11770         ESTAT_ADD(tx_collide_13times);
11771         ESTAT_ADD(tx_collide_14times);
11772         ESTAT_ADD(tx_collide_15times);
11773         ESTAT_ADD(tx_ucast_packets);
11774         ESTAT_ADD(tx_mcast_packets);
11775         ESTAT_ADD(tx_bcast_packets);
11776         ESTAT_ADD(tx_carrier_sense_errors);
11777         ESTAT_ADD(tx_discards);
11778         ESTAT_ADD(tx_errors);
11779
11780         ESTAT_ADD(dma_writeq_full);
11781         ESTAT_ADD(dma_write_prioq_full);
11782         ESTAT_ADD(rxbds_empty);
11783         ESTAT_ADD(rx_discards);
11784         ESTAT_ADD(rx_errors);
11785         ESTAT_ADD(rx_threshold_hit);
11786
11787         ESTAT_ADD(dma_readq_full);
11788         ESTAT_ADD(dma_read_prioq_full);
11789         ESTAT_ADD(tx_comp_queue_full);
11790
11791         ESTAT_ADD(ring_set_send_prod_index);
11792         ESTAT_ADD(ring_status_update);
11793         ESTAT_ADD(nic_irqs);
11794         ESTAT_ADD(nic_avoided_irqs);
11795         ESTAT_ADD(nic_tx_threshold_hit);
11796
11797         ESTAT_ADD(mbuf_lwm_thresh_hit);
11798 }
11799
11800 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11801 {
11802         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11803         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11804
11805         stats->rx_packets = old_stats->rx_packets +
11806                 get_stat64(&hw_stats->rx_ucast_packets) +
11807                 get_stat64(&hw_stats->rx_mcast_packets) +
11808                 get_stat64(&hw_stats->rx_bcast_packets);
11809
11810         stats->tx_packets = old_stats->tx_packets +
11811                 get_stat64(&hw_stats->tx_ucast_packets) +
11812                 get_stat64(&hw_stats->tx_mcast_packets) +
11813                 get_stat64(&hw_stats->tx_bcast_packets);
11814
11815         stats->rx_bytes = old_stats->rx_bytes +
11816                 get_stat64(&hw_stats->rx_octets);
11817         stats->tx_bytes = old_stats->tx_bytes +
11818                 get_stat64(&hw_stats->tx_octets);
11819
11820         stats->rx_errors = old_stats->rx_errors +
11821                 get_stat64(&hw_stats->rx_errors);
11822         stats->tx_errors = old_stats->tx_errors +
11823                 get_stat64(&hw_stats->tx_errors) +
11824                 get_stat64(&hw_stats->tx_mac_errors) +
11825                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11826                 get_stat64(&hw_stats->tx_discards);
11827
11828         stats->multicast = old_stats->multicast +
11829                 get_stat64(&hw_stats->rx_mcast_packets);
11830         stats->collisions = old_stats->collisions +
11831                 get_stat64(&hw_stats->tx_collisions);
11832
11833         stats->rx_length_errors = old_stats->rx_length_errors +
11834                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11835                 get_stat64(&hw_stats->rx_undersize_packets);
11836
11837         stats->rx_frame_errors = old_stats->rx_frame_errors +
11838                 get_stat64(&hw_stats->rx_align_errors);
11839         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11840                 get_stat64(&hw_stats->tx_discards);
11841         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11842                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11843
11844         stats->rx_crc_errors = old_stats->rx_crc_errors +
11845                 tg3_calc_crc_errors(tp);
11846
11847         stats->rx_missed_errors = old_stats->rx_missed_errors +
11848                 get_stat64(&hw_stats->rx_discards);
11849
11850         stats->rx_dropped = tp->rx_dropped;
11851         stats->tx_dropped = tp->tx_dropped;
11852 }
11853
11854 static int tg3_get_regs_len(struct net_device *dev)
11855 {
11856         return TG3_REG_BLK_SIZE;
11857 }
11858
11859 static void tg3_get_regs(struct net_device *dev,
11860                 struct ethtool_regs *regs, void *_p)
11861 {
11862         struct tg3 *tp = netdev_priv(dev);
11863
11864         regs->version = 0;
11865
11866         memset(_p, 0, TG3_REG_BLK_SIZE);
11867
11868         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11869                 return;
11870
11871         tg3_full_lock(tp, 0);
11872
11873         tg3_dump_legacy_regs(tp, (u32 *)_p);
11874
11875         tg3_full_unlock(tp);
11876 }
11877
11878 static int tg3_get_eeprom_len(struct net_device *dev)
11879 {
11880         struct tg3 *tp = netdev_priv(dev);
11881
11882         return tp->nvram_size;
11883 }
11884
11885 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11886 {
11887         struct tg3 *tp = netdev_priv(dev);
11888         int ret;
11889         u8  *pd;
11890         u32 i, offset, len, b_offset, b_count;
11891         __be32 val;
11892
11893         if (tg3_flag(tp, NO_NVRAM))
11894                 return -EINVAL;
11895
11896         offset = eeprom->offset;
11897         len = eeprom->len;
11898         eeprom->len = 0;
11899
11900         eeprom->magic = TG3_EEPROM_MAGIC;
11901
11902         if (offset & 3) {
11903                 /* adjustments to start on required 4 byte boundary */
11904                 b_offset = offset & 3;
11905                 b_count = 4 - b_offset;
11906                 if (b_count > len) {
11907                         /* i.e. offset=1 len=2 */
11908                         b_count = len;
11909                 }
11910                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11911                 if (ret)
11912                         return ret;
11913                 memcpy(data, ((char *)&val) + b_offset, b_count);
11914                 len -= b_count;
11915                 offset += b_count;
11916                 eeprom->len += b_count;
11917         }
11918
11919         /* read bytes up to the last 4 byte boundary */
11920         pd = &data[eeprom->len];
11921         for (i = 0; i < (len - (len & 3)); i += 4) {
11922                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11923                 if (ret) {
11924                         eeprom->len += i;
11925                         return ret;
11926                 }
11927                 memcpy(pd + i, &val, 4);
11928         }
11929         eeprom->len += i;
11930
11931         if (len & 3) {
11932                 /* read last bytes not ending on 4 byte boundary */
11933                 pd = &data[eeprom->len];
11934                 b_count = len & 3;
11935                 b_offset = offset + len - b_count;
11936                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11937                 if (ret)
11938                         return ret;
11939                 memcpy(pd, &val, b_count);
11940                 eeprom->len += b_count;
11941         }
11942         return 0;
11943 }
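
/* Worked example of the alignment handling above, for offset=5 and
 * len=10 (NVRAM is only readable in 4-byte words):
 *
 *	head:   b_offset=1, b_count=3: read the word at 4, copy bytes 5..7
 *	middle: one aligned word at 8, copy bytes 8..11
 *	tail:   b_count=3: read the word at 12, copy bytes 12..14
 *
 * for a total of 3 + 4 + 3 == 10 bytes, exactly offsets 5..14.
 */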
11944
11945 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11946 {
11947         struct tg3 *tp = netdev_priv(dev);
11948         int ret;
11949         u32 offset, len, b_offset, odd_len;
11950         u8 *buf;
11951         __be32 start, end;
11952
11953         if (tg3_flag(tp, NO_NVRAM) ||
11954             eeprom->magic != TG3_EEPROM_MAGIC)
11955                 return -EINVAL;
11956
11957         offset = eeprom->offset;
11958         len = eeprom->len;
11959
11960         if ((b_offset = (offset & 3))) {
11961                 /* adjustments to start on required 4 byte boundary */
11962                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11963                 if (ret)
11964                         return ret;
11965                 len += b_offset;
11966                 offset &= ~3;
11967                 if (len < 4)
11968                         len = 4;
11969         }
11970
11971         odd_len = 0;
11972         if (len & 3) {
11973                 /* adjustments to end on required 4 byte boundary */
11974                 odd_len = 1;
11975                 len = (len + 3) & ~3;
11976                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11977                 if (ret)
11978                         return ret;
11979         }
11980
11981         buf = data;
11982         if (b_offset || odd_len) {
11983                 buf = kmalloc(len, GFP_KERNEL);
11984                 if (!buf)
11985                         return -ENOMEM;
11986                 if (b_offset)
11987                         memcpy(buf, &start, 4);
11988                 if (odd_len)
11989                         memcpy(buf+len-4, &end, 4);
11990                 memcpy(buf + b_offset, data, eeprom->len);
11991         }
11992
11993         ret = tg3_nvram_write_block(tp, offset, len, buf);
11994
11995         if (buf != data)
11996                 kfree(buf);
11997
11998         return ret;
11999 }
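
/* Worked read-modify-write example for the fixups above, with
 * offset=6 and len=5 (user data covers bytes 6..10):
 *
 *	b_offset=2: read the word at 4 into start; offset=4, len=7
 *	len & 3:    odd_len=1, len rounds up to 8; read the word at 8
 *	            into end
 *	buf:        start|end covers bytes 4..11, the 5 user bytes then
 *	            overwrite 6..10, and all 8 bytes are written back
 *
 * so the neighbouring bytes 4..5 and 11 survive the write unchanged.
 */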
12000
12001 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
12002 {
12003         struct tg3 *tp = netdev_priv(dev);
12004
12005         if (tg3_flag(tp, USE_PHYLIB)) {
12006                 struct phy_device *phydev;
12007                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12008                         return -EAGAIN;
12009                 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12010                 return phy_ethtool_gset(phydev, cmd);
12011         }
12012
12013         cmd->supported = (SUPPORTED_Autoneg);
12014
12015         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12016                 cmd->supported |= (SUPPORTED_1000baseT_Half |
12017                                    SUPPORTED_1000baseT_Full);
12018
12019         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12020                 cmd->supported |= (SUPPORTED_100baseT_Half |
12021                                   SUPPORTED_100baseT_Full |
12022                                   SUPPORTED_10baseT_Half |
12023                                   SUPPORTED_10baseT_Full |
12024                                   SUPPORTED_TP);
12025                 cmd->port = PORT_TP;
12026         } else {
12027                 cmd->supported |= SUPPORTED_FIBRE;
12028                 cmd->port = PORT_FIBRE;
12029         }
12030
12031         cmd->advertising = tp->link_config.advertising;
12032         if (tg3_flag(tp, PAUSE_AUTONEG)) {
12033                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12034                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12035                                 cmd->advertising |= ADVERTISED_Pause;
12036                         } else {
12037                                 cmd->advertising |= ADVERTISED_Pause |
12038                                                     ADVERTISED_Asym_Pause;
12039                         }
12040                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12041                         cmd->advertising |= ADVERTISED_Asym_Pause;
12042                 }
12043         }
12044         if (netif_running(dev) && tp->link_up) {
12045                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
12046                 cmd->duplex = tp->link_config.active_duplex;
12047                 cmd->lp_advertising = tp->link_config.rmt_adv;
12048                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12049                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12050                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
12051                         else
12052                                 cmd->eth_tp_mdix = ETH_TP_MDI;
12053                 }
12054         } else {
12055                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
12056                 cmd->duplex = DUPLEX_UNKNOWN;
12057                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
12058         }
12059         cmd->phy_address = tp->phy_addr;
12060         cmd->transceiver = XCVR_INTERNAL;
12061         cmd->autoneg = tp->link_config.autoneg;
12062         cmd->maxtxpkt = 0;
12063         cmd->maxrxpkt = 0;
12064         return 0;
12065 }
12066
12067 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
12068 {
12069         struct tg3 *tp = netdev_priv(dev);
12070         u32 speed = ethtool_cmd_speed(cmd);
12071
12072         if (tg3_flag(tp, USE_PHYLIB)) {
12073                 struct phy_device *phydev;
12074                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12075                         return -EAGAIN;
12076                 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12077                 return phy_ethtool_sset(phydev, cmd);
12078         }
12079
12080         if (cmd->autoneg != AUTONEG_ENABLE &&
12081             cmd->autoneg != AUTONEG_DISABLE)
12082                 return -EINVAL;
12083
12084         if (cmd->autoneg == AUTONEG_DISABLE &&
12085             cmd->duplex != DUPLEX_FULL &&
12086             cmd->duplex != DUPLEX_HALF)
12087                 return -EINVAL;
12088
12089         if (cmd->autoneg == AUTONEG_ENABLE) {
12090                 u32 mask = ADVERTISED_Autoneg |
12091                            ADVERTISED_Pause |
12092                            ADVERTISED_Asym_Pause;
12093
12094                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12095                         mask |= ADVERTISED_1000baseT_Half |
12096                                 ADVERTISED_1000baseT_Full;
12097
12098                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12099                         mask |= ADVERTISED_100baseT_Half |
12100                                 ADVERTISED_100baseT_Full |
12101                                 ADVERTISED_10baseT_Half |
12102                                 ADVERTISED_10baseT_Full |
12103                                 ADVERTISED_TP;
12104                 else
12105                         mask |= ADVERTISED_FIBRE;
12106
12107                 if (cmd->advertising & ~mask)
12108                         return -EINVAL;
12109
12110                 mask &= (ADVERTISED_1000baseT_Half |
12111                          ADVERTISED_1000baseT_Full |
12112                          ADVERTISED_100baseT_Half |
12113                          ADVERTISED_100baseT_Full |
12114                          ADVERTISED_10baseT_Half |
12115                          ADVERTISED_10baseT_Full);
12116
12117                 cmd->advertising &= mask;
12118         } else {
12119                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12120                         if (speed != SPEED_1000)
12121                                 return -EINVAL;
12122
12123                         if (cmd->duplex != DUPLEX_FULL)
12124                                 return -EINVAL;
12125                 } else {
12126                         if (speed != SPEED_100 &&
12127                             speed != SPEED_10)
12128                                 return -EINVAL;
12129                 }
12130         }
12131
12132         tg3_full_lock(tp, 0);
12133
12134         tp->link_config.autoneg = cmd->autoneg;
12135         if (cmd->autoneg == AUTONEG_ENABLE) {
12136                 tp->link_config.advertising = (cmd->advertising |
12137                                               ADVERTISED_Autoneg);
12138                 tp->link_config.speed = SPEED_UNKNOWN;
12139                 tp->link_config.duplex = DUPLEX_UNKNOWN;
12140         } else {
12141                 tp->link_config.advertising = 0;
12142                 tp->link_config.speed = speed;
12143                 tp->link_config.duplex = cmd->duplex;
12144         }
12145
12146         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12147
12148         tg3_warn_mgmt_link_flap(tp);
12149
12150         if (netif_running(dev))
12151                 tg3_setup_phy(tp, true);
12152
12153         tg3_full_unlock(tp);
12154
12155         return 0;
12156 }
12157
12158 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12159 {
12160         struct tg3 *tp = netdev_priv(dev);
12161
12162         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12163         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12164         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12165         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12166 }
12167
12168 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12169 {
12170         struct tg3 *tp = netdev_priv(dev);
12171
12172         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12173                 wol->supported = WAKE_MAGIC;
12174         else
12175                 wol->supported = 0;
12176         wol->wolopts = 0;
12177         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12178                 wol->wolopts = WAKE_MAGIC;
12179         memset(&wol->sopass, 0, sizeof(wol->sopass));
12180 }
12181
12182 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12183 {
12184         struct tg3 *tp = netdev_priv(dev);
12185         struct device *dp = &tp->pdev->dev;
12186
12187         if (wol->wolopts & ~WAKE_MAGIC)
12188                 return -EINVAL;
12189         if ((wol->wolopts & WAKE_MAGIC) &&
12190             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12191                 return -EINVAL;
12192
12193         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12194
12195         if (device_may_wakeup(dp))
12196                 tg3_flag_set(tp, WOL_ENABLE);
12197         else
12198                 tg3_flag_clear(tp, WOL_ENABLE);
12199
12200         return 0;
12201 }
12202
12203 static u32 tg3_get_msglevel(struct net_device *dev)
12204 {
12205         struct tg3 *tp = netdev_priv(dev);
12206         return tp->msg_enable;
12207 }
12208
12209 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12210 {
12211         struct tg3 *tp = netdev_priv(dev);
12212         tp->msg_enable = value;
12213 }
12214
12215 static int tg3_nway_reset(struct net_device *dev)
12216 {
12217         struct tg3 *tp = netdev_priv(dev);
12218         int r;
12219
12220         if (!netif_running(dev))
12221                 return -EAGAIN;
12222
12223         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12224                 return -EINVAL;
12225
12226         tg3_warn_mgmt_link_flap(tp);
12227
12228         if (tg3_flag(tp, USE_PHYLIB)) {
12229                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12230                         return -EAGAIN;
12231                 r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
12232         } else {
12233                 u32 bmcr;
12234
12235                 spin_lock_bh(&tp->lock);
12236                 r = -EINVAL;
12237                 tg3_readphy(tp, MII_BMCR, &bmcr);       /* dummy read; value re-read below */
12238                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12239                     ((bmcr & BMCR_ANENABLE) ||
12240                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12241                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12242                                                    BMCR_ANENABLE);
12243                         r = 0;
12244                 }
12245                 spin_unlock_bh(&tp->lock);
12246         }
12247
12248         return r;
12249 }
12250
12251 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12252 {
12253         struct tg3 *tp = netdev_priv(dev);
12254
12255         ering->rx_max_pending = tp->rx_std_ring_mask;
12256         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12257                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12258         else
12259                 ering->rx_jumbo_max_pending = 0;
12260
12261         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12262
12263         ering->rx_pending = tp->rx_pending;
12264         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12265                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12266         else
12267                 ering->rx_jumbo_pending = 0;
12268
12269         ering->tx_pending = tp->napi[0].tx_pending;
12270 }
12271
12272 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12273 {
12274         struct tg3 *tp = netdev_priv(dev);
12275         int i, irq_sync = 0, err = 0;
12276
12277         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12278             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12279             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12280             (ering->tx_pending <= MAX_SKB_FRAGS) ||
12281             (tg3_flag(tp, TSO_BUG) &&
12282              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12283                 return -EINVAL;
12284
12285         if (netif_running(dev)) {
12286                 tg3_phy_stop(tp);
12287                 tg3_netif_stop(tp);
12288                 irq_sync = 1;
12289         }
12290
12291         tg3_full_lock(tp, irq_sync);
12292
12293         tp->rx_pending = ering->rx_pending;
12294
12295         if (tg3_flag(tp, MAX_RXPEND_64) &&
12296             tp->rx_pending > 63)
12297                 tp->rx_pending = 63;
12298         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12299
12300         for (i = 0; i < tp->irq_max; i++)
12301                 tp->napi[i].tx_pending = ering->tx_pending;
12302
12303         if (netif_running(dev)) {
12304                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12305                 err = tg3_restart_hw(tp, false);
12306                 if (!err)
12307                         tg3_netif_start(tp);
12308         }
12309
12310         tg3_full_unlock(tp);
12311
12312         if (irq_sync && !err)
12313                 tg3_phy_start(tp);
12314
12315         return err;
12316 }
12317
12318 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12319 {
12320         struct tg3 *tp = netdev_priv(dev);
12321
12322         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12323
12324         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12325                 epause->rx_pause = 1;
12326         else
12327                 epause->rx_pause = 0;
12328
12329         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12330                 epause->tx_pause = 1;
12331         else
12332                 epause->tx_pause = 0;
12333 }
12334
12335 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12336 {
12337         struct tg3 *tp = netdev_priv(dev);
12338         int err = 0;
12339
12340         if (tp->link_config.autoneg == AUTONEG_ENABLE)
12341                 tg3_warn_mgmt_link_flap(tp);
12342
12343         if (tg3_flag(tp, USE_PHYLIB)) {
12344                 u32 newadv;
12345                 struct phy_device *phydev;
12346
12347                 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12348
12349                 if (!(phydev->supported & SUPPORTED_Pause) ||
12350                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12351                      (epause->rx_pause != epause->tx_pause)))
12352                         return -EINVAL;
12353
12354                 tp->link_config.flowctrl = 0;
12355                 if (epause->rx_pause) {
12356                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12357
12358                         if (epause->tx_pause) {
12359                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12360                                 newadv = ADVERTISED_Pause;
12361                         } else
12362                                 newadv = ADVERTISED_Pause |
12363                                          ADVERTISED_Asym_Pause;
12364                 } else if (epause->tx_pause) {
12365                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12366                         newadv = ADVERTISED_Asym_Pause;
12367                 } else
12368                         newadv = 0;
12369
12370                 if (epause->autoneg)
12371                         tg3_flag_set(tp, PAUSE_AUTONEG);
12372                 else
12373                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12374
12375                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12376                         u32 oldadv = phydev->advertising &
12377                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12378                         if (oldadv != newadv) {
12379                                 phydev->advertising &=
12380                                         ~(ADVERTISED_Pause |
12381                                           ADVERTISED_Asym_Pause);
12382                                 phydev->advertising |= newadv;
12383                                 if (phydev->autoneg) {
12384                                         /*
12385                                          * Always renegotiate the link to
12386                                          * inform our link partner of our
12387                                          * flow control settings, even if the
12388                                          * flow control is forced.  Let
12389                                          * tg3_adjust_link() do the final
12390                                          * flow control setup.
12391                                          */
12392                                         return phy_start_aneg(phydev);
12393                                 }
12394                         }
12395
12396                         if (!epause->autoneg)
12397                                 tg3_setup_flow_control(tp, 0, 0);
12398                 } else {
12399                         tp->link_config.advertising &=
12400                                         ~(ADVERTISED_Pause |
12401                                           ADVERTISED_Asym_Pause);
12402                         tp->link_config.advertising |= newadv;
12403                 }
12404         } else {
12405                 int irq_sync = 0;
12406
12407                 if (netif_running(dev)) {
12408                         tg3_netif_stop(tp);
12409                         irq_sync = 1;
12410                 }
12411
12412                 tg3_full_lock(tp, irq_sync);
12413
12414                 if (epause->autoneg)
12415                         tg3_flag_set(tp, PAUSE_AUTONEG);
12416                 else
12417                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12418                 if (epause->rx_pause)
12419                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12420                 else
12421                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12422                 if (epause->tx_pause)
12423                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12424                 else
12425                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12426
12427                 if (netif_running(dev)) {
12428                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12429                         err = tg3_restart_hw(tp, false);
12430                         if (!err)
12431                                 tg3_netif_start(tp);
12432                 }
12433
12434                 tg3_full_unlock(tp);
12435         }
12436
12437         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12438
12439         return err;
12440 }
12441
12442 static int tg3_get_sset_count(struct net_device *dev, int sset)
12443 {
12444         switch (sset) {
12445         case ETH_SS_TEST:
12446                 return TG3_NUM_TEST;
12447         case ETH_SS_STATS:
12448                 return TG3_NUM_STATS;
12449         default:
12450                 return -EOPNOTSUPP;
12451         }
12452 }
12453
12454 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12455                          u32 *rules __always_unused)
12456 {
12457         struct tg3 *tp = netdev_priv(dev);
12458
12459         if (!tg3_flag(tp, SUPPORT_MSIX))
12460                 return -EOPNOTSUPP;
12461
12462         switch (info->cmd) {
12463         case ETHTOOL_GRXRINGS:
12464                 if (netif_running(tp->dev))
12465                         info->data = tp->rxq_cnt;
12466                 else {
12467                         info->data = num_online_cpus();
12468                         if (info->data > TG3_RSS_MAX_NUM_QS)
12469                                 info->data = TG3_RSS_MAX_NUM_QS;
12470                 }
12471
12472                 /* The first interrupt vector only
12473                  * handles link interrupts.
12474                  */
12475                 info->data -= 1;
12476                 return 0;
12477
12478         default:
12479                 return -EOPNOTSUPP;
12480         }
12481 }
12482
12483 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12484 {
12485         u32 size = 0;
12486         struct tg3 *tp = netdev_priv(dev);
12487
12488         if (tg3_flag(tp, SUPPORT_MSIX))
12489                 size = TG3_RSS_INDIR_TBL_SIZE;
12490
12491         return size;
12492 }
12493
12494 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12495 {
12496         struct tg3 *tp = netdev_priv(dev);
12497         int i;
12498
12499         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12500                 indir[i] = tp->rss_ind_tbl[i];
12501
12502         return 0;
12503 }
12504
12505 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12506 {
12507         struct tg3 *tp = netdev_priv(dev);
12508         size_t i;
12509
12510         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12511                 tp->rss_ind_tbl[i] = indir[i];
12512
12513         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12514                 return 0;
12515
12516         /* It is legal to write the indirection
12517          * table while the device is running.
12518          */
12519         tg3_full_lock(tp, 0);
12520         tg3_rss_write_indir_tbl(tp);
12521         tg3_full_unlock(tp);
12522
12523         return 0;
12524 }
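
/* Usage sketch, assuming the standard ethtool RSS semantics: each
 * slot holds an rx queue index and the hash of an incoming flow,
 * taken modulo TG3_RSS_INDIR_TBL_SIZE, picks the slot.  Spreading
 * flows evenly over N queues from userspace is then
 *
 *	ethtool -X ethX equal N
 *
 * which arrives here as indir[i] = i % N for every i.
 */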
12525
12526 static void tg3_get_channels(struct net_device *dev,
12527                              struct ethtool_channels *channel)
12528 {
12529         struct tg3 *tp = netdev_priv(dev);
12530         u32 deflt_qs = netif_get_num_default_rss_queues();
12531
12532         channel->max_rx = tp->rxq_max;
12533         channel->max_tx = tp->txq_max;
12534
12535         if (netif_running(dev)) {
12536                 channel->rx_count = tp->rxq_cnt;
12537                 channel->tx_count = tp->txq_cnt;
12538         } else {
12539                 if (tp->rxq_req)
12540                         channel->rx_count = tp->rxq_req;
12541                 else
12542                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12543
12544                 if (tp->txq_req)
12545                         channel->tx_count = tp->txq_req;
12546                 else
12547                         channel->tx_count = min(deflt_qs, tp->txq_max);
12548         }
12549 }
12550
12551 static int tg3_set_channels(struct net_device *dev,
12552                             struct ethtool_channels *channel)
12553 {
12554         struct tg3 *tp = netdev_priv(dev);
12555
12556         if (!tg3_flag(tp, SUPPORT_MSIX))
12557                 return -EOPNOTSUPP;
12558
12559         if (channel->rx_count > tp->rxq_max ||
12560             channel->tx_count > tp->txq_max)
12561                 return -EINVAL;
12562
12563         tp->rxq_req = channel->rx_count;
12564         tp->txq_req = channel->tx_count;
12565
12566         if (!netif_running(dev))
12567                 return 0;
12568
12569         tg3_stop(tp);
12570
12571         tg3_carrier_off(tp);
12572
12573         tg3_start(tp, true, false, false);
12574
12575         return 0;
12576 }
12577
12578 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12579 {
12580         switch (stringset) {
12581         case ETH_SS_STATS:
12582                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12583                 break;
12584         case ETH_SS_TEST:
12585                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12586                 break;
12587         default:
12588                 WARN_ON(1);     /* we need a WARN() */
12589                 break;
12590         }
12591 }
12592
12593 static int tg3_set_phys_id(struct net_device *dev,
12594                             enum ethtool_phys_id_state state)
12595 {
12596         struct tg3 *tp = netdev_priv(dev);
12597
12598         if (!netif_running(tp->dev))
12599                 return -EAGAIN;
12600
12601         switch (state) {
12602         case ETHTOOL_ID_ACTIVE:
12603                 return 1;       /* cycle on/off once per second */
12604
12605         case ETHTOOL_ID_ON:
12606                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12607                      LED_CTRL_1000MBPS_ON |
12608                      LED_CTRL_100MBPS_ON |
12609                      LED_CTRL_10MBPS_ON |
12610                      LED_CTRL_TRAFFIC_OVERRIDE |
12611                      LED_CTRL_TRAFFIC_BLINK |
12612                      LED_CTRL_TRAFFIC_LED);
12613                 break;
12614
12615         case ETHTOOL_ID_OFF:
12616                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12617                      LED_CTRL_TRAFFIC_OVERRIDE);
12618                 break;
12619
12620         case ETHTOOL_ID_INACTIVE:
12621                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12622                 break;
12623         }
12624
12625         return 0;
12626 }
12627
12628 static void tg3_get_ethtool_stats(struct net_device *dev,
12629                                    struct ethtool_stats *estats, u64 *tmp_stats)
12630 {
12631         struct tg3 *tp = netdev_priv(dev);
12632
12633         if (tp->hw_stats)
12634                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12635         else
12636                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12637 }
12638
12639 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12640 {
12641         int i;
12642         __be32 *buf;
12643         u32 offset = 0, len = 0;
12644         u32 magic, val;
12645
12646         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12647                 return NULL;
12648
12649         if (magic == TG3_EEPROM_MAGIC) {
12650                 for (offset = TG3_NVM_DIR_START;
12651                      offset < TG3_NVM_DIR_END;
12652                      offset += TG3_NVM_DIRENT_SIZE) {
12653                         if (tg3_nvram_read(tp, offset, &val))
12654                                 return NULL;
12655
12656                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12657                             TG3_NVM_DIRTYPE_EXTVPD)
12658                                 break;
12659                 }
12660
12661                 if (offset != TG3_NVM_DIR_END) {
12662                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12663                         if (tg3_nvram_read(tp, offset + 4, &offset))
12664                                 return NULL;
12665
12666                         offset = tg3_nvram_logical_addr(tp, offset);
12667                 }
12668         }
12669
12670         if (!offset || !len) {
12671                 offset = TG3_NVM_VPD_OFF;
12672                 len = TG3_NVM_VPD_LEN;
12673         }
12674
12675         buf = kmalloc(len, GFP_KERNEL);
12676         if (buf == NULL)
12677                 return NULL;
12678
12679         if (magic == TG3_EEPROM_MAGIC) {
12680                 for (i = 0; i < len; i += 4) {
12681                         /* The data is in little-endian format in NVRAM.
12682                          * Use the big-endian read routines to preserve
12683                          * the byte order as it exists in NVRAM.
12684                          */
12685                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12686                                 goto error;
12687                 }
12688         } else {
12689                 u8 *ptr;
12690                 ssize_t cnt;
12691                 unsigned int pos = 0;
12692
12693                 ptr = (u8 *)&buf[0];
12694                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12695                         cnt = pci_read_vpd(tp->pdev, pos,
12696                                            len - pos, ptr);
12697                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
12698                                 cnt = 0;
12699                         else if (cnt < 0)
12700                                 goto error;
12701                 }
12702                 if (pos != len)
12703                         goto error;
12704         }
12705
12706         *vpdlen = len;
12707
12708         return buf;
12709
12710 error:
12711         kfree(buf);
12712         return NULL;
12713 }
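
/* Directory entry format implied by the scan above: entries of
 * TG3_NVM_DIRENT_SIZE bytes starting at TG3_NVM_DIR_START, where the
 * first word packs the entry type (val >> TG3_NVM_DIRTYPE_SHIFT) with
 * the item length in words (val & TG3_NVM_DIRTYPE_LENMSK), and the
 * following word gives the NVRAM offset of the item's data.  Only if
 * no TG3_NVM_DIRTYPE_EXTVPD entry is found does the code fall back to
 * the fixed TG3_NVM_VPD_OFF/TG3_NVM_VPD_LEN window.
 */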
12714
12715 #define NVRAM_TEST_SIZE 0x100
12716 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12717 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12718 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12719 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12720 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12721 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12722 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12723 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12724
12725 static int tg3_test_nvram(struct tg3 *tp)
12726 {
12727         u32 csum, magic, len;
12728         __be32 *buf;
12729         int i, j, k, err = 0, size;
12730
12731         if (tg3_flag(tp, NO_NVRAM))
12732                 return 0;
12733
12734         if (tg3_nvram_read(tp, 0, &magic) != 0)
12735                 return -EIO;
12736
12737         if (magic == TG3_EEPROM_MAGIC)
12738                 size = NVRAM_TEST_SIZE;
12739         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12740                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12741                     TG3_EEPROM_SB_FORMAT_1) {
12742                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12743                         case TG3_EEPROM_SB_REVISION_0:
12744                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12745                                 break;
12746                         case TG3_EEPROM_SB_REVISION_2:
12747                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12748                                 break;
12749                         case TG3_EEPROM_SB_REVISION_3:
12750                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12751                                 break;
12752                         case TG3_EEPROM_SB_REVISION_4:
12753                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12754                                 break;
12755                         case TG3_EEPROM_SB_REVISION_5:
12756                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12757                                 break;
12758                         case TG3_EEPROM_SB_REVISION_6:
12759                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12760                                 break;
12761                         default:
12762                                 return -EIO;
12763                         }
12764                 } else
12765                         return 0;
12766         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12767                 size = NVRAM_SELFBOOT_HW_SIZE;
12768         else
12769                 return -EIO;
12770
12771         buf = kmalloc(size, GFP_KERNEL);
12772         if (buf == NULL)
12773                 return -ENOMEM;
12774
12775         err = -EIO;
12776         for (i = 0, j = 0; i < size; i += 4, j++) {
12777                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12778                 if (err)
12779                         break;
12780         }
12781         if (i < size)
12782                 goto out;
12783
12784         /* Selfboot format */
12785         magic = be32_to_cpu(buf[0]);
12786         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12787             TG3_EEPROM_MAGIC_FW) {
12788                 u8 *buf8 = (u8 *) buf, csum8 = 0;
12789
12790                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12791                     TG3_EEPROM_SB_REVISION_2) {
12792                         /* For rev 2, the csum doesn't include the MBA. */
12793                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12794                                 csum8 += buf8[i];
12795                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12796                                 csum8 += buf8[i];
12797                 } else {
12798                         for (i = 0; i < size; i++)
12799                                 csum8 += buf8[i];
12800                 }
12801
12802                 if (csum8 == 0) {
12803                         err = 0;
12804                         goto out;
12805                 }
12806
12807                 err = -EIO;
12808                 goto out;
12809         }
12810
12811         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12812             TG3_EEPROM_MAGIC_HW) {
12813                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12814                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12815                 u8 *buf8 = (u8 *) buf;
12816
12817                 /* Separate the parity bits and the data bytes.  */
12818                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12819                         if ((i == 0) || (i == 8)) {
12820                                 int l;
12821                                 u8 msk;
12822
12823                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12824                                         parity[k++] = buf8[i] & msk;
12825                                 i++;
12826                         } else if (i == 16) {
12827                                 int l;
12828                                 u8 msk;
12829
12830                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12831                                         parity[k++] = buf8[i] & msk;
12832                                 i++;
12833
12834                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12835                                         parity[k++] = buf8[i] & msk;
12836                                 i++;
12837                         }
12838                         data[j++] = buf8[i];
12839                 }
12840
12841                 err = -EIO;
12842                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12843                         u8 hw8 = hweight8(data[i]);
12844
12845                         if ((hw8 & 0x1) && parity[i])
12846                                 goto out;
12847                         else if (!(hw8 & 0x1) && !parity[i])
12848                                 goto out;
12849                 }
12850                 err = 0;
12851                 goto out;
12852         }
12853
12854         err = -EIO;
12855
12856         /* Bootstrap checksum at offset 0x10 */
12857         csum = calc_crc((unsigned char *) buf, 0x10);
12858         if (csum != le32_to_cpu(buf[0x10/4]))
12859                 goto out;
12860
12861         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12862         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12863         if (csum != le32_to_cpu(buf[0xfc/4]))
12864                 goto out;
12865
12866         kfree(buf);
12867
12868         buf = tg3_vpd_readblock(tp, &len);
12869         if (!buf)
12870                 return -ENOMEM;
12871
12872         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12873         if (i > 0) {
12874                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12875                 if (j < 0)
12876                         goto out;
12877
12878                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12879                         goto out;
12880
12881                 i += PCI_VPD_LRDT_TAG_SIZE;
12882                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12883                                               PCI_VPD_RO_KEYWORD_CHKSUM);
12884                 if (j > 0) {
12885                         u8 csum8 = 0;
12886
12887                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
12888
12889                         for (i = 0; i <= j; i++)
12890                                 csum8 += ((u8 *)buf)[i];
12891
12892                         if (csum8)
12893                                 goto out;
12894                 }
12895         }
12896
12897         err = 0;
12898
12899 out:
12900         kfree(buf);
12901         return err;
12902 }
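/* The selfboot formats validated above rely on two very simple
 * integrity schemes: an 8-bit additive checksum that must sum to zero
 * over the image, and, for the HW format, per-byte odd parity (the set
 * bits in each data byte plus its stored parity bit must total an odd
 * number, which is what the hweight8() loop enforces).  A standalone
 * sketch of both checks, using made-up example data:
 */
#if 0
#include <assert.h>
#include <stdint.h>

/* 8-bit additive checksum: a valid image sums to zero (mod 256). */
static int csum8_ok(const uint8_t *p, int len)
{
        uint8_t sum = 0;

        while (len--)
                sum += *p++;
        return sum == 0;
}

/* Odd parity: data bits plus the parity bit must be odd in total. */
static int parity_ok(uint8_t data, int parity_bit)
{
        return (__builtin_popcount(data) + !!parity_bit) & 1;
}

int main(void)
{
        uint8_t img[4] = { 0x12, 0x34, 0x56, 0x00 };

        img[3] = (uint8_t)(0x100 - (0x12 + 0x34 + 0x56));
        assert(csum8_ok(img, 4));

        assert(parity_ok(0x03, 1));     /* 2 data bits + parity = 3, odd */
        assert(!parity_ok(0x03, 0));    /* 2 data bits + none = 2, even */
        return 0;
}
#endif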
12903
12904 #define TG3_SERDES_TIMEOUT_SEC  2
12905 #define TG3_COPPER_TIMEOUT_SEC  6
12906
12907 static int tg3_test_link(struct tg3 *tp)
12908 {
12909         int i, max;
12910
12911         if (!netif_running(tp->dev))
12912                 return -ENODEV;
12913
12914         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12915                 max = TG3_SERDES_TIMEOUT_SEC;
12916         else
12917                 max = TG3_COPPER_TIMEOUT_SEC;
12918
12919         for (i = 0; i < max; i++) {
12920                 if (tp->link_up)
12921                         return 0;
12922
12923                 if (msleep_interruptible(1000))
12924                         break;
12925         }
12926
12927         return -EIO;
12928 }
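/* The link test above simply polls tp->link_up once per second: up to
 * 2 s for SerDes ports and 6 s for copper, presumably because copper
 * autonegotiation takes longer to complete.  msleep_interruptible()
 * returns nonzero if a signal arrived, in which case the wait is
 * abandoned early and the test reports -EIO.
 */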
12929
12930 /* Only test the commonly used registers */
12931 static int tg3_test_registers(struct tg3 *tp)
12932 {
12933         int i, is_5705, is_5750;
12934         u32 offset, read_mask, write_mask, val, save_val, read_val;
12935         static struct {
12936                 u16 offset;
12937                 u16 flags;
12938 #define TG3_FL_5705     0x1
12939 #define TG3_FL_NOT_5705 0x2
12940 #define TG3_FL_NOT_5788 0x4
12941 #define TG3_FL_NOT_5750 0x8
12942                 u32 read_mask;
12943                 u32 write_mask;
12944         } reg_tbl[] = {
12945                 /* MAC Control Registers */
12946                 { MAC_MODE, TG3_FL_NOT_5705,
12947                         0x00000000, 0x00ef6f8c },
12948                 { MAC_MODE, TG3_FL_5705,
12949                         0x00000000, 0x01ef6b8c },
12950                 { MAC_STATUS, TG3_FL_NOT_5705,
12951                         0x03800107, 0x00000000 },
12952                 { MAC_STATUS, TG3_FL_5705,
12953                         0x03800100, 0x00000000 },
12954                 { MAC_ADDR_0_HIGH, 0x0000,
12955                         0x00000000, 0x0000ffff },
12956                 { MAC_ADDR_0_LOW, 0x0000,
12957                         0x00000000, 0xffffffff },
12958                 { MAC_RX_MTU_SIZE, 0x0000,
12959                         0x00000000, 0x0000ffff },
12960                 { MAC_TX_MODE, 0x0000,
12961                         0x00000000, 0x00000070 },
12962                 { MAC_TX_LENGTHS, 0x0000,
12963                         0x00000000, 0x00003fff },
12964                 { MAC_RX_MODE, TG3_FL_NOT_5705,
12965                         0x00000000, 0x000007fc },
12966                 { MAC_RX_MODE, TG3_FL_5705,
12967                         0x00000000, 0x000007dc },
12968                 { MAC_HASH_REG_0, 0x0000,
12969                         0x00000000, 0xffffffff },
12970                 { MAC_HASH_REG_1, 0x0000,
12971                         0x00000000, 0xffffffff },
12972                 { MAC_HASH_REG_2, 0x0000,
12973                         0x00000000, 0xffffffff },
12974                 { MAC_HASH_REG_3, 0x0000,
12975                         0x00000000, 0xffffffff },
12976
12977                 /* Receive Data and Receive BD Initiator Control Registers. */
12978                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12979                         0x00000000, 0xffffffff },
12980                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12981                         0x00000000, 0xffffffff },
12982                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12983                         0x00000000, 0x00000003 },
12984                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12985                         0x00000000, 0xffffffff },
12986                 { RCVDBDI_STD_BD+0, 0x0000,
12987                         0x00000000, 0xffffffff },
12988                 { RCVDBDI_STD_BD+4, 0x0000,
12989                         0x00000000, 0xffffffff },
12990                 { RCVDBDI_STD_BD+8, 0x0000,
12991                         0x00000000, 0xffff0002 },
12992                 { RCVDBDI_STD_BD+0xc, 0x0000,
12993                         0x00000000, 0xffffffff },
12994
12995                 /* Receive BD Initiator Control Registers. */
12996                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12997                         0x00000000, 0xffffffff },
12998                 { RCVBDI_STD_THRESH, TG3_FL_5705,
12999                         0x00000000, 0x000003ff },
13000                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13001                         0x00000000, 0xffffffff },
13002
13003                 /* Host Coalescing Control Registers. */
13004                 { HOSTCC_MODE, TG3_FL_NOT_5705,
13005                         0x00000000, 0x00000004 },
13006                 { HOSTCC_MODE, TG3_FL_5705,
13007                         0x00000000, 0x000000f6 },
13008                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13009                         0x00000000, 0xffffffff },
13010                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13011                         0x00000000, 0x000003ff },
13012                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13013                         0x00000000, 0xffffffff },
13014                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13015                         0x00000000, 0x000003ff },
13016                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13017                         0x00000000, 0xffffffff },
13018                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13019                         0x00000000, 0x000000ff },
13020                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13021                         0x00000000, 0xffffffff },
13022                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13023                         0x00000000, 0x000000ff },
13024                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13025                         0x00000000, 0xffffffff },
13026                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13027                         0x00000000, 0xffffffff },
13028                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13029                         0x00000000, 0xffffffff },
13030                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13031                         0x00000000, 0x000000ff },
13032                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13033                         0x00000000, 0xffffffff },
13034                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13035                         0x00000000, 0x000000ff },
13036                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13037                         0x00000000, 0xffffffff },
13038                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13039                         0x00000000, 0xffffffff },
13040                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13041                         0x00000000, 0xffffffff },
13042                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13043                         0x00000000, 0xffffffff },
13044                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13045                         0x00000000, 0xffffffff },
13046                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13047                         0xffffffff, 0x00000000 },
13048                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13049                         0xffffffff, 0x00000000 },
13050
13051                 /* Buffer Manager Control Registers. */
13052                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13053                         0x00000000, 0x007fff80 },
13054                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13055                         0x00000000, 0x007fffff },
13056                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13057                         0x00000000, 0x0000003f },
13058                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13059                         0x00000000, 0x000001ff },
13060                 { BUFMGR_MB_HIGH_WATER, 0x0000,
13061                         0x00000000, 0x000001ff },
13062                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13063                         0xffffffff, 0x00000000 },
13064                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13065                         0xffffffff, 0x00000000 },
13066
13067                 /* Mailbox Registers */
13068                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13069                         0x00000000, 0x000001ff },
13070                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13071                         0x00000000, 0x000001ff },
13072                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13073                         0x00000000, 0x000007ff },
13074                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13075                         0x00000000, 0x000001ff },
13076
13077                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13078         };
13079
13080         is_5705 = is_5750 = 0;
13081         if (tg3_flag(tp, 5705_PLUS)) {
13082                 is_5705 = 1;
13083                 if (tg3_flag(tp, 5750_PLUS))
13084                         is_5750 = 1;
13085         }
13086
13087         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13088                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13089                         continue;
13090
13091                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13092                         continue;
13093
13094                 if (tg3_flag(tp, IS_5788) &&
13095                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
13096                         continue;
13097
13098                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13099                         continue;
13100
13101                 offset = (u32) reg_tbl[i].offset;
13102                 read_mask = reg_tbl[i].read_mask;
13103                 write_mask = reg_tbl[i].write_mask;
13104
13105                 /* Save the original register content */
13106                 save_val = tr32(offset);
13107
13108                 /* Determine the read-only value. */
13109                 read_val = save_val & read_mask;
13110
13111                 /* Write zero to the register, then make sure the read-only bits
13112                  * are not changed and the read/write bits are all zeros.
13113                  */
13114                 tw32(offset, 0);
13115
13116                 val = tr32(offset);
13117
13118                 /* Test the read-only and read/write bits. */
13119                 if (((val & read_mask) != read_val) || (val & write_mask))
13120                         goto out;
13121
13122                 /* Write ones to all the bits defined by RdMask and WrMask, then
13123                  * make sure the read-only bits are not changed and the
13124                  * read/write bits are all ones.
13125                  */
13126                 tw32(offset, read_mask | write_mask);
13127
13128                 val = tr32(offset);
13129
13130                 /* Test the read-only bits. */
13131                 if ((val & read_mask) != read_val)
13132                         goto out;
13133
13134                 /* Test the read/write bits. */
13135                 if ((val & write_mask) != write_mask)
13136                         goto out;
13137
13138                 tw32(offset, save_val);
13139         }
13140
13141         return 0;
13142
13143 out:
13144         if (netif_msg_hw(tp))
13145                 netdev_err(tp->dev,
13146                            "Register test failed at offset %x\n", offset);
13147         tw32(offset, save_val);
13148         return -EIO;
13149 }
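/* The table-driven test above boils down to one invariant per
 * register: writing all zeros and then all ones must leave the
 * read-only field untouched while the read/write field follows the
 * write exactly.  A minimal sketch of that invariant against a
 * simulated register (RO_BITS/RW_BITS are made-up values):
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define RO_BITS 0x0000ff00u     /* bits the device never lets us change */
#define RW_BITS 0x000000ffu     /* bits that must accept any value */

static uint32_t hw_reg = 0x00001234;    /* simulated register */

static void sim_write(uint32_t val)
{
        /* The RO field keeps its value; the RW field takes the write. */
        hw_reg = (hw_reg & RO_BITS) | (val & RW_BITS);
}

int main(void)
{
        uint32_t save = hw_reg;
        uint32_t ro = save & RO_BITS;

        sim_write(0);                           /* write all zeros */
        assert((hw_reg & RO_BITS) == ro);       /* RO unchanged */
        assert((hw_reg & RW_BITS) == 0);        /* RW all clear */

        sim_write(RO_BITS | RW_BITS);           /* write all ones */
        assert((hw_reg & RO_BITS) == ro);       /* RO still unchanged */
        assert((hw_reg & RW_BITS) == RW_BITS);  /* RW all set */

        sim_write(save);        /* restore, like tw32(offset, save_val) */
        return 0;
}
#endif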
13150
13151 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13152 {
13153         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13154         int i;
13155         u32 j;
13156
13157         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13158                 for (j = 0; j < len; j += 4) {
13159                         u32 val;
13160
13161                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13162                         tg3_read_mem(tp, offset + j, &val);
13163                         if (val != test_pattern[i])
13164                                 return -EIO;
13165                 }
13166         }
13167         return 0;
13168 }
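/* The same walk expressed over ordinary memory, as a hypothetical
 * standalone helper: write each test pattern to every word and read it
 * straight back, so a stuck or shorted bit shows up immediately.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static int mem_pattern_test(volatile uint32_t *base, size_t words)
{
        static const uint32_t pat[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
        size_t i, j;

        for (i = 0; i < sizeof(pat) / sizeof(pat[0]); i++) {
                for (j = 0; j < words; j++) {
                        base[j] = pat[i];
                        if (base[j] != pat[i])
                                return -1;
                }
        }
        return 0;
}
#endif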
13169
13170 static int tg3_test_memory(struct tg3 *tp)
13171 {
13172         static struct mem_entry {
13173                 u32 offset;
13174                 u32 len;
13175         } mem_tbl_570x[] = {
13176                 { 0x00000000, 0x00b50},
13177                 { 0x00002000, 0x1c000},
13178                 { 0xffffffff, 0x00000}
13179         }, mem_tbl_5705[] = {
13180                 { 0x00000100, 0x0000c},
13181                 { 0x00000200, 0x00008},
13182                 { 0x00004000, 0x00800},
13183                 { 0x00006000, 0x01000},
13184                 { 0x00008000, 0x02000},
13185                 { 0x00010000, 0x0e000},
13186                 { 0xffffffff, 0x00000}
13187         }, mem_tbl_5755[] = {
13188                 { 0x00000200, 0x00008},
13189                 { 0x00004000, 0x00800},
13190                 { 0x00006000, 0x00800},
13191                 { 0x00008000, 0x02000},
13192                 { 0x00010000, 0x0c000},
13193                 { 0xffffffff, 0x00000}
13194         }, mem_tbl_5906[] = {
13195                 { 0x00000200, 0x00008},
13196                 { 0x00004000, 0x00400},
13197                 { 0x00006000, 0x00400},
13198                 { 0x00008000, 0x01000},
13199                 { 0x00010000, 0x01000},
13200                 { 0xffffffff, 0x00000}
13201         }, mem_tbl_5717[] = {
13202                 { 0x00000200, 0x00008},
13203                 { 0x00010000, 0x0a000},
13204                 { 0x00020000, 0x13c00},
13205                 { 0xffffffff, 0x00000}
13206         }, mem_tbl_57765[] = {
13207                 { 0x00000200, 0x00008},
13208                 { 0x00004000, 0x00800},
13209                 { 0x00006000, 0x09800},
13210                 { 0x00010000, 0x0a000},
13211                 { 0xffffffff, 0x00000}
13212         };
13213         struct mem_entry *mem_tbl;
13214         int err = 0;
13215         int i;
13216
13217         if (tg3_flag(tp, 5717_PLUS))
13218                 mem_tbl = mem_tbl_5717;
13219         else if (tg3_flag(tp, 57765_CLASS) ||
13220                  tg3_asic_rev(tp) == ASIC_REV_5762)
13221                 mem_tbl = mem_tbl_57765;
13222         else if (tg3_flag(tp, 5755_PLUS))
13223                 mem_tbl = mem_tbl_5755;
13224         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13225                 mem_tbl = mem_tbl_5906;
13226         else if (tg3_flag(tp, 5705_PLUS))
13227                 mem_tbl = mem_tbl_5705;
13228         else
13229                 mem_tbl = mem_tbl_570x;
13230
13231         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13232                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13233                 if (err)
13234                         break;
13235         }
13236
13237         return err;
13238 }
13239
13240 #define TG3_TSO_MSS             500
13241
13242 #define TG3_TSO_IP_HDR_LEN      20
13243 #define TG3_TSO_TCP_HDR_LEN     20
13244 #define TG3_TSO_TCP_OPT_LEN     12
13245
13246 static const u8 tg3_tso_header[] = {
13247 0x08, 0x00,
13248 0x45, 0x00, 0x00, 0x00,
13249 0x00, 0x00, 0x40, 0x00,
13250 0x40, 0x06, 0x00, 0x00,
13251 0x0a, 0x00, 0x00, 0x01,
13252 0x0a, 0x00, 0x00, 0x02,
13253 0x0d, 0x00, 0xe0, 0x00,
13254 0x00, 0x00, 0x01, 0x00,
13255 0x00, 0x00, 0x02, 0x00,
13256 0x80, 0x10, 0x10, 0x00,
13257 0x14, 0x09, 0x00, 0x00,
13258 0x01, 0x01, 0x08, 0x0a,
13259 0x11, 0x11, 0x11, 0x11,
13260 0x11, 0x11, 0x11, 0x11,
13261 };
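/* Decoding the template above (offsets relative to the EtherType):
 *   bytes  0-1   0x0800: EtherType, IPv4
 *   bytes  2-21  20-byte IPv4 header: version 4, IHL 5, DF set, TTL 64,
 *                protocol 6 (TCP), 10.0.0.1 -> 10.0.0.2; tot_len is
 *                filled in by tg3_run_loopback() and checksumming is
 *                left to the device
 *   bytes 22-53  32-byte TCP header: data offset 8 words (20 bytes plus
 *                12 bytes of options: two NOPs and a timestamp option),
 *                ACK flag set
 * The lengths match the TG3_TSO_*_LEN constants above.
 */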
13262
13263 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13264 {
13265         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13266         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13267         u32 budget;
13268         struct sk_buff *skb;
13269         u8 *tx_data, *rx_data;
13270         dma_addr_t map;
13271         int num_pkts, tx_len, rx_len, i, err;
13272         struct tg3_rx_buffer_desc *desc;
13273         struct tg3_napi *tnapi, *rnapi;
13274         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13275
13276         tnapi = &tp->napi[0];
13277         rnapi = &tp->napi[0];
13278         if (tp->irq_cnt > 1) {
13279                 if (tg3_flag(tp, ENABLE_RSS))
13280                         rnapi = &tp->napi[1];
13281                 if (tg3_flag(tp, ENABLE_TSS))
13282                         tnapi = &tp->napi[1];
13283         }
13284         coal_now = tnapi->coal_now | rnapi->coal_now;
13285
13286         err = -EIO;
13287
13288         tx_len = pktsz;
13289         skb = netdev_alloc_skb(tp->dev, tx_len);
13290         if (!skb)
13291                 return -ENOMEM;
13292
13293         tx_data = skb_put(skb, tx_len);
13294         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13295         memset(tx_data + ETH_ALEN, 0x0, 8);
13296
13297         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13298
13299         if (tso_loopback) {
13300                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13301
13302                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13303                               TG3_TSO_TCP_OPT_LEN;
13304
13305                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13306                        sizeof(tg3_tso_header));
13307                 mss = TG3_TSO_MSS;
13308
13309                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13310                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13311
13312                 /* Set the total length field in the IP header */
13313                 iph->tot_len = htons((u16)(mss + hdr_len));
13314
13315                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13316                               TXD_FLAG_CPU_POST_DMA);
13317
13318                 if (tg3_flag(tp, HW_TSO_1) ||
13319                     tg3_flag(tp, HW_TSO_2) ||
13320                     tg3_flag(tp, HW_TSO_3)) {
13321                         struct tcphdr *th;
13322                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13323                         th = (struct tcphdr *)&tx_data[val];
13324                         th->check = 0;
13325                 } else
13326                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13327
13328                 if (tg3_flag(tp, HW_TSO_3)) {
13329                         mss |= (hdr_len & 0xc) << 12;
13330                         if (hdr_len & 0x10)
13331                                 base_flags |= 0x00000010;
13332                         base_flags |= (hdr_len & 0x3e0) << 5;
13333                 } else if (tg3_flag(tp, HW_TSO_2))
13334                         mss |= hdr_len << 9;
13335                 else if (tg3_flag(tp, HW_TSO_1) ||
13336                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13337                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13338                 } else {
13339                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13340                 }
13341
13342                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13343         } else {
13344                 num_pkts = 1;
13345                 data_off = ETH_HLEN;
13346
13347                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13348                     tx_len > VLAN_ETH_FRAME_LEN)
13349                         base_flags |= TXD_FLAG_JMB_PKT;
13350         }
13351
13352         for (i = data_off; i < tx_len; i++)
13353                 tx_data[i] = (u8) (i & 0xff);
13354
13355         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13356         if (pci_dma_mapping_error(tp->pdev, map)) {
13357                 dev_kfree_skb(skb);
13358                 return -EIO;
13359         }
13360
13361         val = tnapi->tx_prod;
13362         tnapi->tx_buffers[val].skb = skb;
13363         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13364
13365         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13366                rnapi->coal_now);
13367
13368         udelay(10);
13369
13370         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13371
13372         budget = tg3_tx_avail(tnapi);
13373         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13374                             base_flags | TXD_FLAG_END, mss, 0)) {
13375                 tnapi->tx_buffers[val].skb = NULL;
13376                 dev_kfree_skb(skb);
13377                 return -EIO;
13378         }
13379
13380         tnapi->tx_prod++;
13381
13382         /* Sync BD data before updating mailbox */
13383         wmb();
13384
13385         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13386         tr32_mailbox(tnapi->prodmbox);
13387
13388         udelay(10);
13389
13390         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13391         for (i = 0; i < 35; i++) {
13392                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13393                        coal_now);
13394
13395                 udelay(10);
13396
13397                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13398                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13399                 if ((tx_idx == tnapi->tx_prod) &&
13400                     (rx_idx == (rx_start_idx + num_pkts)))
13401                         break;
13402         }
13403
13404         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13405         dev_kfree_skb(skb);
13406
13407         if (tx_idx != tnapi->tx_prod)
13408                 goto out;
13409
13410         if (rx_idx != rx_start_idx + num_pkts)
13411                 goto out;
13412
13413         val = data_off;
13414         while (rx_idx != rx_start_idx) {
13415                 desc = &rnapi->rx_rcb[rx_start_idx++];
13416                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13417                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13418
13419                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13420                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13421                         goto out;
13422
13423                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13424                          - ETH_FCS_LEN;
13425
13426                 if (!tso_loopback) {
13427                         if (rx_len != tx_len)
13428                                 goto out;
13429
13430                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13431                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13432                                         goto out;
13433                         } else {
13434                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13435                                         goto out;
13436                         }
13437                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13438                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13439                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13440                         goto out;
13441                 }
13442
13443                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13444                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13445                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13446                                              mapping);
13447                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13448                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13449                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13450                                              mapping);
13451                 } else
13452                         goto out;
13453
13454                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13455                                             PCI_DMA_FROMDEVICE);
13456
13457                 rx_data += TG3_RX_OFFSET(tp);
13458                 for (i = data_off; i < rx_len; i++, val++) {
13459                         if (*(rx_data + i) != (u8) (val & 0xff))
13460                                 goto out;
13461                 }
13462         }
13463
13464         err = 0;
13465
13466         /* tg3_free_rings will unmap and free the rx_data */
13467 out:
13468         return err;
13469 }
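/* Flow of the loopback test above: build a frame addressed to
 * ourselves (payload byte i is simply i & 0xff), DMA-map it, post it
 * to the send ring, kick the producer mailbox, then force immediate
 * coalescing passes via the coal_now bits and poll for up to 350 us
 * until the tx consumer and rx producer indices advance.  The rx
 * verification walks a running counter (val) across all received
 * packets, so a TSO-segmented payload is checked end to end across
 * segment boundaries.
 */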
13470
13471 #define TG3_STD_LOOPBACK_FAILED         1
13472 #define TG3_JMB_LOOPBACK_FAILED         2
13473 #define TG3_TSO_LOOPBACK_FAILED         4
13474 #define TG3_LOOPBACK_FAILED \
13475         (TG3_STD_LOOPBACK_FAILED | \
13476          TG3_JMB_LOOPBACK_FAILED | \
13477          TG3_TSO_LOOPBACK_FAILED)
13478
13479 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13480 {
13481         int err = -EIO;
13482         u32 eee_cap;
13483         u32 jmb_pkt_sz = 9000;
13484
13485         if (tp->dma_limit)
13486                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13487
13488         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13489         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13490
13491         if (!netif_running(tp->dev)) {
13492                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13493                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13494                 if (do_extlpbk)
13495                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13496                 goto done;
13497         }
13498
13499         err = tg3_reset_hw(tp, true);
13500         if (err) {
13501                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13502                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13503                 if (do_extlpbk)
13504                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13505                 goto done;
13506         }
13507
13508         if (tg3_flag(tp, ENABLE_RSS)) {
13509                 int i;
13510
13511                 /* Reroute all rx packets to the 1st queue */
13512                 for (i = MAC_RSS_INDIR_TBL_0;
13513                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13514                         tw32(i, 0x0);
13515         }
13516
13517         /* HW erratum - MAC loopback fails in some cases on 5780.
13518          * Normal traffic and PHY loopback are not affected by
13519          * this erratum.  Also, the MAC loopback test is deprecated
13520          * for all newer ASIC revisions.

13521          */
13522         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13523             !tg3_flag(tp, CPMU_PRESENT)) {
13524                 tg3_mac_loopback(tp, true);
13525
13526                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13527                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13528
13529                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13530                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13531                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13532
13533                 tg3_mac_loopback(tp, false);
13534         }
13535
13536         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13537             !tg3_flag(tp, USE_PHYLIB)) {
13538                 int i;
13539
13540                 tg3_phy_lpbk_set(tp, 0, false);
13541
13542                 /* Wait for link */
13543                 for (i = 0; i < 100; i++) {
13544                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13545                                 break;
13546                         mdelay(1);
13547                 }
13548
13549                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13550                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13551                 if (tg3_flag(tp, TSO_CAPABLE) &&
13552                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13553                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13554                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13555                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13556                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13557
13558                 if (do_extlpbk) {
13559                         tg3_phy_lpbk_set(tp, 0, true);
13560
13561                         /* All link indications report up, but the hardware
13562                          * isn't really ready for about 20 msec.  Double it
13563                          * to be sure.
13564                          */
13565                         mdelay(40);
13566
13567                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13568                                 data[TG3_EXT_LOOPB_TEST] |=
13569                                                         TG3_STD_LOOPBACK_FAILED;
13570                         if (tg3_flag(tp, TSO_CAPABLE) &&
13571                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13572                                 data[TG3_EXT_LOOPB_TEST] |=
13573                                                         TG3_TSO_LOOPBACK_FAILED;
13574                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13575                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13576                                 data[TG3_EXT_LOOPB_TEST] |=
13577                                                         TG3_JMB_LOOPBACK_FAILED;
13578                 }
13579
13580                 /* Re-enable gphy autopowerdown. */
13581                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13582                         tg3_phy_toggle_apd(tp, true);
13583         }
13584
13585         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13586                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13587
13588 done:
13589         tp->phy_flags |= eee_cap;
13590
13591         return err;
13592 }
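/* Three loopback classes are exercised above: internal MAC loopback
 * (skipped on 5780 and on CPMU-equipped chips), internal PHY loopback,
 * and, on request, external loopback (which assumes a loopback
 * connector is fitted).  Each result word packs the failures as bits:
 * standard frame = 1, jumbo frame = 2, TSO = 4, matching the
 * TG3_*_LOOPBACK_FAILED flags above.
 */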
13593
13594 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13595                           u64 *data)
13596 {
13597         struct tg3 *tp = netdev_priv(dev);
13598         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13599
13600         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13601                 if (tg3_power_up(tp)) {
13602                         etest->flags |= ETH_TEST_FL_FAILED;
13603                         memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13604                         return;
13605                 }
13606                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13607         }
13608
13609         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13610
13611         if (tg3_test_nvram(tp) != 0) {
13612                 etest->flags |= ETH_TEST_FL_FAILED;
13613                 data[TG3_NVRAM_TEST] = 1;
13614         }
13615         if (!doextlpbk && tg3_test_link(tp)) {
13616                 etest->flags |= ETH_TEST_FL_FAILED;
13617                 data[TG3_LINK_TEST] = 1;
13618         }
13619         if (etest->flags & ETH_TEST_FL_OFFLINE) {
13620                 int err, err2 = 0, irq_sync = 0;
13621
13622                 if (netif_running(dev)) {
13623                         tg3_phy_stop(tp);
13624                         tg3_netif_stop(tp);
13625                         irq_sync = 1;
13626                 }
13627
13628                 tg3_full_lock(tp, irq_sync);
13629                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13630                 err = tg3_nvram_lock(tp);
13631                 tg3_halt_cpu(tp, RX_CPU_BASE);
13632                 if (!tg3_flag(tp, 5705_PLUS))
13633                         tg3_halt_cpu(tp, TX_CPU_BASE);
13634                 if (!err)
13635                         tg3_nvram_unlock(tp);
13636
13637                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13638                         tg3_phy_reset(tp);
13639
13640                 if (tg3_test_registers(tp) != 0) {
13641                         etest->flags |= ETH_TEST_FL_FAILED;
13642                         data[TG3_REGISTER_TEST] = 1;
13643                 }
13644
13645                 if (tg3_test_memory(tp) != 0) {
13646                         etest->flags |= ETH_TEST_FL_FAILED;
13647                         data[TG3_MEMORY_TEST] = 1;
13648                 }
13649
13650                 if (doextlpbk)
13651                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13652
13653                 if (tg3_test_loopback(tp, data, doextlpbk))
13654                         etest->flags |= ETH_TEST_FL_FAILED;
13655
13656                 tg3_full_unlock(tp);
13657
13658                 if (tg3_test_interrupt(tp) != 0) {
13659                         etest->flags |= ETH_TEST_FL_FAILED;
13660                         data[TG3_INTERRUPT_TEST] = 1;
13661                 }
13662
13663                 tg3_full_lock(tp, 0);
13664
13665                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13666                 if (netif_running(dev)) {
13667                         tg3_flag_set(tp, INIT_COMPLETE);
13668                         err2 = tg3_restart_hw(tp, true);
13669                         if (!err2)
13670                                 tg3_netif_start(tp);
13671                 }
13672
13673                 tg3_full_unlock(tp);
13674
13675                 if (irq_sync && !err2)
13676                         tg3_phy_start(tp);
13677         }
13678         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13679                 tg3_power_down_prepare(tp);
13680
13681 }
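/* Userspace reaches this whole battery through the ETHTOOL_TEST ioctl,
 * which is what "ethtool -t eth0 offline" issues.  A minimal sketch,
 * assuming interface name "eth0"; the number of result words comes
 * from ethtool_drvinfo.testinfo_len:
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
        struct ethtool_test *test;
        struct ifreq ifr;
        unsigned int i;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

        ifr.ifr_data = (void *)&drvinfo;
        if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                return 1;

        test = calloc(1, sizeof(*test) +
                         drvinfo.testinfo_len * sizeof(__u64));
        if (!test)
                return 1;
        test->cmd = ETHTOOL_TEST;
        test->flags = ETH_TEST_FL_OFFLINE;
        ifr.ifr_data = (void *)test;
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                return 1;

        printf("self-test %s\n",
               (test->flags & ETH_TEST_FL_FAILED) ? "FAILED" : "PASSED");
        for (i = 0; i < drvinfo.testinfo_len; i++)
                printf("  result[%u] = %llu\n", i,
                       (unsigned long long)test->data[i]);
        return 0;
}
#endif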
13682
13683 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13684 {
13685         struct tg3 *tp = netdev_priv(dev);
13686         struct hwtstamp_config stmpconf;
13687
13688         if (!tg3_flag(tp, PTP_CAPABLE))
13689                 return -EOPNOTSUPP;
13690
13691         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13692                 return -EFAULT;
13693
13694         if (stmpconf.flags)
13695                 return -EINVAL;
13696
13697         if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13698             stmpconf.tx_type != HWTSTAMP_TX_OFF)
13699                 return -ERANGE;
13700
13701         switch (stmpconf.rx_filter) {
13702         case HWTSTAMP_FILTER_NONE:
13703                 tp->rxptpctl = 0;
13704                 break;
13705         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13706                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13707                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13708                 break;
13709         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13710                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13711                                TG3_RX_PTP_CTL_SYNC_EVNT;
13712                 break;
13713         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13714                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13715                                TG3_RX_PTP_CTL_DELAY_REQ;
13716                 break;
13717         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13718                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13719                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13720                 break;
13721         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13722                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13723                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13724                 break;
13725         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13726                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13727                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13728                 break;
13729         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13730                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13731                                TG3_RX_PTP_CTL_SYNC_EVNT;
13732                 break;
13733         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13734                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13735                                TG3_RX_PTP_CTL_SYNC_EVNT;
13736                 break;
13737         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13738                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13739                                TG3_RX_PTP_CTL_SYNC_EVNT;
13740                 break;
13741         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13742                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13743                                TG3_RX_PTP_CTL_DELAY_REQ;
13744                 break;
13745         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13746                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13747                                TG3_RX_PTP_CTL_DELAY_REQ;
13748                 break;
13749         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13750                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13751                                TG3_RX_PTP_CTL_DELAY_REQ;
13752                 break;
13753         default:
13754                 return -ERANGE;
13755         }
13756
13757         if (netif_running(dev) && tp->rxptpctl)
13758                 tw32(TG3_RX_PTP_CTL,
13759                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13760
13761         if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13762                 tg3_flag_set(tp, TX_TSTAMP_EN);
13763         else
13764                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13765
13766         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13767                 -EFAULT : 0;
13768 }
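/* The matching userspace side is the standard SIOCSHWTSTAMP ioctl from
 * <linux/net_tstamp.h>.  A minimal sketch, assuming the caller already
 * has a socket fd and an interface name:
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_ptp_timestamping(int fd, const char *ifname)
{
        struct hwtstamp_config cfg;
        struct ifreq ifr;

        memset(&cfg, 0, sizeof(cfg));
        cfg.tx_type = HWTSTAMP_TX_ON;
        cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&cfg;

        /* The driver copies the (possibly adjusted) config back out. */
        return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}
#endif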
13769
13770 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13771 {
13772         struct tg3 *tp = netdev_priv(dev);
13773         struct hwtstamp_config stmpconf;
13774
13775         if (!tg3_flag(tp, PTP_CAPABLE))
13776                 return -EOPNOTSUPP;
13777
13778         stmpconf.flags = 0;
13779         stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13780                             HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13781
13782         switch (tp->rxptpctl) {
13783         case 0:
13784                 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13785                 break;
13786         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13787                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13788                 break;
13789         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13790                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13791                 break;
13792         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13793                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13794                 break;
13795         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13796                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13797                 break;
13798         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13799                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13800                 break;
13801         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13802                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13803                 break;
13804         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13805                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13806                 break;
13807         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13808                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13809                 break;
13810         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13811                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13812                 break;
13813         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13814                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13815                 break;
13816         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13817                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13818                 break;
13819         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13820                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13821                 break;
13822         default:
13823                 WARN_ON_ONCE(1);
13824                 return -ERANGE;
13825         }
13826
13827         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13828                 -EFAULT : 0;
13829 }
13830
13831 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13832 {
13833         struct mii_ioctl_data *data = if_mii(ifr);
13834         struct tg3 *tp = netdev_priv(dev);
13835         int err;
13836
13837         if (tg3_flag(tp, USE_PHYLIB)) {
13838                 struct phy_device *phydev;
13839                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13840                         return -EAGAIN;
13841                 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
13842                 return phy_mii_ioctl(phydev, ifr, cmd);
13843         }
13844
13845         switch (cmd) {
13846         case SIOCGMIIPHY:
13847                 data->phy_id = tp->phy_addr;
13848
13849                 /* fallthru */
13850         case SIOCGMIIREG: {
13851                 u32 mii_regval;
13852
13853                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13854                         break;                  /* We have no PHY */
13855
13856                 if (!netif_running(dev))
13857                         return -EAGAIN;
13858
13859                 spin_lock_bh(&tp->lock);
13860                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13861                                     data->reg_num & 0x1f, &mii_regval);
13862                 spin_unlock_bh(&tp->lock);
13863
13864                 data->val_out = mii_regval;
13865
13866                 return err;
13867         }
13868
13869         case SIOCSMIIREG:
13870                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13871                         break;                  /* We have no PHY */
13872
13873                 if (!netif_running(dev))
13874                         return -EAGAIN;
13875
13876                 spin_lock_bh(&tp->lock);
13877                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13878                                      data->reg_num & 0x1f, data->val_in);
13879                 spin_unlock_bh(&tp->lock);
13880
13881                 return err;
13882
13883         case SIOCSHWTSTAMP:
13884                 return tg3_hwtstamp_set(dev, ifr);
13885
13886         case SIOCGHWTSTAMP:
13887                 return tg3_hwtstamp_get(dev, ifr);
13888
13889         default:
13890                 /* do nothing */
13891                 break;
13892         }
13893         return -EOPNOTSUPP;
13894 }
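/* For non-phylib configurations the SIOCGMIIPHY/SIOCGMIIREG pair above
 * gives userspace raw access to the PHY registers (SerDes devices have
 * no PHY to talk to and fall through to -EOPNOTSUPP).  A minimal
 * sketch that reads the Basic Mode Status Register, assuming an
 * AF_INET socket fd:
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

static int read_bmsr(int fd, const char *ifname, unsigned int *bmsr)
{
        struct ifreq ifr;
        struct mii_ioctl_data *mii =
                (struct mii_ioctl_data *)&ifr.ifr_data;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

        if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)   /* fills mii->phy_id */
                return -1;
        mii->reg_num = MII_BMSR;
        if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
                return -1;
        *bmsr = mii->val_out;
        return 0;
}
#endif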
13895
13896 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13897 {
13898         struct tg3 *tp = netdev_priv(dev);
13899
13900         memcpy(ec, &tp->coal, sizeof(*ec));
13901         return 0;
13902 }
13903
13904 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13905 {
13906         struct tg3 *tp = netdev_priv(dev);
13907         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13908         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13909
13910         if (!tg3_flag(tp, 5705_PLUS)) {
13911                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13912                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13913                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13914                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13915         }
13916
13917         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13918             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13919             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13920             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13921             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13922             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13923             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13924             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13925             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13926             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13927                 return -EINVAL;
13928
13929         /* No rx interrupts will be generated if both are zero */
13930         if ((ec->rx_coalesce_usecs == 0) &&
13931             (ec->rx_max_coalesced_frames == 0))
13932                 return -EINVAL;
13933
13934         /* No tx interrupts will be generated if both are zero */
13935         if ((ec->tx_coalesce_usecs == 0) &&
13936             (ec->tx_max_coalesced_frames == 0))
13937                 return -EINVAL;
13938
13939         /* Only copy relevant parameters, ignore all others. */
13940         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13941         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13942         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13943         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13944         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13945         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13946         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13947         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13948         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13949
13950         if (netif_running(dev)) {
13951                 tg3_full_lock(tp, 0);
13952                 __tg3_set_coalesce(tp, &tp->coal);
13953                 tg3_full_unlock(tp);
13954         }
13955         return 0;
13956 }
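/* From userspace this corresponds to "ethtool -C"; for example, with
 * illustrative values:
 *
 *      ethtool -C eth0 rx-usecs 20 rx-frames 5 tx-usecs 72 tx-frames 53
 *
 * Note the validation above: rx-usecs and rx-frames must not both be
 * zero (likewise for tx), since no interrupts would ever be generated.
 */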
13957
13958 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
13959 {
13960         struct tg3 *tp = netdev_priv(dev);
13961
13962         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13963                 netdev_warn(tp->dev, "Board does not support EEE!\n");
13964                 return -EOPNOTSUPP;
13965         }
13966
13967         if (edata->advertised != tp->eee.advertised) {
13968                 netdev_warn(tp->dev,
13969                             "Direct manipulation of EEE advertisement is not supported\n");
13970                 return -EINVAL;
13971         }
13972
13973         if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
13974                 netdev_warn(tp->dev,
13975                             "Maximum supported Tx LPI timer is %#x usec\n",
13976                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
13977                 return -EINVAL;
13978         }
13979
13980         tp->eee = *edata;
13981
13982         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
13983         tg3_warn_mgmt_link_flap(tp);
13984
13985         if (netif_running(tp->dev)) {
13986                 tg3_full_lock(tp, 0);
13987                 tg3_setup_eee(tp);
13988                 tg3_phy_reset(tp);
13989                 tg3_full_unlock(tp);
13990         }
13991
13992         return 0;
13993 }
13994
13995 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
13996 {
13997         struct tg3 *tp = netdev_priv(dev);
13998
13999         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14000                 netdev_warn(tp->dev,
14001                             "Board does not support EEE!\n");
14002                 return -EOPNOTSUPP;
14003         }
14004
14005         *edata = tp->eee;
14006         return 0;
14007 }
14008
14009 static const struct ethtool_ops tg3_ethtool_ops = {
14010         .get_settings           = tg3_get_settings,
14011         .set_settings           = tg3_set_settings,
14012         .get_drvinfo            = tg3_get_drvinfo,
14013         .get_regs_len           = tg3_get_regs_len,
14014         .get_regs               = tg3_get_regs,
14015         .get_wol                = tg3_get_wol,
14016         .set_wol                = tg3_set_wol,
14017         .get_msglevel           = tg3_get_msglevel,
14018         .set_msglevel           = tg3_set_msglevel,
14019         .nway_reset             = tg3_nway_reset,
14020         .get_link               = ethtool_op_get_link,
14021         .get_eeprom_len         = tg3_get_eeprom_len,
14022         .get_eeprom             = tg3_get_eeprom,
14023         .set_eeprom             = tg3_set_eeprom,
14024         .get_ringparam          = tg3_get_ringparam,
14025         .set_ringparam          = tg3_set_ringparam,
14026         .get_pauseparam         = tg3_get_pauseparam,
14027         .set_pauseparam         = tg3_set_pauseparam,
14028         .self_test              = tg3_self_test,
14029         .get_strings            = tg3_get_strings,
14030         .set_phys_id            = tg3_set_phys_id,
14031         .get_ethtool_stats      = tg3_get_ethtool_stats,
14032         .get_coalesce           = tg3_get_coalesce,
14033         .set_coalesce           = tg3_set_coalesce,
14034         .get_sset_count         = tg3_get_sset_count,
14035         .get_rxnfc              = tg3_get_rxnfc,
14036         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14037         .get_rxfh_indir         = tg3_get_rxfh_indir,
14038         .set_rxfh_indir         = tg3_set_rxfh_indir,
14039         .get_channels           = tg3_get_channels,
14040         .set_channels           = tg3_set_channels,
14041         .get_ts_info            = tg3_get_ts_info,
14042         .get_eee                = tg3_get_eee,
14043         .set_eee                = tg3_set_eee,
14044 };
14045
14046 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
14047                                                 struct rtnl_link_stats64 *stats)
14048 {
14049         struct tg3 *tp = netdev_priv(dev);
14050
14051         spin_lock_bh(&tp->lock);
14052         if (!tp->hw_stats) {
14053                 spin_unlock_bh(&tp->lock);
14054                 return &tp->net_stats_prev;
14055         }
14056
14057         tg3_get_nstats(tp, stats);
14058         spin_unlock_bh(&tp->lock);
14059
14060         return stats;
14061 }
14062
14063 static void tg3_set_rx_mode(struct net_device *dev)
14064 {
14065         struct tg3 *tp = netdev_priv(dev);
14066
14067         if (!netif_running(dev))
14068                 return;
14069
14070         tg3_full_lock(tp, 0);
14071         __tg3_set_rx_mode(dev);
14072         tg3_full_unlock(tp);
14073 }
14074
14075 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14076                                int new_mtu)
14077 {
14078         dev->mtu = new_mtu;
14079
14080         if (new_mtu > ETH_DATA_LEN) {
14081                 if (tg3_flag(tp, 5780_CLASS)) {
14082                         netdev_update_features(dev);
14083                         tg3_flag_clear(tp, TSO_CAPABLE);
14084                 } else {
14085                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14086                 }
14087         } else {
14088                 if (tg3_flag(tp, 5780_CLASS)) {
14089                         tg3_flag_set(tp, TSO_CAPABLE);
14090                         netdev_update_features(dev);
14091                 }
14092                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14093         }
14094 }
14095
14096 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14097 {
14098         struct tg3 *tp = netdev_priv(dev);
14099         int err;
14100         bool reset_phy = false;
14101
14102         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
14103                 return -EINVAL;
14104
14105         if (!netif_running(dev)) {
14106                 /* Not running yet; just record the new MTU and
14107                  * apply it when the device is brought up.
14108                  */
14109                 tg3_set_mtu(dev, tp, new_mtu);
14110                 return 0;
14111         }
14112
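              /* Device is up: quiesce the PHY and the data path, reprogram
               * the MTU under the full lock, then restart the hardware and
               * re-attach the PHY.
               */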
14113         tg3_phy_stop(tp);
14114
14115         tg3_netif_stop(tp);
14116
14117         tg3_full_lock(tp, 1);
14118
14119         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14120
14121         tg3_set_mtu(dev, tp, new_mtu);
14122
14123         /* Reset the PHY, otherwise the read DMA engine will be left in
14124          * a mode that breaks all requests down to 256 bytes.
14125          */
14126         if (tg3_asic_rev(tp) == ASIC_REV_57766)
14127                 reset_phy = true;
14128
14129         err = tg3_restart_hw(tp, reset_phy);
14130
14131         if (!err)
14132                 tg3_netif_start(tp);
14133
14134         tg3_full_unlock(tp);
14135
14136         if (!err)
14137                 tg3_phy_start(tp);
14138
14139         return err;
14140 }
14141
14142 static const struct net_device_ops tg3_netdev_ops = {
14143         .ndo_open               = tg3_open,
14144         .ndo_stop               = tg3_close,
14145         .ndo_start_xmit         = tg3_start_xmit,
14146         .ndo_get_stats64        = tg3_get_stats64,
14147         .ndo_validate_addr      = eth_validate_addr,
14148         .ndo_set_rx_mode        = tg3_set_rx_mode,
14149         .ndo_set_mac_address    = tg3_set_mac_addr,
14150         .ndo_do_ioctl           = tg3_ioctl,
14151         .ndo_tx_timeout         = tg3_tx_timeout,
14152         .ndo_change_mtu         = tg3_change_mtu,
14153         .ndo_fix_features       = tg3_fix_features,
14154         .ndo_set_features       = tg3_set_features,
14155 #ifdef CONFIG_NET_POLL_CONTROLLER
14156         .ndo_poll_controller    = tg3_poll_controller,
14157 #endif
14158 };
14159
14160 static void tg3_get_eeprom_size(struct tg3 *tp)
14161 {
14162         u32 cursize, val, magic;
14163
14164         tp->nvram_size = EEPROM_CHIP_SIZE;
14165
14166         if (tg3_nvram_read(tp, 0, &magic) != 0)
14167                 return;
14168
14169         if ((magic != TG3_EEPROM_MAGIC) &&
14170             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14171             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14172                 return;
14173
14174         /*
14175          * Size the chip by reading offsets at increasing powers of two.
14176          * When we encounter our validation signature, we know the addressing
14177          * has wrapped around, and thus have our chip size.
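               * For example (illustrative): on a 512-byte part the reads at
               * 0x10, 0x20, 0x40, ... keep missing until the address wraps
               * at 0x200 and the signature at offset 0 is read back, so
               * cursize ends up as 0x200.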
14178          */
14179         cursize = 0x10;
14180
14181         while (cursize < tp->nvram_size) {
14182                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14183                         return;
14184
14185                 if (val == magic)
14186                         break;
14187
14188                 cursize <<= 1;
14189         }
14190
14191         tp->nvram_size = cursize;
14192 }
14193
14194 static void tg3_get_nvram_size(struct tg3 *tp)
14195 {
14196         u32 val;
14197
14198         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14199                 return;
14200
14201         /* Selfboot format */
14202         if (val != TG3_EEPROM_MAGIC) {
14203                 tg3_get_eeprom_size(tp);
14204                 return;
14205         }
14206
14207         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14208                 if (val != 0) {
14209                         /* This is confusing.  We want to operate on the
14210                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14211                          * call will read from NVRAM and byteswap the data
14212                          * according to the byteswapping settings for all
14213                          * other register accesses.  This ensures the data we
14214                          * want will always reside in the lower 16-bits.
14215                          * However, the data in NVRAM is in LE format, which
14216                          * means the data from the NVRAM read will always be
14217                          * opposite the endianness of the CPU.  The 16-bit
14218                          * byteswap then brings the data to CPU endianness.
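                               * (Illustration: if NVRAM stores a size-in-KB
                               * of 0x0200, i.e. 512 KB, the read returns
                               * 0x0002 in the low 16 bits and swab16()
                               * restores 0x0200.)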
14219                          */
14220                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14221                         return;
14222                 }
14223         }
14224         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14225 }
14226
14227 static void tg3_get_nvram_info(struct tg3 *tp)
14228 {
14229         u32 nvcfg1;
14230
14231         nvcfg1 = tr32(NVRAM_CFG1);
14232         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14233                 tg3_flag_set(tp, FLASH);
14234         } else {
14235                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14236                 tw32(NVRAM_CFG1, nvcfg1);
14237         }
14238
14239         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14240             tg3_flag(tp, 5780_CLASS)) {
14241                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14242                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14243                         tp->nvram_jedecnum = JEDEC_ATMEL;
14244                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14245                         tg3_flag_set(tp, NVRAM_BUFFERED);
14246                         break;
14247                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14248                         tp->nvram_jedecnum = JEDEC_ATMEL;
14249                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14250                         break;
14251                 case FLASH_VENDOR_ATMEL_EEPROM:
14252                         tp->nvram_jedecnum = JEDEC_ATMEL;
14253                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14254                         tg3_flag_set(tp, NVRAM_BUFFERED);
14255                         break;
14256                 case FLASH_VENDOR_ST:
14257                         tp->nvram_jedecnum = JEDEC_ST;
14258                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14259                         tg3_flag_set(tp, NVRAM_BUFFERED);
14260                         break;
14261                 case FLASH_VENDOR_SAIFUN:
14262                         tp->nvram_jedecnum = JEDEC_SAIFUN;
14263                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14264                         break;
14265                 case FLASH_VENDOR_SST_SMALL:
14266                 case FLASH_VENDOR_SST_LARGE:
14267                         tp->nvram_jedecnum = JEDEC_SST;
14268                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14269                         break;
14270                 }
14271         } else {
14272                 tp->nvram_jedecnum = JEDEC_ATMEL;
14273                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14274                 tg3_flag_set(tp, NVRAM_BUFFERED);
14275         }
14276 }
14277
14278 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14279 {
14280         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14281         case FLASH_5752PAGE_SIZE_256:
14282                 tp->nvram_pagesize = 256;
14283                 break;
14284         case FLASH_5752PAGE_SIZE_512:
14285                 tp->nvram_pagesize = 512;
14286                 break;
14287         case FLASH_5752PAGE_SIZE_1K:
14288                 tp->nvram_pagesize = 1024;
14289                 break;
14290         case FLASH_5752PAGE_SIZE_2K:
14291                 tp->nvram_pagesize = 2048;
14292                 break;
14293         case FLASH_5752PAGE_SIZE_4K:
14294                 tp->nvram_pagesize = 4096;
14295                 break;
14296         case FLASH_5752PAGE_SIZE_264:
14297                 tp->nvram_pagesize = 264;
14298                 break;
14299         case FLASH_5752PAGE_SIZE_528:
14300                 tp->nvram_pagesize = 528;
14301                 break;
14302         }
14303 }
14304
14305 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14306 {
14307         u32 nvcfg1;
14308
14309         nvcfg1 = tr32(NVRAM_CFG1);
14310
14311         /* NVRAM protection for TPM */
14312         if (nvcfg1 & (1 << 27))
14313                 tg3_flag_set(tp, PROTECTED_NVRAM);
14314
14315         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14316         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14317         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14318                 tp->nvram_jedecnum = JEDEC_ATMEL;
14319                 tg3_flag_set(tp, NVRAM_BUFFERED);
14320                 break;
14321         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14322                 tp->nvram_jedecnum = JEDEC_ATMEL;
14323                 tg3_flag_set(tp, NVRAM_BUFFERED);
14324                 tg3_flag_set(tp, FLASH);
14325                 break;
14326         case FLASH_5752VENDOR_ST_M45PE10:
14327         case FLASH_5752VENDOR_ST_M45PE20:
14328         case FLASH_5752VENDOR_ST_M45PE40:
14329                 tp->nvram_jedecnum = JEDEC_ST;
14330                 tg3_flag_set(tp, NVRAM_BUFFERED);
14331                 tg3_flag_set(tp, FLASH);
14332                 break;
14333         }
14334
14335         if (tg3_flag(tp, FLASH)) {
14336                 tg3_nvram_get_pagesize(tp, nvcfg1);
14337         } else {
14338                 /* For EEPROM, set the pagesize to the maximum EEPROM size. */
14339                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14340
14341                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14342                 tw32(NVRAM_CFG1, nvcfg1);
14343         }
14344 }
14345
14346 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14347 {
14348         u32 nvcfg1, protect = 0;
14349
14350         nvcfg1 = tr32(NVRAM_CFG1);
14351
14352         /* NVRAM protection for TPM */
14353         if (nvcfg1 & (1 << 27)) {
14354                 tg3_flag_set(tp, PROTECTED_NVRAM);
14355                 protect = 1;
14356         }
14357
14358         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14359         switch (nvcfg1) {
14360         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14361         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14362         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14363         case FLASH_5755VENDOR_ATMEL_FLASH_5:
14364                 tp->nvram_jedecnum = JEDEC_ATMEL;
14365                 tg3_flag_set(tp, NVRAM_BUFFERED);
14366                 tg3_flag_set(tp, FLASH);
14367                 tp->nvram_pagesize = 264;
14368                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14369                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14370                         tp->nvram_size = (protect ? 0x3e200 :
14371                                           TG3_NVRAM_SIZE_512KB);
14372                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14373                         tp->nvram_size = (protect ? 0x1f200 :
14374                                           TG3_NVRAM_SIZE_256KB);
14375                 else
14376                         tp->nvram_size = (protect ? 0x1f200 :
14377                                           TG3_NVRAM_SIZE_128KB);
14378                 break;
14379         case FLASH_5752VENDOR_ST_M45PE10:
14380         case FLASH_5752VENDOR_ST_M45PE20:
14381         case FLASH_5752VENDOR_ST_M45PE40:
14382                 tp->nvram_jedecnum = JEDEC_ST;
14383                 tg3_flag_set(tp, NVRAM_BUFFERED);
14384                 tg3_flag_set(tp, FLASH);
14385                 tp->nvram_pagesize = 256;
14386                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14387                         tp->nvram_size = (protect ?
14388                                           TG3_NVRAM_SIZE_64KB :
14389                                           TG3_NVRAM_SIZE_128KB);
14390                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14391                         tp->nvram_size = (protect ?
14392                                           TG3_NVRAM_SIZE_64KB :
14393                                           TG3_NVRAM_SIZE_256KB);
14394                 else
14395                         tp->nvram_size = (protect ?
14396                                           TG3_NVRAM_SIZE_128KB :
14397                                           TG3_NVRAM_SIZE_512KB);
14398                 break;
14399         }
14400 }
14401
14402 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14403 {
14404         u32 nvcfg1;
14405
14406         nvcfg1 = tr32(NVRAM_CFG1);
14407
14408         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14409         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14410         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14411         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14412         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14413                 tp->nvram_jedecnum = JEDEC_ATMEL;
14414                 tg3_flag_set(tp, NVRAM_BUFFERED);
14415                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14416
14417                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14418                 tw32(NVRAM_CFG1, nvcfg1);
14419                 break;
14420         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14421         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14422         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14423         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14424                 tp->nvram_jedecnum = JEDEC_ATMEL;
14425                 tg3_flag_set(tp, NVRAM_BUFFERED);
14426                 tg3_flag_set(tp, FLASH);
14427                 tp->nvram_pagesize = 264;
14428                 break;
14429         case FLASH_5752VENDOR_ST_M45PE10:
14430         case FLASH_5752VENDOR_ST_M45PE20:
14431         case FLASH_5752VENDOR_ST_M45PE40:
14432                 tp->nvram_jedecnum = JEDEC_ST;
14433                 tg3_flag_set(tp, NVRAM_BUFFERED);
14434                 tg3_flag_set(tp, FLASH);
14435                 tp->nvram_pagesize = 256;
14436                 break;
14437         }
14438 }
14439
14440 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14441 {
14442         u32 nvcfg1, protect = 0;
14443
14444         nvcfg1 = tr32(NVRAM_CFG1);
14445
14446         /* NVRAM protection for TPM */
14447         if (nvcfg1 & (1 << 27)) {
14448                 tg3_flag_set(tp, PROTECTED_NVRAM);
14449                 protect = 1;
14450         }
14451
14452         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14453         switch (nvcfg1) {
14454         case FLASH_5761VENDOR_ATMEL_ADB021D:
14455         case FLASH_5761VENDOR_ATMEL_ADB041D:
14456         case FLASH_5761VENDOR_ATMEL_ADB081D:
14457         case FLASH_5761VENDOR_ATMEL_ADB161D:
14458         case FLASH_5761VENDOR_ATMEL_MDB021D:
14459         case FLASH_5761VENDOR_ATMEL_MDB041D:
14460         case FLASH_5761VENDOR_ATMEL_MDB081D:
14461         case FLASH_5761VENDOR_ATMEL_MDB161D:
14462                 tp->nvram_jedecnum = JEDEC_ATMEL;
14463                 tg3_flag_set(tp, NVRAM_BUFFERED);
14464                 tg3_flag_set(tp, FLASH);
14465                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14466                 tp->nvram_pagesize = 256;
14467                 break;
14468         case FLASH_5761VENDOR_ST_A_M45PE20:
14469         case FLASH_5761VENDOR_ST_A_M45PE40:
14470         case FLASH_5761VENDOR_ST_A_M45PE80:
14471         case FLASH_5761VENDOR_ST_A_M45PE16:
14472         case FLASH_5761VENDOR_ST_M_M45PE20:
14473         case FLASH_5761VENDOR_ST_M_M45PE40:
14474         case FLASH_5761VENDOR_ST_M_M45PE80:
14475         case FLASH_5761VENDOR_ST_M_M45PE16:
14476                 tp->nvram_jedecnum = JEDEC_ST;
14477                 tg3_flag_set(tp, NVRAM_BUFFERED);
14478                 tg3_flag_set(tp, FLASH);
14479                 tp->nvram_pagesize = 256;
14480                 break;
14481         }
14482
14483         if (protect) {
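                      /* With TPM protection active, the lockout address
                       * register effectively caps the accessible region, so
                       * use it as the NVRAM size.
                       */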
14484                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14485         } else {
14486                 switch (nvcfg1) {
14487                 case FLASH_5761VENDOR_ATMEL_ADB161D:
14488                 case FLASH_5761VENDOR_ATMEL_MDB161D:
14489                 case FLASH_5761VENDOR_ST_A_M45PE16:
14490                 case FLASH_5761VENDOR_ST_M_M45PE16:
14491                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14492                         break;
14493                 case FLASH_5761VENDOR_ATMEL_ADB081D:
14494                 case FLASH_5761VENDOR_ATMEL_MDB081D:
14495                 case FLASH_5761VENDOR_ST_A_M45PE80:
14496                 case FLASH_5761VENDOR_ST_M_M45PE80:
14497                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14498                         break;
14499                 case FLASH_5761VENDOR_ATMEL_ADB041D:
14500                 case FLASH_5761VENDOR_ATMEL_MDB041D:
14501                 case FLASH_5761VENDOR_ST_A_M45PE40:
14502                 case FLASH_5761VENDOR_ST_M_M45PE40:
14503                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14504                         break;
14505                 case FLASH_5761VENDOR_ATMEL_ADB021D:
14506                 case FLASH_5761VENDOR_ATMEL_MDB021D:
14507                 case FLASH_5761VENDOR_ST_A_M45PE20:
14508                 case FLASH_5761VENDOR_ST_M_M45PE20:
14509                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14510                         break;
14511                 }
14512         }
14513 }
14514
14515 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14516 {
14517         tp->nvram_jedecnum = JEDEC_ATMEL;
14518         tg3_flag_set(tp, NVRAM_BUFFERED);
14519         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14520 }
14521
14522 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14523 {
14524         u32 nvcfg1;
14525
14526         nvcfg1 = tr32(NVRAM_CFG1);
14527
14528         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14529         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14530         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14531                 tp->nvram_jedecnum = JEDEC_ATMEL;
14532                 tg3_flag_set(tp, NVRAM_BUFFERED);
14533                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14534
14535                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14536                 tw32(NVRAM_CFG1, nvcfg1);
14537                 return;
14538         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14539         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14540         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14541         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14542         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14543         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14544         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14545                 tp->nvram_jedecnum = JEDEC_ATMEL;
14546                 tg3_flag_set(tp, NVRAM_BUFFERED);
14547                 tg3_flag_set(tp, FLASH);
14548
14549                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14550                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14551                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14552                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14553                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14554                         break;
14555                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14556                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14557                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14558                         break;
14559                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14560                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14561                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14562                         break;
14563                 }
14564                 break;
14565         case FLASH_5752VENDOR_ST_M45PE10:
14566         case FLASH_5752VENDOR_ST_M45PE20:
14567         case FLASH_5752VENDOR_ST_M45PE40:
14568                 tp->nvram_jedecnum = JEDEC_ST;
14569                 tg3_flag_set(tp, NVRAM_BUFFERED);
14570                 tg3_flag_set(tp, FLASH);
14571
14572                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14573                 case FLASH_5752VENDOR_ST_M45PE10:
14574                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14575                         break;
14576                 case FLASH_5752VENDOR_ST_M45PE20:
14577                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14578                         break;
14579                 case FLASH_5752VENDOR_ST_M45PE40:
14580                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14581                         break;
14582                 }
14583                 break;
14584         default:
14585                 tg3_flag_set(tp, NO_NVRAM);
14586                 return;
14587         }
14588
14589         tg3_nvram_get_pagesize(tp, nvcfg1);
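              /* 264- and 528-byte pages are Atmel DataFlash geometries that
               * need the chip's NVRAM address translation; other page sizes
               * use plain linear addressing (hence NO_NVRAM_ADDR_TRANS).
               */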
14590         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14591                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14592 }
14593
14595 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14596 {
14597         u32 nvcfg1;
14598
14599         nvcfg1 = tr32(NVRAM_CFG1);
14600
14601         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14602         case FLASH_5717VENDOR_ATMEL_EEPROM:
14603         case FLASH_5717VENDOR_MICRO_EEPROM:
14604                 tp->nvram_jedecnum = JEDEC_ATMEL;
14605                 tg3_flag_set(tp, NVRAM_BUFFERED);
14606                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14607
14608                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14609                 tw32(NVRAM_CFG1, nvcfg1);
14610                 return;
14611         case FLASH_5717VENDOR_ATMEL_MDB011D:
14612         case FLASH_5717VENDOR_ATMEL_ADB011B:
14613         case FLASH_5717VENDOR_ATMEL_ADB011D:
14614         case FLASH_5717VENDOR_ATMEL_MDB021D:
14615         case FLASH_5717VENDOR_ATMEL_ADB021B:
14616         case FLASH_5717VENDOR_ATMEL_ADB021D:
14617         case FLASH_5717VENDOR_ATMEL_45USPT:
14618                 tp->nvram_jedecnum = JEDEC_ATMEL;
14619                 tg3_flag_set(tp, NVRAM_BUFFERED);
14620                 tg3_flag_set(tp, FLASH);
14621
14622                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14623                 case FLASH_5717VENDOR_ATMEL_MDB021D:
14624                         /* Detect size with tg3_nvram_get_size() */
14625                         break;
14626                 case FLASH_5717VENDOR_ATMEL_ADB021B:
14627                 case FLASH_5717VENDOR_ATMEL_ADB021D:
14628                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14629                         break;
14630                 default:
14631                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14632                         break;
14633                 }
14634                 break;
14635         case FLASH_5717VENDOR_ST_M_M25PE10:
14636         case FLASH_5717VENDOR_ST_A_M25PE10:
14637         case FLASH_5717VENDOR_ST_M_M45PE10:
14638         case FLASH_5717VENDOR_ST_A_M45PE10:
14639         case FLASH_5717VENDOR_ST_M_M25PE20:
14640         case FLASH_5717VENDOR_ST_A_M25PE20:
14641         case FLASH_5717VENDOR_ST_M_M45PE20:
14642         case FLASH_5717VENDOR_ST_A_M45PE20:
14643         case FLASH_5717VENDOR_ST_25USPT:
14644         case FLASH_5717VENDOR_ST_45USPT:
14645                 tp->nvram_jedecnum = JEDEC_ST;
14646                 tg3_flag_set(tp, NVRAM_BUFFERED);
14647                 tg3_flag_set(tp, FLASH);
14648
14649                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14650                 case FLASH_5717VENDOR_ST_M_M25PE20:
14651                 case FLASH_5717VENDOR_ST_M_M45PE20:
14652                         /* Detect size with tg3_nvram_get_size() */
14653                         break;
14654                 case FLASH_5717VENDOR_ST_A_M25PE20:
14655                 case FLASH_5717VENDOR_ST_A_M45PE20:
14656                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14657                         break;
14658                 default:
14659                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14660                         break;
14661                 }
14662                 break;
14663         default:
14664                 tg3_flag_set(tp, NO_NVRAM);
14665                 return;
14666         }
14667
14668         tg3_nvram_get_pagesize(tp, nvcfg1);
14669         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14670                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14671 }
14672
14673 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14674 {
14675         u32 nvcfg1, nvmpinstrp;
14676
14677         nvcfg1 = tr32(NVRAM_CFG1);
14678         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14679
14680         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14681                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14682                         tg3_flag_set(tp, NO_NVRAM);
14683                         return;
14684                 }
14685
14686                 switch (nvmpinstrp) {
14687                 case FLASH_5762_EEPROM_HD:
14688                         nvmpinstrp = FLASH_5720_EEPROM_HD;
14689                         break;
14690                 case FLASH_5762_EEPROM_LD:
14691                         nvmpinstrp = FLASH_5720_EEPROM_LD;
14692                         break;
14693                 case FLASH_5720VENDOR_M_ST_M45PE20:
14694                         /* This pinstrap supports multiple sizes, so force it
14695                          * to read the actual size from location 0xf0.
14696                          */
14697                         nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14698                         break;
14699                 }
14700         }
14701
14702         switch (nvmpinstrp) {
14703         case FLASH_5720_EEPROM_HD:
14704         case FLASH_5720_EEPROM_LD:
14705                 tp->nvram_jedecnum = JEDEC_ATMEL;
14706                 tg3_flag_set(tp, NVRAM_BUFFERED);
14707
14708                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14709                 tw32(NVRAM_CFG1, nvcfg1);
14710                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14711                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14712                 else
14713                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14714                 return;
14715         case FLASH_5720VENDOR_M_ATMEL_DB011D:
14716         case FLASH_5720VENDOR_A_ATMEL_DB011B:
14717         case FLASH_5720VENDOR_A_ATMEL_DB011D:
14718         case FLASH_5720VENDOR_M_ATMEL_DB021D:
14719         case FLASH_5720VENDOR_A_ATMEL_DB021B:
14720         case FLASH_5720VENDOR_A_ATMEL_DB021D:
14721         case FLASH_5720VENDOR_M_ATMEL_DB041D:
14722         case FLASH_5720VENDOR_A_ATMEL_DB041B:
14723         case FLASH_5720VENDOR_A_ATMEL_DB041D:
14724         case FLASH_5720VENDOR_M_ATMEL_DB081D:
14725         case FLASH_5720VENDOR_A_ATMEL_DB081D:
14726         case FLASH_5720VENDOR_ATMEL_45USPT:
14727                 tp->nvram_jedecnum = JEDEC_ATMEL;
14728                 tg3_flag_set(tp, NVRAM_BUFFERED);
14729                 tg3_flag_set(tp, FLASH);
14730
14731                 switch (nvmpinstrp) {
14732                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14733                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14734                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14735                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14736                         break;
14737                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14738                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14739                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14740                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14741                         break;
14742                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14743                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14744                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14745                         break;
14746                 default:
14747                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14748                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14749                         break;
14750                 }
14751                 break;
14752         case FLASH_5720VENDOR_M_ST_M25PE10:
14753         case FLASH_5720VENDOR_M_ST_M45PE10:
14754         case FLASH_5720VENDOR_A_ST_M25PE10:
14755         case FLASH_5720VENDOR_A_ST_M45PE10:
14756         case FLASH_5720VENDOR_M_ST_M25PE20:
14757         case FLASH_5720VENDOR_M_ST_M45PE20:
14758         case FLASH_5720VENDOR_A_ST_M25PE20:
14759         case FLASH_5720VENDOR_A_ST_M45PE20:
14760         case FLASH_5720VENDOR_M_ST_M25PE40:
14761         case FLASH_5720VENDOR_M_ST_M45PE40:
14762         case FLASH_5720VENDOR_A_ST_M25PE40:
14763         case FLASH_5720VENDOR_A_ST_M45PE40:
14764         case FLASH_5720VENDOR_M_ST_M25PE80:
14765         case FLASH_5720VENDOR_M_ST_M45PE80:
14766         case FLASH_5720VENDOR_A_ST_M25PE80:
14767         case FLASH_5720VENDOR_A_ST_M45PE80:
14768         case FLASH_5720VENDOR_ST_25USPT:
14769         case FLASH_5720VENDOR_ST_45USPT:
14770                 tp->nvram_jedecnum = JEDEC_ST;
14771                 tg3_flag_set(tp, NVRAM_BUFFERED);
14772                 tg3_flag_set(tp, FLASH);
14773
14774                 switch (nvmpinstrp) {
14775                 case FLASH_5720VENDOR_M_ST_M25PE20:
14776                 case FLASH_5720VENDOR_M_ST_M45PE20:
14777                 case FLASH_5720VENDOR_A_ST_M25PE20:
14778                 case FLASH_5720VENDOR_A_ST_M45PE20:
14779                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14780                         break;
14781                 case FLASH_5720VENDOR_M_ST_M25PE40:
14782                 case FLASH_5720VENDOR_M_ST_M45PE40:
14783                 case FLASH_5720VENDOR_A_ST_M25PE40:
14784                 case FLASH_5720VENDOR_A_ST_M45PE40:
14785                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14786                         break;
14787                 case FLASH_5720VENDOR_M_ST_M25PE80:
14788                 case FLASH_5720VENDOR_M_ST_M45PE80:
14789                 case FLASH_5720VENDOR_A_ST_M25PE80:
14790                 case FLASH_5720VENDOR_A_ST_M45PE80:
14791                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14792                         break;
14793                 default:
14794                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14795                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14796                         break;
14797                 }
14798                 break;
14799         default:
14800                 tg3_flag_set(tp, NO_NVRAM);
14801                 return;
14802         }
14803
14804         tg3_nvram_get_pagesize(tp, nvcfg1);
14805         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14806                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14807
14808         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14809                 u32 val;
14810
14811                 if (tg3_nvram_read(tp, 0, &val))
14812                         return;
14813
14814                 if (val != TG3_EEPROM_MAGIC &&
14815                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14816                         tg3_flag_set(tp, NO_NVRAM);
14817         }
14818 }
14819
14820 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14821 static void tg3_nvram_init(struct tg3 *tp)
14822 {
14823         if (tg3_flag(tp, IS_SSB_CORE)) {
14824                 /* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
14825                 tg3_flag_clear(tp, NVRAM);
14826                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14827                 tg3_flag_set(tp, NO_NVRAM);
14828                 return;
14829         }
14830
14831         tw32_f(GRC_EEPROM_ADDR,
14832              (EEPROM_ADDR_FSM_RESET |
14833               (EEPROM_DEFAULT_CLOCK_PERIOD <<
14834                EEPROM_ADDR_CLKPERD_SHIFT)));
14835
14836         msleep(1);
14837
14838         /* Enable serial EEPROM (seeprom) accesses. */
14839         tw32_f(GRC_LOCAL_CTRL,
14840              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14841         udelay(100);
14842
14843         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14844             tg3_asic_rev(tp) != ASIC_REV_5701) {
14845                 tg3_flag_set(tp, NVRAM);
14846
14847                 if (tg3_nvram_lock(tp)) {
14848                         netdev_warn(tp->dev,
14849                                     "Cannot get nvram lock, %s failed\n",
14850                                     __func__);
14851                         return;
14852                 }
14853                 tg3_enable_nvram_access(tp);
14854
14855                 tp->nvram_size = 0;
14856
14857                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14858                         tg3_get_5752_nvram_info(tp);
14859                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14860                         tg3_get_5755_nvram_info(tp);
14861                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14862                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
14863                          tg3_asic_rev(tp) == ASIC_REV_5785)
14864                         tg3_get_5787_nvram_info(tp);
14865                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14866                         tg3_get_5761_nvram_info(tp);
14867                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14868                         tg3_get_5906_nvram_info(tp);
14869                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14870                          tg3_flag(tp, 57765_CLASS))
14871                         tg3_get_57780_nvram_info(tp);
14872                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14873                          tg3_asic_rev(tp) == ASIC_REV_5719)
14874                         tg3_get_5717_nvram_info(tp);
14875                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14876                          tg3_asic_rev(tp) == ASIC_REV_5762)
14877                         tg3_get_5720_nvram_info(tp);
14878                 else
14879                         tg3_get_nvram_info(tp);
14880
14881                 if (tp->nvram_size == 0)
14882                         tg3_get_nvram_size(tp);
14883
14884                 tg3_disable_nvram_access(tp);
14885                 tg3_nvram_unlock(tp);
14886
14887         } else {
14888                 tg3_flag_clear(tp, NVRAM);
14889                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14890
14891                 tg3_get_eeprom_size(tp);
14892         }
14893 }
14894
14895 struct subsys_tbl_ent {
14896         u16 subsys_vendor, subsys_devid;
14897         u32 phy_id;
14898 };
14899
14900 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14901         /* Broadcom boards. */
14902         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14903           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14904         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14905           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14906         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14907           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14908         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14909           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14910         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14911           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14912         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14913           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14914         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14915           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14916         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14917           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14918         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14919           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14920         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14921           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14922         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14923           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14924
14925         /* 3com boards. */
14926         { TG3PCI_SUBVENDOR_ID_3COM,
14927           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14928         { TG3PCI_SUBVENDOR_ID_3COM,
14929           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14930         { TG3PCI_SUBVENDOR_ID_3COM,
14931           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14932         { TG3PCI_SUBVENDOR_ID_3COM,
14933           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14934         { TG3PCI_SUBVENDOR_ID_3COM,
14935           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14936
14937         /* DELL boards. */
14938         { TG3PCI_SUBVENDOR_ID_DELL,
14939           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14940         { TG3PCI_SUBVENDOR_ID_DELL,
14941           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14942         { TG3PCI_SUBVENDOR_ID_DELL,
14943           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14944         { TG3PCI_SUBVENDOR_ID_DELL,
14945           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14946
14947         /* Compaq boards. */
14948         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14949           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14950         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14951           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14952         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14953           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14954         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14955           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14956         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14957           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14958
14959         /* IBM boards. */
14960         { TG3PCI_SUBVENDOR_ID_IBM,
14961           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14962 };
14963
14964 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14965 {
14966         int i;
14967
14968         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14969                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14970                      tp->pdev->subsystem_vendor) &&
14971                     (subsys_id_to_phy_id[i].subsys_devid ==
14972                      tp->pdev->subsystem_device))
14973                         return &subsys_id_to_phy_id[i];
14974         }
14975         return NULL;
14976 }
14977
14978 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14979 {
14980         u32 val;
14981
14982         tp->phy_id = TG3_PHY_ID_INVALID;
14983         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14984
14985         /* Assume an onboard, WOL-capable device by default. */
14986         tg3_flag_set(tp, EEPROM_WRITE_PROT);
14987         tg3_flag_set(tp, WOL_CAP);
14988
14989         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14990                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14991                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14992                         tg3_flag_set(tp, IS_NIC);
14993                 }
14994                 val = tr32(VCPU_CFGSHDW);
14995                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14996                         tg3_flag_set(tp, ASPM_WORKAROUND);
14997                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14998                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14999                         tg3_flag_set(tp, WOL_ENABLE);
15000                         device_set_wakeup_enable(&tp->pdev->dev, true);
15001                 }
15002                 goto done;
15003         }
15004
15005         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15006         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15007                 u32 nic_cfg, led_cfg;
15008                 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15009                 u32 nic_phy_id, ver, eeprom_phy_id;
15010                 int eeprom_phy_serdes = 0;
15011
15012                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15013                 tp->nic_sram_data_cfg = nic_cfg;
15014
15015                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15016                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15017                 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15018                     tg3_asic_rev(tp) != ASIC_REV_5701 &&
15019                     tg3_asic_rev(tp) != ASIC_REV_5703 &&
15020                     (ver > 0) && (ver < 0x100))
15021                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15022
15023                 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15024                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15025
15026                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15027                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
15028                     tg3_asic_rev(tp) == ASIC_REV_5720)
15029                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15030
15031                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15032                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15033                         eeprom_phy_serdes = 1;
15034
15035                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15036                 if (nic_phy_id != 0) {
15037                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15038                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15039
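                              /* Pack the two ID words into the driver's
                               * internal PHY ID format: ID1 supplies bits
                               * 25..10, the low ten model/revision bits of
                               * ID2 fill bits 9..0, and the top six bits of
                               * ID2 land in bits 31..26.
                               */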
15040                         eeprom_phy_id  = (id1 >> 16) << 10;
15041                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
15042                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15043                 } else
15044                         eeprom_phy_id = 0;
15045
15046                 tp->phy_id = eeprom_phy_id;
15047                 if (eeprom_phy_serdes) {
15048                         if (!tg3_flag(tp, 5705_PLUS))
15049                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15050                         else
15051                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15052                 }
15053
15054                 if (tg3_flag(tp, 5750_PLUS))
15055                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15056                                     SHASTA_EXT_LED_MODE_MASK);
15057                 else
15058                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15059
15060                 switch (led_cfg) {
15061                 default:
15062                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15063                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15064                         break;
15065
15066                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15067                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15068                         break;
15069
15070                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15071                         tp->led_ctrl = LED_CTRL_MODE_MAC;
15072
15073                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15074                          * read, as happens with some older 5700/5701 bootcode.
15075                          */
15076                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15077                             tg3_asic_rev(tp) == ASIC_REV_5701)
15078                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15079
15080                         break;
15081
15082                 case SHASTA_EXT_LED_SHARED:
15083                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
15084                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15085                             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15086                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15087                                                  LED_CTRL_MODE_PHY_2);
15088
15089                         if (tg3_flag(tp, 5717_PLUS) ||
15090                             tg3_asic_rev(tp) == ASIC_REV_5762)
15091                                 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15092                                                 LED_CTRL_BLINK_RATE_MASK;
15093
15094                         break;
15095
15096                 case SHASTA_EXT_LED_MAC:
15097                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15098                         break;
15099
15100                 case SHASTA_EXT_LED_COMBO:
15101                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
15102                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15103                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15104                                                  LED_CTRL_MODE_PHY_2);
15105                         break;
15106
15107                 }
15108
15109                 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15110                      tg3_asic_rev(tp) == ASIC_REV_5701) &&
15111                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15112                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15113
15114                 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15115                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15116
15117                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15118                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15119                         if ((tp->pdev->subsystem_vendor ==
15120                              PCI_VENDOR_ID_ARIMA) &&
15121                             (tp->pdev->subsystem_device == 0x205a ||
15122                              tp->pdev->subsystem_device == 0x2063))
15123                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15124                 } else {
15125                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15126                         tg3_flag_set(tp, IS_NIC);
15127                 }
15128
15129                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15130                         tg3_flag_set(tp, ENABLE_ASF);
15131                         if (tg3_flag(tp, 5750_PLUS))
15132                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15133                 }
15134
15135                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15136                     tg3_flag(tp, 5750_PLUS))
15137                         tg3_flag_set(tp, ENABLE_APE);
15138
15139                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15140                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15141                         tg3_flag_clear(tp, WOL_CAP);
15142
15143                 if (tg3_flag(tp, WOL_CAP) &&
15144                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15145                         tg3_flag_set(tp, WOL_ENABLE);
15146                         device_set_wakeup_enable(&tp->pdev->dev, true);
15147                 }
15148
15149                 if (cfg2 & (1 << 17))
15150                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15151
15152                 /* SerDes signal pre-emphasis in register 0x590 is set by
15153                  * the bootcode if bit 18 is set. */
15154                 if (cfg2 & (1 << 18))
15155                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15156
15157                 if ((tg3_flag(tp, 57765_PLUS) ||
15158                      (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15159                       tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15160                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15161                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15162
15163                 if (tg3_flag(tp, PCI_EXPRESS)) {
15164                         u32 cfg3;
15165
15166                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15167                         if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15168                             !tg3_flag(tp, 57765_PLUS) &&
15169                             (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15170                                 tg3_flag_set(tp, ASPM_WORKAROUND);
15171                         if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15172                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15173                         if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15174                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15175                 }
15176
15177                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15178                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15179                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15180                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15181                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15182                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15183
15184                 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15185                         tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15186         }
15187 done:
15188         if (tg3_flag(tp, WOL_CAP))
15189                 device_set_wakeup_enable(&tp->pdev->dev,
15190                                          tg3_flag(tp, WOL_ENABLE));
15191         else
15192                 device_set_wakeup_capable(&tp->pdev->dev, false);
15193 }
15194
15195 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15196 {
15197         int i, err;
15198         u32 val2, off = offset * 8;
15199
15200         err = tg3_nvram_lock(tp);
15201         if (err)
15202                 return err;
15203
15204         tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15205         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15206                         APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15207         tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15208         udelay(10);
15209
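              /* Poll for completion for up to 1 ms (100 iterations x 10 us). */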
15210         for (i = 0; i < 100; i++) {
15211                 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15212                 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15213                         *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15214                         break;
15215                 }
15216                 udelay(10);
15217         }
15218
15219         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15220
15221         tg3_nvram_unlock(tp);
15222         if (val2 & APE_OTP_STATUS_CMD_DONE)
15223                 return 0;
15224
15225         return -EBUSY;
15226 }
15227
15228 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15229 {
15230         int i;
15231         u32 val;
15232
15233         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15234         tw32(OTP_CTRL, cmd);
15235
15236         /* Wait for up to 1 ms for command to execute. */
15237         for (i = 0; i < 100; i++) {
15238                 val = tr32(OTP_STATUS);
15239                 if (val & OTP_STATUS_CMD_DONE)
15240                         break;
15241                 udelay(10);
15242         }
15243
15244         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15245 }
15246
15247 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15248  * configuration is a 32-bit value that straddles the alignment boundary.
15249  * We do two 32-bit reads and then shift and merge the results.
15250  */
15251 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15252 {
15253         u32 bhalf_otp, thalf_otp;
15254
15255         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15256
15257         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15258                 return 0;
15259
15260         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15261
15262         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15263                 return 0;
15264
15265         thalf_otp = tr32(OTP_READ_DATA);
15266
15267         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15268
15269         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15270                 return 0;
15271
15272         bhalf_otp = tr32(OTP_READ_DATA);
15273
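              /* The top half of the config is in the low 16 bits of the word
               * at MAGIC1 and the bottom half in the high 16 bits of the word
               * at MAGIC2; e.g. a thalf of 0x1234ABCD and a bhalf of
               * 0x5678EF01 merge to 0xABCD5678.
               */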
15274         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15275 }
15276
15277 static void tg3_phy_init_link_config(struct tg3 *tp)
15278 {
15279         u32 adv = ADVERTISED_Autoneg;
15280
15281         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15282                 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15283                         adv |= ADVERTISED_1000baseT_Half;
15284                 adv |= ADVERTISED_1000baseT_Full;
15285         }
15286
15287         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15288                 adv |= ADVERTISED_100baseT_Half |
15289                        ADVERTISED_100baseT_Full |
15290                        ADVERTISED_10baseT_Half |
15291                        ADVERTISED_10baseT_Full |
15292                        ADVERTISED_TP;
15293         else
15294                 adv |= ADVERTISED_FIBRE;
15295
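              /* E.g. a copper PHY with neither the 10/100-only nor the
               * 1G-half restriction ends up advertising autoneg plus
               * 10/100/1000 at both half and full duplex over twisted pair.
               */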
15296         tp->link_config.advertising = adv;
15297         tp->link_config.speed = SPEED_UNKNOWN;
15298         tp->link_config.duplex = DUPLEX_UNKNOWN;
15299         tp->link_config.autoneg = AUTONEG_ENABLE;
15300         tp->link_config.active_speed = SPEED_UNKNOWN;
15301         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15302
15303         tp->old_link = -1;
15304 }
15305
15306 static int tg3_phy_probe(struct tg3 *tp)
15307 {
15308         u32 hw_phy_id_1, hw_phy_id_2;
15309         u32 hw_phy_id, hw_phy_id_masked;
15310         int err;
15311
15312         /* Flow control autonegotiation is the default behavior. */
15313         tg3_flag_set(tp, PAUSE_AUTONEG);
15314         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15315
15316         if (tg3_flag(tp, ENABLE_APE)) {
15317                 switch (tp->pci_fn) {
15318                 case 0:
15319                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15320                         break;
15321                 case 1:
15322                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15323                         break;
15324                 case 2:
15325                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15326                         break;
15327                 case 3:
15328                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15329                         break;
15330                 }
15331         }
15332
15333         if (!tg3_flag(tp, ENABLE_ASF) &&
15334             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15335             !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15336                 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15337                                    TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15338
15339         if (tg3_flag(tp, USE_PHYLIB))
15340                 return tg3_phy_init(tp);
15341
15342         /* Reading the PHY ID register can conflict with ASF
15343          * firmware access to the PHY hardware.
15344          */
15345         err = 0;
15346         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15347                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15348         } else {
15349                 /* Now read the physical PHY_ID from the chip and verify
15350                  * that it is sane.  If it doesn't look good, we fall back
15351                  * to the hard-coded table-based PHY_ID, and failing that,
15352                  * to the value found in the eeprom area.
15353                  */
15354                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15355                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15356
15357                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15358                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15359                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15360
15361                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15362         }
15363
15364         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15365                 tp->phy_id = hw_phy_id;
15366                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15367                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15368                 else
15369                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15370         } else {
15371                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15372                         /* Do nothing, phy ID already set up in
15373                          * tg3_get_eeprom_hw_cfg().
15374                          */
15375                 } else {
15376                         struct subsys_tbl_ent *p;
15377
15378                         /* No eeprom signature?  Try the hardcoded
15379                          * subsys device table.
15380                          */
15381                         p = tg3_lookup_by_subsys(tp);
15382                         if (p) {
15383                                 tp->phy_id = p->phy_id;
15384                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15385                 /* So far we have seen the IDs 0xbc050cd0,
15386                  * 0xbc050f80 and 0xbc050c30 on devices
15387                  * connected to a BCM4785, and there are
15388                  * probably more. For now, just assume that
15389                  * the phy is supported when it is connected
15390                  * to an SSB core.
15391                  */
15392                                 return -ENODEV;
15393                         }
15394
15395                         if (!tp->phy_id ||
15396                             tp->phy_id == TG3_PHY_ID_BCM8002)
15397                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15398                 }
15399         }
15400
15401         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15402             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15403              tg3_asic_rev(tp) == ASIC_REV_5720 ||
15404              tg3_asic_rev(tp) == ASIC_REV_57766 ||
15405              tg3_asic_rev(tp) == ASIC_REV_5762 ||
15406              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15407               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15408              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15409               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15410                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15411
15412                 tp->eee.supported = SUPPORTED_100baseT_Full |
15413                                     SUPPORTED_1000baseT_Full;
15414                 tp->eee.advertised = ADVERTISED_100baseT_Full |
15415                                      ADVERTISED_1000baseT_Full;
15416                 tp->eee.eee_enabled = 1;
15417                 tp->eee.tx_lpi_enabled = 1;
15418                 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15419         }
15420
15421         tg3_phy_init_link_config(tp);
15422
15423         if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15424             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15425             !tg3_flag(tp, ENABLE_APE) &&
15426             !tg3_flag(tp, ENABLE_ASF)) {
15427                 u32 bmsr, dummy;
15428
15429                 tg3_readphy(tp, MII_BMSR, &bmsr);
15430                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15431                     (bmsr & BMSR_LSTATUS))
15432                         goto skip_phy_reset;
15433
15434                 err = tg3_phy_reset(tp);
15435                 if (err)
15436                         return err;
15437
15438                 tg3_phy_set_wirespeed(tp);
15439
15440                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15441                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15442                                             tp->link_config.flowctrl);
15443
15444                         tg3_writephy(tp, MII_BMCR,
15445                                      BMCR_ANENABLE | BMCR_ANRESTART);
15446                 }
15447         }
15448
15449 skip_phy_reset:
15450         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15451                 err = tg3_init_5401phy_dsp(tp);
15452                 if (err)
15453                         return err;
15454
15455                 err = tg3_init_5401phy_dsp(tp);
15456         }
15457
15458         return err;
15459 }
15460
15461 static void tg3_read_vpd(struct tg3 *tp)
15462 {
15463         u8 *vpd_data;
15464         unsigned int block_end, rosize, len;
15465         u32 vpdlen;
15466         int j, i = 0;
15467
15468         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15469         if (!vpd_data)
15470                 goto out_no_vpd;
15471
15472         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15473         if (i < 0)
15474                 goto out_not_found;
15475
15476         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15477         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15478         i += PCI_VPD_LRDT_TAG_SIZE;
15479
15480         if (block_end > vpdlen)
15481                 goto out_not_found;
15482
15483         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15484                                       PCI_VPD_RO_KEYWORD_MFR_ID);
15485         if (j > 0) {
15486                 len = pci_vpd_info_field_size(&vpd_data[j]);
15487
15488                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15489                 if (j + len > block_end || len != 4 ||
15490                     memcmp(&vpd_data[j], "1028", 4))
15491                         goto partno;
15492
15493                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15494                                               PCI_VPD_RO_KEYWORD_VENDOR0);
15495                 if (j < 0)
15496                         goto partno;
15497
15498                 len = pci_vpd_info_field_size(&vpd_data[j]);
15499
15500                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15501                 if (j + len > block_end)
15502                         goto partno;
15503
15504                 if (len >= sizeof(tp->fw_ver))
15505                         len = sizeof(tp->fw_ver) - 1;
15506                 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15507                 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15508                          &vpd_data[j]);
15509         }
15510
15511 partno:
15512         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15513                                       PCI_VPD_RO_KEYWORD_PARTNO);
15514         if (i < 0)
15515                 goto out_not_found;
15516
15517         len = pci_vpd_info_field_size(&vpd_data[i]);
15518
15519         i += PCI_VPD_INFO_FLD_HDR_SIZE;
15520         if (len > TG3_BPN_SIZE ||
15521             (len + i) > vpdlen)
15522                 goto out_not_found;
15523
15524         memcpy(tp->board_part_number, &vpd_data[i], len);
15525
15526 out_not_found:
15527         kfree(vpd_data);
15528         if (tp->board_part_number[0])
15529                 return;
15530
15531 out_no_vpd:
15532         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15533                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15534                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15535                         strcpy(tp->board_part_number, "BCM5717");
15536                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15537                         strcpy(tp->board_part_number, "BCM5718");
15538                 else
15539                         goto nomatch;
15540         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15541                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15542                         strcpy(tp->board_part_number, "BCM57780");
15543                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15544                         strcpy(tp->board_part_number, "BCM57760");
15545                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15546                         strcpy(tp->board_part_number, "BCM57790");
15547                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15548                         strcpy(tp->board_part_number, "BCM57788");
15549                 else
15550                         goto nomatch;
15551         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15552                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15553                         strcpy(tp->board_part_number, "BCM57761");
15554                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15555                         strcpy(tp->board_part_number, "BCM57765");
15556                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15557                         strcpy(tp->board_part_number, "BCM57781");
15558                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15559                         strcpy(tp->board_part_number, "BCM57785");
15560                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15561                         strcpy(tp->board_part_number, "BCM57791");
15562                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15563                         strcpy(tp->board_part_number, "BCM57795");
15564                 else
15565                         goto nomatch;
15566         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15567                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15568                         strcpy(tp->board_part_number, "BCM57762");
15569                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15570                         strcpy(tp->board_part_number, "BCM57766");
15571                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15572                         strcpy(tp->board_part_number, "BCM57782");
15573                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15574                         strcpy(tp->board_part_number, "BCM57786");
15575                 else
15576                         goto nomatch;
15577         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15578                 strcpy(tp->board_part_number, "BCM95906");
15579         } else {
15580 nomatch:
15581                 strcpy(tp->board_part_number, "none");
15582         }
15583 }
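/* A minimal sketch (illustration only, not used by the driver) of the VPD
 * lookup pattern in tg3_read_vpd() above: find the read-only LRDT section,
 * locate a keyword inside it, and return the offset of the keyword's data
 * bytes.  It assumes only the pci_vpd_* helpers this file already uses.
 */
static inline int tg3_vpd_find_ro_keyword(const u8 *vpd, unsigned int vpdlen,
					  const char *kw, unsigned int *len)
{
	unsigned int rosize;
	int i, j;

	i = pci_vpd_find_tag(vpd, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		return -ENOENT;

	rosize = pci_vpd_lrdt_size(&vpd[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	if (i + rosize > vpdlen)
		return -EINVAL;

	j = pci_vpd_find_info_keyword(vpd, i, rosize, kw);
	if (j < 0)
		return -ENOENT;

	*len = pci_vpd_info_field_size(&vpd[j]);
	return j + PCI_VPD_INFO_FLD_HDR_SIZE;	/* offset of the data bytes */
}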
15584
15585 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15586 {
15587         u32 val;
15588
15589         if (tg3_nvram_read(tp, offset, &val) ||
15590             (val & 0xfc000000) != 0x0c000000 ||
15591             tg3_nvram_read(tp, offset + 4, &val) ||
15592             val != 0)
15593                 return 0;
15594
15595         return 1;
15596 }
15597
15598 static void tg3_read_bc_ver(struct tg3 *tp)
15599 {
15600         u32 val, offset, start, ver_offset;
15601         int i, dst_off;
15602         bool newver = false;
15603
15604         if (tg3_nvram_read(tp, 0xc, &offset) ||
15605             tg3_nvram_read(tp, 0x4, &start))
15606                 return;
15607
15608         offset = tg3_nvram_logical_addr(tp, offset);
15609
15610         if (tg3_nvram_read(tp, offset, &val))
15611                 return;
15612
15613         if ((val & 0xfc000000) == 0x0c000000) {
15614                 if (tg3_nvram_read(tp, offset + 4, &val))
15615                         return;
15616
15617                 if (val == 0)
15618                         newver = true;
15619         }
15620
15621         dst_off = strlen(tp->fw_ver);
15622
15623         if (newver) {
15624                 if (TG3_VER_SIZE - dst_off < 16 ||
15625                     tg3_nvram_read(tp, offset + 8, &ver_offset))
15626                         return;
15627
15628                 offset = offset + ver_offset - start;
15629                 for (i = 0; i < 16; i += 4) {
15630                         __be32 v;
15631                         if (tg3_nvram_read_be32(tp, offset + i, &v))
15632                                 return;
15633
15634                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15635                 }
15636         } else {
15637                 u32 major, minor;
15638
15639                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15640                         return;
15641
15642                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15643                         TG3_NVM_BCVER_MAJSFT;
15644                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15645                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15646                          "v%d.%02d", major, minor);
15647         }
15648 }
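/* Note on tg3_read_bc_ver() above: newer bootcode stores its version as an
 * ASCII string in NVRAM, so it is read as __be32 words and memcpy'd as-is;
 * keeping the big-endian byte order preserves the character order on both
 * little- and big-endian hosts.  The legacy path instead decodes packed
 * major/minor fields and formats them, e.g. major 3, minor 5 -> "v3.05".
 */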
15649
15650 static void tg3_read_hwsb_ver(struct tg3 *tp)
15651 {
15652         u32 val, major, minor;
15653
15654         /* Use native endian representation */
15655         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15656                 return;
15657
15658         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15659                 TG3_NVM_HWSB_CFG1_MAJSFT;
15660         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15661                 TG3_NVM_HWSB_CFG1_MINSFT;
15662
15663         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15664 }
15665
15666 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15667 {
15668         u32 offset, major, minor, build;
15669
15670         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15671
15672         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15673                 return;
15674
15675         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15676         case TG3_EEPROM_SB_REVISION_0:
15677                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15678                 break;
15679         case TG3_EEPROM_SB_REVISION_2:
15680                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15681                 break;
15682         case TG3_EEPROM_SB_REVISION_3:
15683                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15684                 break;
15685         case TG3_EEPROM_SB_REVISION_4:
15686                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15687                 break;
15688         case TG3_EEPROM_SB_REVISION_5:
15689                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15690                 break;
15691         case TG3_EEPROM_SB_REVISION_6:
15692                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15693                 break;
15694         default:
15695                 return;
15696         }
15697
15698         if (tg3_nvram_read(tp, offset, &val))
15699                 return;
15700
15701         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15702                 TG3_EEPROM_SB_EDH_BLD_SHFT;
15703         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15704                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15705         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15706
15707         if (minor > 99 || build > 26)
15708                 return;
15709
15710         offset = strlen(tp->fw_ver);
15711         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15712                  " v%d.%02d", major, minor);
15713
15714         if (build > 0) {
15715                 offset = strlen(tp->fw_ver);
15716                 if (offset < TG3_VER_SIZE - 1)
15717                         tp->fw_ver[offset] = 'a' + build - 1;
15718         }
15719 }
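/* A minimal sketch (illustration only) of the selfboot build suffix logic
 * above: builds 1..26 append a trailing 'a'..'z' and build 0 appends
 * nothing, so e.g. major 1, minor 10, build 2 renders as "sb v1.10b".
 */
static inline char tg3_sb_build_suffix(u32 build)
{
	return (build >= 1 && build <= 26) ? 'a' + build - 1 : '\0';
}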
15720
15721 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15722 {
15723         u32 val, offset, start;
15724         int i, vlen;
15725
15726         for (offset = TG3_NVM_DIR_START;
15727              offset < TG3_NVM_DIR_END;
15728              offset += TG3_NVM_DIRENT_SIZE) {
15729                 if (tg3_nvram_read(tp, offset, &val))
15730                         return;
15731
15732                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15733                         break;
15734         }
15735
15736         if (offset == TG3_NVM_DIR_END)
15737                 return;
15738
15739         if (!tg3_flag(tp, 5705_PLUS))
15740                 start = 0x08000000;
15741         else if (tg3_nvram_read(tp, offset - 4, &start))
15742                 return;
15743
15744         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15745             !tg3_fw_img_is_valid(tp, offset) ||
15746             tg3_nvram_read(tp, offset + 8, &val))
15747                 return;
15748
15749         offset += val - start;
15750
15751         vlen = strlen(tp->fw_ver);
15752
15753         tp->fw_ver[vlen++] = ',';
15754         tp->fw_ver[vlen++] = ' ';
15755
15756         for (i = 0; i < 4; i++) {
15757                 __be32 v;
15758                 if (tg3_nvram_read_be32(tp, offset, &v))
15759                         return;
15760
15761                 offset += sizeof(v);
15762
15763                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15764                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15765                         break;
15766                 }
15767
15768                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15769                 vlen += sizeof(v);
15770         }
15771 }
15772
15773 static void tg3_probe_ncsi(struct tg3 *tp)
15774 {
15775         u32 apedata;
15776
15777         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15778         if (apedata != APE_SEG_SIG_MAGIC)
15779                 return;
15780
15781         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15782         if (!(apedata & APE_FW_STATUS_READY))
15783                 return;
15784
15785         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15786                 tg3_flag_set(tp, APE_HAS_NCSI);
15787 }
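/* The probe above follows the usual APE shared-memory handshake: check the
 * segment signature magic, confirm the APE firmware reports itself ready,
 * and only then trust the advertised feature bits (here, NCSI support).
 */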
15788
15789 static void tg3_read_dash_ver(struct tg3 *tp)
15790 {
15791         int vlen;
15792         u32 apedata;
15793         char *fwtype;
15794
15795         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15796
15797         if (tg3_flag(tp, APE_HAS_NCSI))
15798                 fwtype = "NCSI";
15799         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15800                 fwtype = "SMASH";
15801         else
15802                 fwtype = "DASH";
15803
15804         vlen = strlen(tp->fw_ver);
15805
15806         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15807                  fwtype,
15808                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15809                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15810                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15811                  (apedata & APE_FW_VERSION_BLDMSK));
15812 }
15813
15814 static void tg3_read_otp_ver(struct tg3 *tp)
15815 {
15816         u32 val, val2;
15817
15818         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15819                 return;
15820
15821         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15822             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15823             TG3_OTP_MAGIC0_VALID(val)) {
15824                 u64 val64 = (u64) val << 32 | val2;
15825                 u32 ver = 0;
15826                 int i, vlen;
15827
15828                 for (i = 0; i < 7; i++) {
15829                         if ((val64 & 0xff) == 0)
15830                                 break;
15831                         ver = val64 & 0xff;
15832                         val64 >>= 8;
15833                 }
15834                 vlen = strlen(tp->fw_ver);
15835                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15836         }
15837 }
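/* A worked example of the OTP scan above: the loop walks the low seven
 * bytes of val64 from least to most significant and keeps the last
 * non-zero one.  For val64 == 0x030201 it sees 0x01, 0x02, 0x03, then a
 * zero byte, leaving ver == 3 and appending " .03" to the version string.
 */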
15838
15839 static void tg3_read_fw_ver(struct tg3 *tp)
15840 {
15841         u32 val;
15842         bool vpd_vers = false;
15843
15844         if (tp->fw_ver[0] != 0)
15845                 vpd_vers = true;
15846
15847         if (tg3_flag(tp, NO_NVRAM)) {
15848                 strcat(tp->fw_ver, "sb");
15849                 tg3_read_otp_ver(tp);
15850                 return;
15851         }
15852
15853         if (tg3_nvram_read(tp, 0, &val))
15854                 return;
15855
15856         if (val == TG3_EEPROM_MAGIC)
15857                 tg3_read_bc_ver(tp);
15858         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15859                 tg3_read_sb_ver(tp, val);
15860         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15861                 tg3_read_hwsb_ver(tp);
15862
15863         if (tg3_flag(tp, ENABLE_ASF)) {
15864                 if (tg3_flag(tp, ENABLE_APE)) {
15865                         tg3_probe_ncsi(tp);
15866                         if (!vpd_vers)
15867                                 tg3_read_dash_ver(tp);
15868                 } else if (!vpd_vers) {
15869                         tg3_read_mgmtfw_ver(tp);
15870                 }
15871         }
15872
15873         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15874 }
15875
15876 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15877 {
15878         if (tg3_flag(tp, LRG_PROD_RING_CAP))
15879                 return TG3_RX_RET_MAX_SIZE_5717;
15880         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15881                 return TG3_RX_RET_MAX_SIZE_5700;
15882         else
15883                 return TG3_RX_RET_MAX_SIZE_5705;
15884 }
15885
15886 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15887         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15888         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15889         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15890         { },
15891 };
15892
15893 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15894 {
15895         struct pci_dev *peer;
15896         unsigned int func, devnr = tp->pdev->devfn & ~7;
15897
15898         for (func = 0; func < 8; func++) {
15899                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15900                 if (peer && peer != tp->pdev)
15901                         break;
15902                 pci_dev_put(peer);
15903         }
15904         /* The 5704 can be configured in single-port mode; set peer to
15905          * tp->pdev in that case.
15906          */
15907         if (!peer) {
15908                 peer = tp->pdev;
15909                 return peer;
15910         }
15911
15912         /*
15913          * We don't need to keep the refcount elevated; there's no way
15914          * to remove one half of this device without removing the other
15915          */
15916         pci_dev_put(peer);
15917
15918         return peer;
15919 }
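/* Note: pdev->devfn packs the PCI slot number in bits 7:3 and the function
 * number in bits 2:0, so "devfn & ~7" above selects function 0 of the same
 * slot and "devnr | func" walks its eight possible functions.  This is
 * equivalent to pci_get_slot(bus, PCI_DEVFN(PCI_SLOT(devfn), func)).
 */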
15920
15921 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15922 {
15923         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15924         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15925                 u32 reg;
15926
15927                 /* All devices that use the alternate
15928                  * ASIC REV location have a CPMU.
15929                  */
15930                 tg3_flag_set(tp, CPMU_PRESENT);
15931
15932                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15933                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15934                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15935                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15936                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15937                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
15938                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
15939                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15940                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15941                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
15942                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
15943                         reg = TG3PCI_GEN2_PRODID_ASICREV;
15944                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15945                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15946                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15947                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15948                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15949                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15950                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15951                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15952                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15953                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15954                         reg = TG3PCI_GEN15_PRODID_ASICREV;
15955                 else
15956                         reg = TG3PCI_PRODID_ASICREV;
15957
15958                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15959         }
15960
15961         /* Wrong chip ID in 5752 A0. This code can be removed later
15962          * as A0 is not in production.
15963          */
15964         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15965                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15966
15967         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15968                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15969
15970         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15971             tg3_asic_rev(tp) == ASIC_REV_5719 ||
15972             tg3_asic_rev(tp) == ASIC_REV_5720)
15973                 tg3_flag_set(tp, 5717_PLUS);
15974
15975         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15976             tg3_asic_rev(tp) == ASIC_REV_57766)
15977                 tg3_flag_set(tp, 57765_CLASS);
15978
15979         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15980              tg3_asic_rev(tp) == ASIC_REV_5762)
15981                 tg3_flag_set(tp, 57765_PLUS);
15982
15983         /* Intentionally exclude ASIC_REV_5906 */
15984         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15985             tg3_asic_rev(tp) == ASIC_REV_5787 ||
15986             tg3_asic_rev(tp) == ASIC_REV_5784 ||
15987             tg3_asic_rev(tp) == ASIC_REV_5761 ||
15988             tg3_asic_rev(tp) == ASIC_REV_5785 ||
15989             tg3_asic_rev(tp) == ASIC_REV_57780 ||
15990             tg3_flag(tp, 57765_PLUS))
15991                 tg3_flag_set(tp, 5755_PLUS);
15992
15993         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15994             tg3_asic_rev(tp) == ASIC_REV_5714)
15995                 tg3_flag_set(tp, 5780_CLASS);
15996
15997         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15998             tg3_asic_rev(tp) == ASIC_REV_5752 ||
15999             tg3_asic_rev(tp) == ASIC_REV_5906 ||
16000             tg3_flag(tp, 5755_PLUS) ||
16001             tg3_flag(tp, 5780_CLASS))
16002                 tg3_flag_set(tp, 5750_PLUS);
16003
16004         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16005             tg3_flag(tp, 5750_PLUS))
16006                 tg3_flag_set(tp, 5705_PLUS);
16007 }
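/* For reference, a sketch of the revision accessors this function relies
 * on (as defined in tg3.h): tg3_chip_rev_id() returns the raw value, and
 * the other two are successively coarser views of it.
 *
 *	tg3_chip_rev_id(tp) == tp->pci_chip_rev_id;       (exact stepping)
 *	tg3_asic_rev(tp)    == tp->pci_chip_rev_id >> 12; (chip family)
 *	tg3_chip_rev(tp)    == tp->pci_chip_rev_id >> 8;  (family + rev)
 */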
16008
16009 static bool tg3_10_100_only_device(struct tg3 *tp,
16010                                    const struct pci_device_id *ent)
16011 {
16012         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16013
16014         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16015              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16016             (tp->phy_flags & TG3_PHYFLG_IS_FET))
16017                 return true;
16018
16019         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16020                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16021                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16022                                 return true;
16023                 } else {
16024                         return true;
16025                 }
16026         }
16027
16028         return false;
16029 }
16030
16031 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16032 {
16033         u32 misc_ctrl_reg;
16034         u32 pci_state_reg, grc_misc_cfg;
16035         u32 val;
16036         u16 pci_cmd;
16037         int err;
16038
16039         /* Force memory write invalidate off.  If we leave it on,
16040          * then on 5700_BX chips we have to enable a workaround.
16041          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16042          * to match the cacheline size.  The Broadcom driver has this
16043          * workaround but turns MWI off all the time and so never uses
16044          * it.  This seems to suggest that the workaround is insufficient.
16045          */
16046         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16047         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16048         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16049
16050         /* Important! -- Make sure register accesses are byteswapped
16051          * correctly.  Also, for those chips that require it, make
16052          * sure that indirect register accesses are enabled before
16053          * the first operation.
16054          */
16055         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16056                               &misc_ctrl_reg);
16057         tp->misc_host_ctrl |= (misc_ctrl_reg &
16058                                MISC_HOST_CTRL_CHIPREV);
16059         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16060                                tp->misc_host_ctrl);
16061
16062         tg3_detect_asic_rev(tp, misc_ctrl_reg);
16063
16064         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16065          * we need to disable memory accesses and use configuration
16066          * cycles only to access all registers. The 5702/03 chips
16067          * can mistakenly decode the special cycles from the
16068          * ICH chipsets as memory write cycles, causing corruption
16069          * of register and memory space. Only certain ICH bridges
16070          * will drive special cycles with non-zero data during the
16071          * address phase which can fall within the 5703's address
16072          * range. This is not an ICH bug as the PCI spec allows
16073          * non-zero address during special cycles. However, only
16074          * these ICH bridges are known to drive non-zero addresses
16075          * during special cycles.
16076          *
16077          * Since special cycles do not cross PCI bridges, we only
16078          * enable this workaround if the 5703 is on the secondary
16079          * bus of these ICH bridges.
16080          */
16081         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16082             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16083                 static struct tg3_dev_id {
16084                         u32     vendor;
16085                         u32     device;
16086                         u32     rev;
16087                 } ich_chipsets[] = {
16088                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16089                           PCI_ANY_ID },
16090                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16091                           PCI_ANY_ID },
16092                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16093                           0xa },
16094                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16095                           PCI_ANY_ID },
16096                         { },
16097                 };
16098                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16099                 struct pci_dev *bridge = NULL;
16100
16101                 while (pci_id->vendor != 0) {
16102                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16103                                                 bridge);
16104                         if (!bridge) {
16105                                 pci_id++;
16106                                 continue;
16107                         }
16108                         if (pci_id->rev != PCI_ANY_ID) {
16109                                 if (bridge->revision > pci_id->rev)
16110                                         continue;
16111                         }
16112                         if (bridge->subordinate &&
16113                             (bridge->subordinate->number ==
16114                              tp->pdev->bus->number)) {
16115                                 tg3_flag_set(tp, ICH_WORKAROUND);
16116                                 pci_dev_put(bridge);
16117                                 break;
16118                         }
16119                 }
16120         }
16121
16122         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16123                 static struct tg3_dev_id {
16124                         u32     vendor;
16125                         u32     device;
16126                 } bridge_chipsets[] = {
16127                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16128                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16129                         { },
16130                 };
16131                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16132                 struct pci_dev *bridge = NULL;
16133
16134                 while (pci_id->vendor != 0) {
16135                         bridge = pci_get_device(pci_id->vendor,
16136                                                 pci_id->device,
16137                                                 bridge);
16138                         if (!bridge) {
16139                                 pci_id++;
16140                                 continue;
16141                         }
16142                         if (bridge->subordinate &&
16143                             (bridge->subordinate->number <=
16144                              tp->pdev->bus->number) &&
16145                             (bridge->subordinate->busn_res.end >=
16146                              tp->pdev->bus->number)) {
16147                                 tg3_flag_set(tp, 5701_DMA_BUG);
16148                                 pci_dev_put(bridge);
16149                                 break;
16150                         }
16151                 }
16152         }
16153
16154         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16155          * DMA addresses > 40-bit. This bridge may have additional
16156          * 57xx devices behind it in some 4-port NIC designs, for example.
16157          * Any tg3 device found behind the bridge will also need the
16158          * 40-bit DMA workaround.
16159          */
16160         if (tg3_flag(tp, 5780_CLASS)) {
16161                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16162                 tp->msi_cap = tp->pdev->msi_cap;
16163         } else {
16164                 struct pci_dev *bridge = NULL;
16165
16166                 do {
16167                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16168                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16169                                                 bridge);
16170                         if (bridge && bridge->subordinate &&
16171                             (bridge->subordinate->number <=
16172                              tp->pdev->bus->number) &&
16173                             (bridge->subordinate->busn_res.end >=
16174                              tp->pdev->bus->number)) {
16175                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16176                                 pci_dev_put(bridge);
16177                                 break;
16178                         }
16179                 } while (bridge);
16180         }
16181
16182         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16183             tg3_asic_rev(tp) == ASIC_REV_5714)
16184                 tp->pdev_peer = tg3_find_peer(tp);
16185
16186         /* Determine TSO capabilities */
16187         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16188                 ; /* Do nothing. HW bug. */
16189         else if (tg3_flag(tp, 57765_PLUS))
16190                 tg3_flag_set(tp, HW_TSO_3);
16191         else if (tg3_flag(tp, 5755_PLUS) ||
16192                  tg3_asic_rev(tp) == ASIC_REV_5906)
16193                 tg3_flag_set(tp, HW_TSO_2);
16194         else if (tg3_flag(tp, 5750_PLUS)) {
16195                 tg3_flag_set(tp, HW_TSO_1);
16196                 tg3_flag_set(tp, TSO_BUG);
16197                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16198                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16199                         tg3_flag_clear(tp, TSO_BUG);
16200         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16201                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16202                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16203                 tg3_flag_set(tp, FW_TSO);
16204                 tg3_flag_set(tp, TSO_BUG);
16205                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16206                         tp->fw_needed = FIRMWARE_TG3TSO5;
16207                 else
16208                         tp->fw_needed = FIRMWARE_TG3TSO;
16209         }
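	/* To summarize the precedence above: 5719 A0 gets no TSO at all due
	 * to a hardware bug; otherwise HW_TSO_3 (57765_PLUS) is preferred,
	 * then HW_TSO_2 (5755_PLUS and 5906), then HW_TSO_1 (other
	 * 5750_PLUS parts, keeping TSO_BUG except on 5750 rev C2 and
	 * later), and finally firmware-assisted TSO (FW_TSO), which needs
	 * one of the tg3_tso firmware images loaded.
	 */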
16210
16211         /* Selectively allow TSO based on operating conditions */
16212         if (tg3_flag(tp, HW_TSO_1) ||
16213             tg3_flag(tp, HW_TSO_2) ||
16214             tg3_flag(tp, HW_TSO_3) ||
16215             tg3_flag(tp, FW_TSO)) {
16216                 /* For firmware TSO, assume ASF is disabled.
16217                  * We'll disable TSO later if we discover ASF
16218                  * is enabled in tg3_get_eeprom_hw_cfg().
16219                  */
16220                 tg3_flag_set(tp, TSO_CAPABLE);
16221         } else {
16222                 tg3_flag_clear(tp, TSO_CAPABLE);
16223                 tg3_flag_clear(tp, TSO_BUG);
16224                 tp->fw_needed = NULL;
16225         }
16226
16227         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16228                 tp->fw_needed = FIRMWARE_TG3;
16229
16230         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16231                 tp->fw_needed = FIRMWARE_TG357766;
16232
16233         tp->irq_max = 1;
16234
16235         if (tg3_flag(tp, 5750_PLUS)) {
16236                 tg3_flag_set(tp, SUPPORT_MSI);
16237                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16238                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16239                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16240                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16241                      tp->pdev_peer == tp->pdev))
16242                         tg3_flag_clear(tp, SUPPORT_MSI);
16243
16244                 if (tg3_flag(tp, 5755_PLUS) ||
16245                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16246                         tg3_flag_set(tp, 1SHOT_MSI);
16247                 }
16248
16249                 if (tg3_flag(tp, 57765_PLUS)) {
16250                         tg3_flag_set(tp, SUPPORT_MSIX);
16251                         tp->irq_max = TG3_IRQ_MAX_VECS;
16252                 }
16253         }
16254
16255         tp->txq_max = 1;
16256         tp->rxq_max = 1;
16257         if (tp->irq_max > 1) {
16258                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16259                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16260
16261                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16262                     tg3_asic_rev(tp) == ASIC_REV_5720)
16263                         tp->txq_max = tp->irq_max - 1;
16264         }
16265
16266         if (tg3_flag(tp, 5755_PLUS) ||
16267             tg3_asic_rev(tp) == ASIC_REV_5906)
16268                 tg3_flag_set(tp, SHORT_DMA_BUG);
16269
16270         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16271                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16272
16273         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16274             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16275             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16276             tg3_asic_rev(tp) == ASIC_REV_5762)
16277                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16278
16279         if (tg3_flag(tp, 57765_PLUS) &&
16280             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16281                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16282
16283         if (!tg3_flag(tp, 5705_PLUS) ||
16284             tg3_flag(tp, 5780_CLASS) ||
16285             tg3_flag(tp, USE_JUMBO_BDFLAG))
16286                 tg3_flag_set(tp, JUMBO_CAPABLE);
16287
16288         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16289                               &pci_state_reg);
16290
16291         if (pci_is_pcie(tp->pdev)) {
16292                 u16 lnkctl;
16293
16294                 tg3_flag_set(tp, PCI_EXPRESS);
16295
16296                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16297                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16298                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16299                                 tg3_flag_clear(tp, HW_TSO_2);
16300                                 tg3_flag_clear(tp, TSO_CAPABLE);
16301                         }
16302                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16303                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16304                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16305                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16306                                 tg3_flag_set(tp, CLKREQ_BUG);
16307                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16308                         tg3_flag_set(tp, L1PLLPD_EN);
16309                 }
16310         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16311                 /* BCM5785 devices are effectively PCIe devices, and should
16312                  * follow PCIe codepaths, but do not have a PCIe capabilities
16313                  * section.
16314                  */
16315                 tg3_flag_set(tp, PCI_EXPRESS);
16316         } else if (!tg3_flag(tp, 5705_PLUS) ||
16317                    tg3_flag(tp, 5780_CLASS)) {
16318                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16319                 if (!tp->pcix_cap) {
16320                         dev_err(&tp->pdev->dev,
16321                                 "Cannot find PCI-X capability, aborting\n");
16322                         return -EIO;
16323                 }
16324
16325                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16326                         tg3_flag_set(tp, PCIX_MODE);
16327         }
16328
16329         /* If we have an AMD 762 or VIA K8T800 chipset, write
16330          * reordering to the mailbox registers done by the host
16331          * controller can cause major troubles.  We read back from
16332          * every mailbox register write to force the writes to be
16333          * posted to the chip in order.
16334          */
16335         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16336             !tg3_flag(tp, PCI_EXPRESS))
16337                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
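	/* The flush selected by this flag follows the classic posted-write
	 * pattern (a sketch of tg3_write_flush_reg32(), defined earlier in
	 * this file): the dummy read forces the write to reach the chip
	 * before any subsequent mailbox write can be reordered ahead of it.
	 *
	 *	writel(val, tp->regs + off);
	 *	readl(tp->regs + off);
	 */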
16338
16339         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16340                              &tp->pci_cacheline_sz);
16341         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16342                              &tp->pci_lat_timer);
16343         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16344             tp->pci_lat_timer < 64) {
16345                 tp->pci_lat_timer = 64;
16346                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16347                                       tp->pci_lat_timer);
16348         }
16349
16350         /* Important! -- It is critical that the PCI-X hw workaround
16351          * situation is decided before the first MMIO register access.
16352          */
16353         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16354                 /* 5700 BX chips need to have their TX producer index
16355                  * mailboxes written twice to work around a bug.
16356                  */
16357                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16358
16359                 /* If we are in PCI-X mode, enable register write workaround.
16360                  *
16361                  * The workaround is to use indirect register accesses
16362                  * for all chip writes not to mailbox registers.
16363                  */
16364                 if (tg3_flag(tp, PCIX_MODE)) {
16365                         u32 pm_reg;
16366
16367                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16368
16369                         /* The chip can have its power management PCI config
16370                          * space registers clobbered due to this bug.
16371                          * So explicitly force the chip into D0 here.
16372                          */
16373                         pci_read_config_dword(tp->pdev,
16374                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16375                                               &pm_reg);
16376                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16377                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16378                         pci_write_config_dword(tp->pdev,
16379                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16380                                                pm_reg);
16381
16382                         /* Also, force SERR#/PERR# in PCI command. */
16383                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16384                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16385                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16386                 }
16387         }
16388
16389         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16390                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16391         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16392                 tg3_flag_set(tp, PCI_32BIT);
16393
16394         /* Chip-specific fixup from Broadcom driver */
16395         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16396             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16397                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16398                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16399         }
16400
16401         /* Default fast path register access methods */
16402         tp->read32 = tg3_read32;
16403         tp->write32 = tg3_write32;
16404         tp->read32_mbox = tg3_read32;
16405         tp->write32_mbox = tg3_write32;
16406         tp->write32_tx_mbox = tg3_write32;
16407         tp->write32_rx_mbox = tg3_write32;
16408
16409         /* Various workaround register access methods */
16410         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16411                 tp->write32 = tg3_write_indirect_reg32;
16412         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16413                  (tg3_flag(tp, PCI_EXPRESS) &&
16414                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16415                 /*
16416                  * Back to back register writes can cause problems on these
16417                  * chips, the workaround is to read back all reg writes
16418                  * except those to mailbox regs.
16419                  *
16420                  * See tg3_write_indirect_reg32().
16421                  */
16422                 tp->write32 = tg3_write_flush_reg32;
16423         }
16424
16425         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16426                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16427                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16428                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16429         }
16430
16431         if (tg3_flag(tp, ICH_WORKAROUND)) {
16432                 tp->read32 = tg3_read_indirect_reg32;
16433                 tp->write32 = tg3_write_indirect_reg32;
16434                 tp->read32_mbox = tg3_read_indirect_mbox;
16435                 tp->write32_mbox = tg3_write_indirect_mbox;
16436                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16437                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16438
16439                 iounmap(tp->regs);
16440                 tp->regs = NULL;
16441
16442                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16443                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16444                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16445         }
16446         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16447                 tp->read32_mbox = tg3_read32_mbox_5906;
16448                 tp->write32_mbox = tg3_write32_mbox_5906;
16449                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16450                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16451         }
16452
16453         if (tp->write32 == tg3_write_indirect_reg32 ||
16454             (tg3_flag(tp, PCIX_MODE) &&
16455              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16456               tg3_asic_rev(tp) == ASIC_REV_5701)))
16457                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16458
16459         /* The memory arbiter has to be enabled in order for SRAM accesses
16460          * to succeed.  Normally on powerup the tg3 chip firmware will make
16461          * sure it is enabled, but other entities such as system netboot
16462          * code might disable it.
16463          */
16464         val = tr32(MEMARB_MODE);
16465         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16466
16467         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16468         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16469             tg3_flag(tp, 5780_CLASS)) {
16470                 if (tg3_flag(tp, PCIX_MODE)) {
16471                         pci_read_config_dword(tp->pdev,
16472                                               tp->pcix_cap + PCI_X_STATUS,
16473                                               &val);
16474                         tp->pci_fn = val & 0x7;
16475                 }
16476         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16477                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16478                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16479                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16480                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16481                         val = tr32(TG3_CPMU_STATUS);
16482
16483                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16484                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16485                 else
16486                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16487                                      TG3_CPMU_STATUS_FSHFT_5719;
16488         }
16489
16490         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16491                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16492                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16493         }
16494
16495         /* Get eeprom hw config before calling tg3_set_power_state().
16496          * In particular, the TG3_FLAG_IS_NIC flag must be
16497          * determined before calling tg3_set_power_state() so that
16498          * we know whether or not to switch out of Vaux power.
16499          * When the flag is set, it means that GPIO1 is used for eeprom
16500          * write protect and also implies that it is a LOM where GPIOs
16501          * are not used to switch power.
16502          */
16503         tg3_get_eeprom_hw_cfg(tp);
16504
16505         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16506                 tg3_flag_clear(tp, TSO_CAPABLE);
16507                 tg3_flag_clear(tp, TSO_BUG);
16508                 tp->fw_needed = NULL;
16509         }
16510
16511         if (tg3_flag(tp, ENABLE_APE)) {
16512                 /* Allow reads and writes to the
16513                  * APE register and memory space.
16514                  */
16515                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16516                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16517                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16518                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16519                                        pci_state_reg);
16520
16521                 tg3_ape_lock_init(tp);
16522         }
16523
16524         /* Set up tp->grc_local_ctrl before calling
16525          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16526          * will bring 5700's external PHY out of reset.
16527          * It is also used as eeprom write protect on LOMs.
16528          */
16529         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16530         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16531             tg3_flag(tp, EEPROM_WRITE_PROT))
16532                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16533                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16534         /* Unused GPIO3 must be driven as output on 5752 because there
16535          * are no pull-up resistors on unused GPIO pins.
16536          */
16537         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16538                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16539
16540         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16541             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16542             tg3_flag(tp, 57765_CLASS))
16543                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16544
16545         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16546             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16547                 /* Turn off the debug UART. */
16548                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16549                 if (tg3_flag(tp, IS_NIC))
16550                         /* Keep VMain power. */
16551                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16552                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16553         }
16554
16555         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16556                 tp->grc_local_ctrl |=
16557                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16558
16559         /* Switch out of Vaux if it is a NIC */
16560         tg3_pwrsrc_switch_to_vmain(tp);
16561
16562         /* Derive initial jumbo mode from MTU assigned in
16563          * ether_setup() via the alloc_etherdev() call
16564          */
16565         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16566                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16567
16568         /* Determine WakeOnLan speed to use. */
16569         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16570             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16571             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16572             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16573                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16574         } else {
16575                 tg3_flag_set(tp, WOL_SPEED_100MB);
16576         }
16577
16578         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16579                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16580
16581         /* A few boards don't want the Ethernet@WireSpeed phy feature */
16582         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16583             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16584              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16585              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16586             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16587             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16588                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16589
16590         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16591             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16592                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16593         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16594                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16595
16596         if (tg3_flag(tp, 5705_PLUS) &&
16597             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16598             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16599             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16600             !tg3_flag(tp, 57765_PLUS)) {
16601                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16602                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16603                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16604                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16605                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16606                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16607                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16608                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16609                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16610                 } else
16611                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16612         }
16613
16614         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16615             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16616                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16617                 if (tp->phy_otp == 0)
16618                         tp->phy_otp = TG3_OTP_DEFAULT;
16619         }
16620
16621         if (tg3_flag(tp, CPMU_PRESENT))
16622                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16623         else
16624                 tp->mi_mode = MAC_MI_MODE_BASE;
16625
16626         tp->coalesce_mode = 0;
16627         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16628             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16629                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16630
16631         /* Set these bits to enable the statistics workaround. */
16632         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16633             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16634             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16635             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16636                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16637                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16638         }
16639
16640         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16641             tg3_asic_rev(tp) == ASIC_REV_57780)
16642                 tg3_flag_set(tp, USE_PHYLIB);
16643
16644         err = tg3_mdio_init(tp);
16645         if (err)
16646                 return err;
16647
16648         /* Initialize data/descriptor byte/word swapping. */
16649         val = tr32(GRC_MODE);
16650         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16651             tg3_asic_rev(tp) == ASIC_REV_5762)
16652                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16653                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16654                         GRC_MODE_B2HRX_ENABLE |
16655                         GRC_MODE_HTX2B_ENABLE |
16656                         GRC_MODE_HOST_STACKUP);
16657         else
16658                 val &= GRC_MODE_HOST_STACKUP;
16659
16660         tw32(GRC_MODE, val | tp->grc_mode);
16661
16662         tg3_switch_clocks(tp);
16663
16664         /* Clear this out for sanity. */
16665         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16666
16667         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16668         tw32(TG3PCI_REG_BASE_ADDR, 0);
16669
16670         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16671                               &pci_state_reg);
16672         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16673             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16674                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16675                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16676                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16677                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16678                         void __iomem *sram_base;
16679
16680                         /* Write some dummy words into the SRAM status block
16681                          * area and see if they read back correctly.  If the
16682                          * readback value is bad, force-enable the PCIX workaround.
16683                          */
16684                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16685
16686                         writel(0x00000000, sram_base);
16687                         writel(0x00000000, sram_base + 4);
16688                         writel(0xffffffff, sram_base + 4);
16689                         if (readl(sram_base) != 0x00000000)
16690                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16691                 }
16692         }
16693
16694         udelay(50);
16695         tg3_nvram_init(tp);
16696
16697         /* If the device has NVRAM, there is no need to load patch firmware */
16698         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16699             !tg3_flag(tp, NO_NVRAM))
16700                 tp->fw_needed = NULL;
16701
16702         grc_misc_cfg = tr32(GRC_MISC_CFG);
16703         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16704
16705         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16706             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16707              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16708                 tg3_flag_set(tp, IS_5788);
16709
16710         if (!tg3_flag(tp, IS_5788) &&
16711             tg3_asic_rev(tp) != ASIC_REV_5700)
16712                 tg3_flag_set(tp, TAGGED_STATUS);
16713         if (tg3_flag(tp, TAGGED_STATUS)) {
16714                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16715                                       HOSTCC_MODE_CLRTICK_TXBD);
16716
16717                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16718                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16719                                        tp->misc_host_ctrl);
16720         }
16721
16722         /* Preserve the APE MAC_MODE bits */
16723         if (tg3_flag(tp, ENABLE_APE))
16724                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16725         else
16726                 tp->mac_mode = 0;
16727
16728         if (tg3_10_100_only_device(tp, ent))
16729                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16730
16731         err = tg3_phy_probe(tp);
16732         if (err) {
16733                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16734                 /* ... but do not return immediately ... */
16735                 tg3_mdio_fini(tp);
16736         }
16737
16738         tg3_read_vpd(tp);
16739         tg3_read_fw_ver(tp);
16740
16741         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16742                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16743         } else {
16744                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16745                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16746                 else
16747                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16748         }
16749
16750         /* 5700 {AX,BX} chips have a broken status block link
16751          * change bit implementation, so we must use the
16752          * status register in those cases.
16753          */
16754         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16755                 tg3_flag_set(tp, USE_LINKCHG_REG);
16756         else
16757                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16758
16759         /* The led_ctrl is set during tg3_phy_probe; here we may
16760          * have to force the link status polling mechanism based
16761          * upon subsystem IDs.
16762          */
16763         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16764             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16765             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16766                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16767                 tg3_flag_set(tp, USE_LINKCHG_REG);
16768         }
16769
16770         /* For all SERDES we poll the MAC status register. */
16771         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16772                 tg3_flag_set(tp, POLL_SERDES);
16773         else
16774                 tg3_flag_clear(tp, POLL_SERDES);
16775
16776         if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16777                 tg3_flag_set(tp, POLL_CPMU_LINK);
16778
16779         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16780         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16781         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16782             tg3_flag(tp, PCIX_MODE)) {
16783                 tp->rx_offset = NET_SKB_PAD;
16784 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16785                 tp->rx_copy_thresh = ~(u16)0;
16786 #endif
16787         }
16788
16789         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16790         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16791         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16792
16793         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
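        /* Illustrative note: the rings are power-of-two sized, so the
         * "size - 1" values above act as wrap masks; e.g. a 512-entry
         * standard rx ring gives rx_std_ring_mask = 0x1ff, letting ring
         * indices wrap with a cheap AND instead of a modulo.
         */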
16794
16795         /* Increment the rx prod index on the rx std ring by at most
16796          * 8 for these chips to work around hw errata.
16797          */
16798         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16799             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16800             tg3_asic_rev(tp) == ASIC_REV_5755)
16801                 tp->rx_std_max_post = 8;
16802
16803         if (tg3_flag(tp, ASPM_WORKAROUND))
16804                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16805                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16806
16807         return err;
16808 }
16809
16810 #ifdef CONFIG_SPARC
16811 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16812 {
16813         struct net_device *dev = tp->dev;
16814         struct pci_dev *pdev = tp->pdev;
16815         struct device_node *dp = pci_device_to_OF_node(pdev);
16816         const unsigned char *addr;
16817         int len;
16818
16819         addr = of_get_property(dp, "local-mac-address", &len);
16820         if (addr && len == ETH_ALEN) {
16821                 memcpy(dev->dev_addr, addr, ETH_ALEN);
16822                 return 0;
16823         }
16824         return -ENODEV;
16825 }
16826
16827 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16828 {
16829         struct net_device *dev = tp->dev;
16830
16831         memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16832         return 0;
16833 }
16834 #endif
16835
16836 static int tg3_get_device_address(struct tg3 *tp)
16837 {
16838         struct net_device *dev = tp->dev;
16839         u32 hi, lo, mac_offset;
16840         int addr_ok = 0;
16841         int err;
16842
16843 #ifdef CONFIG_SPARC
16844         if (!tg3_get_macaddr_sparc(tp))
16845                 return 0;
16846 #endif
16847
16848         if (tg3_flag(tp, IS_SSB_CORE)) {
16849                 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16850                 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16851                         return 0;
16852         }
16853
16854         mac_offset = 0x7c;
16855         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16856             tg3_flag(tp, 5780_CLASS)) {
16857                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16858                         mac_offset = 0xcc;
16859                 if (tg3_nvram_lock(tp))
16860                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16861                 else
16862                         tg3_nvram_unlock(tp);
16863         } else if (tg3_flag(tp, 5717_PLUS)) {
16864                 if (tp->pci_fn & 1)
16865                         mac_offset = 0xcc;
16866                 if (tp->pci_fn > 1)
16867                         mac_offset += 0x18c;
16868         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16869                 mac_offset = 0x10;
16870
16871         /* First try to get it from the MAC address mailbox. */
16872         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16873         if ((hi >> 16) == 0x484b) {
16874                 dev->dev_addr[0] = (hi >>  8) & 0xff;
16875                 dev->dev_addr[1] = (hi >>  0) & 0xff;
16876
16877                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16878                 dev->dev_addr[2] = (lo >> 24) & 0xff;
16879                 dev->dev_addr[3] = (lo >> 16) & 0xff;
16880                 dev->dev_addr[4] = (lo >>  8) & 0xff;
16881                 dev->dev_addr[5] = (lo >>  0) & 0xff;
16882
16883                 /* Some old bootcode may report a 0 MAC address in SRAM */
16884                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16885         }
16886         if (!addr_ok) {
16887                 /* Next, try NVRAM. */
16888                 if (!tg3_flag(tp, NO_NVRAM) &&
16889                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16890                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16891                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16892                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16893                 }
16894                 /* Finally just fetch it out of the MAC control regs. */
16895                 else {
16896                         hi = tr32(MAC_ADDR_0_HIGH);
16897                         lo = tr32(MAC_ADDR_0_LOW);
16898
16899                         dev->dev_addr[5] = lo & 0xff;
16900                         dev->dev_addr[4] = (lo >> 8) & 0xff;
16901                         dev->dev_addr[3] = (lo >> 16) & 0xff;
16902                         dev->dev_addr[2] = (lo >> 24) & 0xff;
16903                         dev->dev_addr[1] = hi & 0xff;
16904                         dev->dev_addr[0] = (hi >> 8) & 0xff;
16905                 }
16906         }
16907
16908         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16909 #ifdef CONFIG_SPARC
16910                 if (!tg3_get_default_macaddr_sparc(tp))
16911                         return 0;
16912 #endif
16913                 return -EINVAL;
16914         }
16915         return 0;
16916 }
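/* Illustrative example of the SRAM mailbox encoding checked above: the
 * bootcode stores the MAC address behind a 0x484b (ASCII "HK") marker in
 * the top 16 bits of the HIGH mailbox word.  For a hypothetical address
 * 00:10:18:aa:bb:cc the mailbox words would read:
 *
 *      NIC_SRAM_MAC_ADDR_HIGH_MBOX = 0x484b0010
 *      NIC_SRAM_MAC_ADDR_LOW_MBOX  = 0x18aabbcc
 *
 * i.e. the low two bytes of "hi" followed by all four bytes of "lo",
 * most significant byte first.
 */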
16917
16918 #define BOUNDARY_SINGLE_CACHELINE       1
16919 #define BOUNDARY_MULTI_CACHELINE        2
16920
16921 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16922 {
16923         int cacheline_size;
16924         u8 byte;
16925         int goal;
16926
16927         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16928         if (byte == 0)
16929                 cacheline_size = 1024;
16930         else
16931                 cacheline_size = (int) byte * 4;
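        /* Note (illustrative): PCI_CACHE_LINE_SIZE is expressed in
         * 32-bit words, so a register value of 16 means a 64-byte cache
         * line.  A value of 0 (register never programmed) falls back to
         * the conservative 1024-byte worst case.
         */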
16932
16933         /* On 5703 and later chips, the boundary bits have no
16934          * effect.
16935          */
16936         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16937             tg3_asic_rev(tp) != ASIC_REV_5701 &&
16938             !tg3_flag(tp, PCI_EXPRESS))
16939                 goto out;
16940
16941 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16942         goal = BOUNDARY_MULTI_CACHELINE;
16943 #else
16944 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16945         goal = BOUNDARY_SINGLE_CACHELINE;
16946 #else
16947         goal = 0;
16948 #endif
16949 #endif
16950
16951         if (tg3_flag(tp, 57765_PLUS)) {
16952                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16953                 goto out;
16954         }
16955
16956         if (!goal)
16957                 goto out;
16958
16959         /* PCI controllers on most RISC systems tend to disconnect
16960          * when a device tries to burst across a cache-line boundary.
16961          * Therefore, letting tg3 do so just wastes PCI bandwidth.
16962          *
16963          * Unfortunately, for PCI-E there are only limited
16964          * write-side controls for this, and thus for reads
16965          * we will still get the disconnects.  We'll also waste
16966          * these PCI cycles for both read and write for chips
16967          * other than 5700 and 5701 which do not implement the
16968          * boundary bits.
16969          */
16970         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16971                 switch (cacheline_size) {
16972                 case 16:
16973                 case 32:
16974                 case 64:
16975                 case 128:
16976                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16977                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16978                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16979                         } else {
16980                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16981                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16982                         }
16983                         break;
16984
16985                 case 256:
16986                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16987                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16988                         break;
16989
16990                 default:
16991                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16992                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16993                         break;
16994                 }
16995         } else if (tg3_flag(tp, PCI_EXPRESS)) {
16996                 switch (cacheline_size) {
16997                 case 16:
16998                 case 32:
16999                 case 64:
17000                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17001                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17002                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17003                                 break;
17004                         }
17005                         /* fallthrough */
17006                 case 128:
17007                 default:
17008                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17009                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17010                         break;
17011                 }
17012         } else {
17013                 switch (cacheline_size) {
17014                 case 16:
17015                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17016                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17017                                         DMA_RWCTRL_WRITE_BNDRY_16);
17018                                 break;
17019                         }
17020                         /* fallthrough */
17021                 case 32:
17022                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17023                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17024                                         DMA_RWCTRL_WRITE_BNDRY_32);
17025                                 break;
17026                         }
17027                         /* fallthrough */
17028                 case 64:
17029                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17030                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17031                                         DMA_RWCTRL_WRITE_BNDRY_64);
17032                                 break;
17033                         }
17034                         /* fallthrough */
17035                 case 128:
17036                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17037                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17038                                         DMA_RWCTRL_WRITE_BNDRY_128);
17039                                 break;
17040                         }
17041                         /* fallthrough */
17042                 case 256:
17043                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
17044                                 DMA_RWCTRL_WRITE_BNDRY_256);
17045                         break;
17046                 case 512:
17047                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
17048                                 DMA_RWCTRL_WRITE_BNDRY_512);
17049                         break;
17050                 case 1024:
17051                 default:
17052                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17053                                 DMA_RWCTRL_WRITE_BNDRY_1024);
17054                         break;
17055                 }
17056         }
17057
17058 out:
17059         return val;
17060 }
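/* A worked example of the mapping above (illustrative): a 5703 in PCI-X
 * mode with a 64-byte cache line and a BOUNDARY_SINGLE_CACHELINE goal
 * lands in the 16/32/64/128 case and picks up the 128-byte PCI-X read
 * and write boundary bits.  On PCI Express only the write-side boundary
 * is programmable, so the same goal selects
 * DMA_RWCTRL_WRITE_BNDRY_64_PCIE instead.
 */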
17061
17062 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17063                            int size, bool to_device)
17064 {
17065         struct tg3_internal_buffer_desc test_desc;
17066         u32 sram_dma_descs;
17067         int i, ret;
17068
17069         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17070
17071         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17072         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17073         tw32(RDMAC_STATUS, 0);
17074         tw32(WDMAC_STATUS, 0);
17075
17076         tw32(BUFMGR_MODE, 0);
17077         tw32(FTQ_RESET, 0);
17078
17079         test_desc.addr_hi = ((u64) buf_dma) >> 32;
17080         test_desc.addr_lo = buf_dma & 0xffffffff;
17081         test_desc.nic_mbuf = 0x00002100;
17082         test_desc.len = size;
17083
17084         /*
17085          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17086          * the *second* time the tg3 driver was loaded after an
17087          * initial scan.
17088          *
17089          * Broadcom tells me:
17090          *   ...the DMA engine is connected to the GRC block and a DMA
17091          *   reset may affect the GRC block in some unpredictable way...
17092          *   The behavior of resets to individual blocks has not been tested.
17093          *
17094          * Broadcom noted the GRC reset will also reset all sub-components.
17095          */
17096         if (to_device) {
17097                 test_desc.cqid_sqid = (13 << 8) | 2;
17098
17099                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17100                 udelay(40);
17101         } else {
17102                 test_desc.cqid_sqid = (16 << 8) | 7;
17103
17104                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17105                 udelay(40);
17106         }
17107         test_desc.flags = 0x00000005;
17108
17109         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17110                 u32 val;
17111
17112                 val = *(((u32 *)&test_desc) + i);
17113                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17114                                        sram_dma_descs + (i * sizeof(u32)));
17115                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17116         }
17117         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17118
17119         if (to_device)
17120                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17121         else
17122                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17123
17124         ret = -ENODEV;
17125         for (i = 0; i < 40; i++) {
17126                 u32 val;
17127
17128                 if (to_device)
17129                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17130                 else
17131                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17132                 if ((val & 0xffff) == sram_dma_descs) {
17133                         ret = 0;
17134                         break;
17135                 }
17136
17137                 udelay(100);
17138         }
17139
17140         return ret;
17141 }
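/* Usage sketch (mirrors tg3_test_dma() below): DMA a pattern buffer to
 * the chip, then DMA it back and compare.  to_device selects the read
 * DMA engine (host memory -> NIC) versus the write DMA engine, along
 * with the matching completion FIFO that is polled for the descriptor
 * address:
 *
 *      ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
 *      if (!ret)
 *              ret = tg3_do_test_dma(tp, buf, buf_dma,
 *                                    TEST_BUFFER_SIZE, false);
 */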
17142
17143 #define TEST_BUFFER_SIZE        0x2000
17144
17145 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17146         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17147         { },
17148 };
17149
17150 static int tg3_test_dma(struct tg3 *tp)
17151 {
17152         dma_addr_t buf_dma;
17153         u32 *buf, saved_dma_rwctrl;
17154         int ret = 0;
17155
17156         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17157                                  &buf_dma, GFP_KERNEL);
17158         if (!buf) {
17159                 ret = -ENOMEM;
17160                 goto out_nofree;
17161         }
17162
17163         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17164                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17165
17166         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17167
17168         if (tg3_flag(tp, 57765_PLUS))
17169                 goto out;
17170
17171         if (tg3_flag(tp, PCI_EXPRESS)) {
17172                 /* DMA read watermark not used on PCIE */
17173                 tp->dma_rwctrl |= 0x00180000;
17174         } else if (!tg3_flag(tp, PCIX_MODE)) {
17175                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17176                     tg3_asic_rev(tp) == ASIC_REV_5750)
17177                         tp->dma_rwctrl |= 0x003f0000;
17178                 else
17179                         tp->dma_rwctrl |= 0x003f000f;
17180         } else {
17181                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17182                     tg3_asic_rev(tp) == ASIC_REV_5704) {
17183                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17184                         u32 read_water = 0x7;
17185
17186                         /* If the 5704 is behind the EPB bridge, we can
17187                          * do the less restrictive ONE_DMA workaround for
17188                          * better performance.
17189                          */
17190                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17191                             tg3_asic_rev(tp) == ASIC_REV_5704)
17192                                 tp->dma_rwctrl |= 0x8000;
17193                         else if (ccval == 0x6 || ccval == 0x7)
17194                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17195
17196                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
17197                                 read_water = 4;
17198                         /* Set bit 23 to enable PCIX hw bug fix */
17199                         tp->dma_rwctrl |=
17200                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17201                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17202                                 (1 << 23);
17203                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17204                         /* 5780 always in PCIX mode */
17205                         tp->dma_rwctrl |= 0x00144000;
17206                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17207                         /* 5714 always in PCIX mode */
17208                         tp->dma_rwctrl |= 0x00148000;
17209                 } else {
17210                         tp->dma_rwctrl |= 0x001b000f;
17211                 }
17212         }
17213         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17214                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17215
17216         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17217             tg3_asic_rev(tp) == ASIC_REV_5704)
17218                 tp->dma_rwctrl &= 0xfffffff0;
17219
17220         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17221             tg3_asic_rev(tp) == ASIC_REV_5701) {
17222                 /* Remove this if it causes problems for some boards. */
17223                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17224
17225                 /* On 5700/5701 chips, we need to set this bit.
17226                  * Otherwise the chip will issue cacheline transactions
17227                  * to streamable DMA memory without all of the byte
17228                  * enables turned on.  This is an error on several
17229                  * RISC PCI controllers, in particular sparc64.
17230                  *
17231                  * On 5703/5704 chips, this bit has been reassigned
17232                  * a different meaning.  In particular, it is used
17233                  * on those chips to enable a PCI-X workaround.
17234                  */
17235                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17236         }
17237
17238         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17239
17241         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17242             tg3_asic_rev(tp) != ASIC_REV_5701)
17243                 goto out;
17244
17245         /* It is best to perform the DMA test with the maximum write
17246          * burst size to expose the 5700/5701 write DMA bug.
17247          */
17248         saved_dma_rwctrl = tp->dma_rwctrl;
17249         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17250         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17251
17252         while (1) {
17253                 u32 *p = buf, i;
17254
17255                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17256                         p[i] = i;
17257
17258                 /* Send the buffer to the chip. */
17259                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17260                 if (ret) {
17261                         dev_err(&tp->pdev->dev,
17262                                 "%s: Buffer write failed. err = %d\n",
17263                                 __func__, ret);
17264                         break;
17265                 }
17266
17267                 /* Now read it back. */
17268                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17269                 if (ret) {
17270                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17271                                 "err = %d\n", __func__, ret);
17272                         break;
17273                 }
17274
17275                 /* Verify it. */
17276                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17277                         if (p[i] == i)
17278                                 continue;
17279
17280                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17281                             DMA_RWCTRL_WRITE_BNDRY_16) {
17282                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17283                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17284                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17285                                 break;
17286                         } else {
17287                                 dev_err(&tp->pdev->dev,
17288                                         "%s: Buffer corrupted on read back! "
17289                                         "(%d != %d)\n", __func__, p[i], i);
17290                                 ret = -ENODEV;
17291                                 goto out;
17292                         }
17293                 }
17294
17295                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17296                         /* Success. */
17297                         ret = 0;
17298                         break;
17299                 }
17300         }
17301         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17302             DMA_RWCTRL_WRITE_BNDRY_16) {
17303                 /* DMA test passed without adjusting the DMA boundary;
17304                  * now look for chipsets that are known to expose the
17305                  * DMA bug without failing the test.
17306                  */
17307                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17308                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17309                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17310                 } else {
17311                         /* Safe to use the calculated DMA boundary. */
17312                         tp->dma_rwctrl = saved_dma_rwctrl;
17313                 }
17314
17315                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17316         }
17317
17318 out:
17319         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17320 out_nofree:
17321         return ret;
17322 }
17323
17324 static void tg3_init_bufmgr_config(struct tg3 *tp)
17325 {
17326         if (tg3_flag(tp, 57765_PLUS)) {
17327                 tp->bufmgr_config.mbuf_read_dma_low_water =
17328                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17329                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17330                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17331                 tp->bufmgr_config.mbuf_high_water =
17332                         DEFAULT_MB_HIGH_WATER_57765;
17333
17334                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17335                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17336                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17337                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17338                 tp->bufmgr_config.mbuf_high_water_jumbo =
17339                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17340         } else if (tg3_flag(tp, 5705_PLUS)) {
17341                 tp->bufmgr_config.mbuf_read_dma_low_water =
17342                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17343                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17344                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17345                 tp->bufmgr_config.mbuf_high_water =
17346                         DEFAULT_MB_HIGH_WATER_5705;
17347                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17348                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17349                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17350                         tp->bufmgr_config.mbuf_high_water =
17351                                 DEFAULT_MB_HIGH_WATER_5906;
17352                 }
17353
17354                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17355                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17356                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17357                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17358                 tp->bufmgr_config.mbuf_high_water_jumbo =
17359                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17360         } else {
17361                 tp->bufmgr_config.mbuf_read_dma_low_water =
17362                         DEFAULT_MB_RDMA_LOW_WATER;
17363                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17364                         DEFAULT_MB_MACRX_LOW_WATER;
17365                 tp->bufmgr_config.mbuf_high_water =
17366                         DEFAULT_MB_HIGH_WATER;
17367
17368                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17369                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17370                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17371                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17372                 tp->bufmgr_config.mbuf_high_water_jumbo =
17373                         DEFAULT_MB_HIGH_WATER_JUMBO;
17374         }
17375
17376         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17377         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17378 }
17379
17380 static char *tg3_phy_string(struct tg3 *tp)
17381 {
17382         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17383         case TG3_PHY_ID_BCM5400:        return "5400";
17384         case TG3_PHY_ID_BCM5401:        return "5401";
17385         case TG3_PHY_ID_BCM5411:        return "5411";
17386         case TG3_PHY_ID_BCM5701:        return "5701";
17387         case TG3_PHY_ID_BCM5703:        return "5703";
17388         case TG3_PHY_ID_BCM5704:        return "5704";
17389         case TG3_PHY_ID_BCM5705:        return "5705";
17390         case TG3_PHY_ID_BCM5750:        return "5750";
17391         case TG3_PHY_ID_BCM5752:        return "5752";
17392         case TG3_PHY_ID_BCM5714:        return "5714";
17393         case TG3_PHY_ID_BCM5780:        return "5780";
17394         case TG3_PHY_ID_BCM5755:        return "5755";
17395         case TG3_PHY_ID_BCM5787:        return "5787";
17396         case TG3_PHY_ID_BCM5784:        return "5784";
17397         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17398         case TG3_PHY_ID_BCM5906:        return "5906";
17399         case TG3_PHY_ID_BCM5761:        return "5761";
17400         case TG3_PHY_ID_BCM5718C:       return "5718C";
17401         case TG3_PHY_ID_BCM5718S:       return "5718S";
17402         case TG3_PHY_ID_BCM57765:       return "57765";
17403         case TG3_PHY_ID_BCM5719C:       return "5719C";
17404         case TG3_PHY_ID_BCM5720C:       return "5720C";
17405         case TG3_PHY_ID_BCM5762:        return "5762C";
17406         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17407         case 0:                 return "serdes";
17408         default:                return "unknown";
17409         }
17410 }
17411
17412 static char *tg3_bus_string(struct tg3 *tp, char *str)
17413 {
17414         if (tg3_flag(tp, PCI_EXPRESS)) {
17415                 strcpy(str, "PCI Express");
17416                 return str;
17417         } else if (tg3_flag(tp, PCIX_MODE)) {
17418                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17419
17420                 strcpy(str, "PCIX:");
17421
17422                 if ((clock_ctrl == 7) ||
17423                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17424                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17425                         strcat(str, "133MHz");
17426                 else if (clock_ctrl == 0)
17427                         strcat(str, "33MHz");
17428                 else if (clock_ctrl == 2)
17429                         strcat(str, "50MHz");
17430                 else if (clock_ctrl == 4)
17431                         strcat(str, "66MHz");
17432                 else if (clock_ctrl == 6)
17433                         strcat(str, "100MHz");
17434         } else {
17435                 strcpy(str, "PCI:");
17436                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17437                         strcat(str, "66MHz");
17438                 else
17439                         strcat(str, "33MHz");
17440         }
17441         if (tg3_flag(tp, PCI_32BIT))
17442                 strcat(str, ":32-bit");
17443         else
17444                 strcat(str, ":64-bit");
17445         return str;
17446 }
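/* Illustrative use: the caller supplies the scratch buffer, e.g.
 *
 *      char str[40];
 *      netdev_info(dev, "bus: %s\n", tg3_bus_string(tp, str));
 *
 * For a 64-bit PCI-X board clocked at 133MHz this composes
 * "PCIX:133MHz:64-bit"; tg3_init_one() below sizes the buffer at 40
 * bytes, enough for the longest combination.
 */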
17447
17448 static void tg3_init_coal(struct tg3 *tp)
17449 {
17450         struct ethtool_coalesce *ec = &tp->coal;
17451
17452         memset(ec, 0, sizeof(*ec));
17453         ec->cmd = ETHTOOL_GCOALESCE;
17454         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17455         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17456         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17457         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17458         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17459         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17460         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17461         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17462         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17463
17464         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17465                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17466                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17467                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17468                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17469                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17470         }
17471
17472         if (tg3_flag(tp, 5705_PLUS)) {
17473                 ec->rx_coalesce_usecs_irq = 0;
17474                 ec->tx_coalesce_usecs_irq = 0;
17475                 ec->stats_block_coalesce_usecs = 0;
17476         }
17477 }
17478
17479 static int tg3_init_one(struct pci_dev *pdev,
17480                                   const struct pci_device_id *ent)
17481 {
17482         struct net_device *dev;
17483         struct tg3 *tp;
17484         int i, err;
17485         u32 sndmbx, rcvmbx, intmbx;
17486         char str[40];
17487         u64 dma_mask, persist_dma_mask;
17488         netdev_features_t features = 0;
17489
17490         printk_once(KERN_INFO "%s\n", version);
17491
17492         err = pci_enable_device(pdev);
17493         if (err) {
17494                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17495                 return err;
17496         }
17497
17498         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17499         if (err) {
17500                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17501                 goto err_out_disable_pdev;
17502         }
17503
17504         pci_set_master(pdev);
17505
17506         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17507         if (!dev) {
17508                 err = -ENOMEM;
17509                 goto err_out_free_res;
17510         }
17511
17512         SET_NETDEV_DEV(dev, &pdev->dev);
17513
17514         tp = netdev_priv(dev);
17515         tp->pdev = pdev;
17516         tp->dev = dev;
17517         tp->rx_mode = TG3_DEF_RX_MODE;
17518         tp->tx_mode = TG3_DEF_TX_MODE;
17519         tp->irq_sync = 1;
17520
17521         if (tg3_debug > 0)
17522                 tp->msg_enable = tg3_debug;
17523         else
17524                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17525
17526         if (pdev_is_ssb_gige_core(pdev)) {
17527                 tg3_flag_set(tp, IS_SSB_CORE);
17528                 if (ssb_gige_must_flush_posted_writes(pdev))
17529                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17530                 if (ssb_gige_one_dma_at_once(pdev))
17531                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17532                 if (ssb_gige_have_roboswitch(pdev)) {
17533                         tg3_flag_set(tp, USE_PHYLIB);
17534                         tg3_flag_set(tp, ROBOSWITCH);
17535                 }
17536                 if (ssb_gige_is_rgmii(pdev))
17537                         tg3_flag_set(tp, RGMII_MODE);
17538         }
17539
17540         /* The word/byte swap controls here govern register access byte
17541          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17542          * setting below.
17543          */
17544         tp->misc_host_ctrl =
17545                 MISC_HOST_CTRL_MASK_PCI_INT |
17546                 MISC_HOST_CTRL_WORD_SWAP |
17547                 MISC_HOST_CTRL_INDIR_ACCESS |
17548                 MISC_HOST_CTRL_PCISTATE_RW;
17549
17550         /* The NONFRM (non-frame) byte/word swap controls take effect
17551          * on descriptor entries, i.e. anything which isn't packet data.
17552          *
17553          * The StrongARM chips on the board (one for tx, one for rx)
17554          * are running in big-endian mode.
17555          */
17556         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17557                         GRC_MODE_WSWAP_NONFRM_DATA);
17558 #ifdef __BIG_ENDIAN
17559         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17560 #endif
17561         spin_lock_init(&tp->lock);
17562         spin_lock_init(&tp->indirect_lock);
17563         INIT_WORK(&tp->reset_task, tg3_reset_task);
17564
17565         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17566         if (!tp->regs) {
17567                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17568                 err = -ENOMEM;
17569                 goto err_out_free_dev;
17570         }
17571
17572         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17573             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17574             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17575             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17576             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17577             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17578             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17579             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17580             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17581             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17582             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17583             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17584             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17585             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17586             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17587                 tg3_flag_set(tp, ENABLE_APE);
17588                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17589                 if (!tp->aperegs) {
17590                         dev_err(&pdev->dev,
17591                                 "Cannot map APE registers, aborting\n");
17592                         err = -ENOMEM;
17593                         goto err_out_iounmap;
17594                 }
17595         }
17596
17597         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17598         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17599
17600         dev->ethtool_ops = &tg3_ethtool_ops;
17601         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17602         dev->netdev_ops = &tg3_netdev_ops;
17603         dev->irq = pdev->irq;
17604
17605         err = tg3_get_invariants(tp, ent);
17606         if (err) {
17607                 dev_err(&pdev->dev,
17608                         "Problem fetching invariants of chip, aborting\n");
17609                 goto err_out_apeunmap;
17610         }
17611
17612         /* The EPB bridge inside 5714, 5715, and 5780 and any
17613          * device behind the EPB cannot support DMA addresses > 40-bit.
17614          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17615          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17616          * do DMA address check in tg3_start_xmit().
17617          */
17618         if (tg3_flag(tp, IS_5788))
17619                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17620         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17621                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17622 #ifdef CONFIG_HIGHMEM
17623                 dma_mask = DMA_BIT_MASK(64);
17624 #endif
17625         } else
17626                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17627
17628         /* Configure DMA attributes. */
17629         if (dma_mask > DMA_BIT_MASK(32)) {
17630                 err = pci_set_dma_mask(pdev, dma_mask);
17631                 if (!err) {
17632                         features |= NETIF_F_HIGHDMA;
17633                         err = pci_set_consistent_dma_mask(pdev,
17634                                                           persist_dma_mask);
17635                         if (err < 0) {
17636                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17637                                         "DMA for consistent allocations\n");
17638                                 goto err_out_apeunmap;
17639                         }
17640                 }
17641         }
17642         if (err || dma_mask == DMA_BIT_MASK(32)) {
17643                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17644                 if (err) {
17645                         dev_err(&pdev->dev,
17646                                 "No usable DMA configuration, aborting\n");
17647                         goto err_out_apeunmap;
17648                 }
17649         }
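        /* Example outcome (illustrative): a 5714/5780-class part sets
         * 40BIT_DMA_BUG, so both masks start at DMA_BIT_MASK(40); with
         * CONFIG_HIGHMEM the streaming mask is widened to 64 bits and
         * tg3_start_xmit() checks each mapping, while coherent
         * allocations stay within 40 bits via persist_dma_mask.
         */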
17650
17651         tg3_init_bufmgr_config(tp);
17652
17653         features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
17654
17655         /* 5700 B0 chips do not support checksumming correctly due
17656          * to hardware bugs.
17657          */
17658         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17659                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17660
17661                 if (tg3_flag(tp, 5755_PLUS))
17662                         features |= NETIF_F_IPV6_CSUM;
17663         }
17664
17665         /* TSO is on by default on chips that support hardware TSO.
17666          * Firmware TSO on older chips gives lower performance, so it
17667          * is off by default, but can be enabled using ethtool.
17668          */
17669         if ((tg3_flag(tp, HW_TSO_1) ||
17670              tg3_flag(tp, HW_TSO_2) ||
17671              tg3_flag(tp, HW_TSO_3)) &&
17672             (features & NETIF_F_IP_CSUM))
17673                 features |= NETIF_F_TSO;
17674         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17675                 if (features & NETIF_F_IPV6_CSUM)
17676                         features |= NETIF_F_TSO6;
17677                 if (tg3_flag(tp, HW_TSO_3) ||
17678                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17679                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17680                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17681                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17682                     tg3_asic_rev(tp) == ASIC_REV_57780)
17683                         features |= NETIF_F_TSO_ECN;
17684         }
17685
17686         dev->features |= features;
17687         dev->vlan_features |= features;
17688
17689         /*
17690          * Add loopback capability only for a subset of devices that support
17691          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17692          * loopback for the remaining devices.
17693          */
17694         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17695             !tg3_flag(tp, CPMU_PRESENT))
17696                 /* Add the loopback capability */
17697                 features |= NETIF_F_LOOPBACK;
17698
17699         dev->hw_features |= features;
17700         dev->priv_flags |= IFF_UNICAST_FLT;
17701
17702         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17703             !tg3_flag(tp, TSO_CAPABLE) &&
17704             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17705                 tg3_flag_set(tp, MAX_RXPEND_64);
17706                 tp->rx_pending = 63;
17707         }
17708
17709         err = tg3_get_device_address(tp);
17710         if (err) {
17711                 dev_err(&pdev->dev,
17712                         "Could not obtain valid ethernet address, aborting\n");
17713                 goto err_out_apeunmap;
17714         }
17715
17716         /*
17717          * Reset the chip in case a UNDI or EFI driver did not shut down
17718          * DMA.  The self test will enable the WDMAC and we'll see (spurious)
17719          * pending DMA on the PCI bus at that point.
17720          */
17721         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17722             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17723                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17724                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17725         }
17726
17727         err = tg3_test_dma(tp);
17728         if (err) {
17729                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17730                 goto err_out_apeunmap;
17731         }
17732
17733         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17734         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17735         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17736         for (i = 0; i < tp->irq_max; i++) {
17737                 struct tg3_napi *tnapi = &tp->napi[i];
17738
17739                 tnapi->tp = tp;
17740                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17741
17742                 tnapi->int_mbox = intmbx;
17743                 if (i <= 4)
17744                         intmbx += 0x8;
17745                 else
17746                         intmbx += 0x4;
17747
17748                 tnapi->consmbox = rcvmbx;
17749                 tnapi->prodmbox = sndmbx;
17750
17751                 if (i)
17752                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17753                 else
17754                         tnapi->coal_now = HOSTCC_MODE_NOW;
17755
17756                 if (!tg3_flag(tp, SUPPORT_MSIX))
17757                         break;
17758
17759                 /*
17760                  * If we support MSIX, we'll be using RSS.  If we're using
17761                  * RSS, the first vector only handles link interrupts and the
17762                  * remaining vectors handle rx and tx interrupts.  Reuse the
17763                  * mailbox values for the next iteration.  The values we set up
17764                  * above are still useful for the single-vector mode.
17765                  */
17766                 if (!i)
17767                         continue;
17768
17769                 rcvmbx += 0x8;
17770
17771                 if (sndmbx & 0x4)
17772                         sndmbx -= 0x4;
17773                 else
17774                         sndmbx += 0xc;
17775         }
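        /* Worked example of the reuse above (illustrative): because of
         * the "continue" on i == 0, vector 0 (link-only) and vector 1
         * (first rx/tx ring) are programmed with identical mailbox
         * values.  From vector 2 onward each consumer mailbox advances
         * by 8 bytes, while the producer mailbox alternates -0x4/+0xc to
         * follow the hardware's interleaved send mailbox layout.
         */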
17776
17777         tg3_init_coal(tp);
17778
17779         pci_set_drvdata(pdev, dev);
17780
17781         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17782             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17783             tg3_asic_rev(tp) == ASIC_REV_5762)
17784                 tg3_flag_set(tp, PTP_CAPABLE);
17785
17786         tg3_timer_init(tp);
17787
17788         tg3_carrier_off(tp);
17789
17790         err = register_netdev(dev);
17791         if (err) {
17792                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17793                 goto err_out_apeunmap;
17794         }
17795
17796         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17797                     tp->board_part_number,
17798                     tg3_chip_rev_id(tp),
17799                     tg3_bus_string(tp, str),
17800                     dev->dev_addr);
17801
17802         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17803                 struct phy_device *phydev;
17804                 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
17805                 netdev_info(dev,
17806                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17807                             phydev->drv->name, dev_name(&phydev->dev));
17808         } else {
17809                 char *ethtype;
17810
17811                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17812                         ethtype = "10/100Base-TX";
17813                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17814                         ethtype = "1000Base-SX";
17815                 else
17816                         ethtype = "10/100/1000Base-T";
17817
17818                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17819                             "(WireSpeed[%d], EEE[%d])\n",
17820                             tg3_phy_string(tp), ethtype,
17821                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17822                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17823         }
17824
17825         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17826                     (dev->features & NETIF_F_RXCSUM) != 0,
17827                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
17828                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17829                     tg3_flag(tp, ENABLE_ASF) != 0,
17830                     tg3_flag(tp, TSO_CAPABLE) != 0);
17831         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17832                     tp->dma_rwctrl,
17833                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17834                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17835
17836         pci_save_state(pdev);
17837
17838         return 0;
17839
17840 err_out_apeunmap:
17841         if (tp->aperegs) {
17842                 iounmap(tp->aperegs);
17843                 tp->aperegs = NULL;
17844         }
17845
17846 err_out_iounmap:
17847         if (tp->regs) {
17848                 iounmap(tp->regs);
17849                 tp->regs = NULL;
17850         }
17851
17852 err_out_free_dev:
17853         free_netdev(dev);
17854
17855 err_out_free_res:
17856         pci_release_regions(pdev);
17857
17858 err_out_disable_pdev:
17859         if (pci_is_enabled(pdev))
17860                 pci_disable_device(pdev);
17861         return err;
17862 }
17863
17864 static void tg3_remove_one(struct pci_dev *pdev)
17865 {
17866         struct net_device *dev = pci_get_drvdata(pdev);
17867
17868         if (dev) {
17869                 struct tg3 *tp = netdev_priv(dev);
17870
17871                 release_firmware(tp->fw);
17872
17873                 tg3_reset_task_cancel(tp);
17874
17875                 if (tg3_flag(tp, USE_PHYLIB)) {
17876                         tg3_phy_fini(tp);
17877                         tg3_mdio_fini(tp);
17878                 }
17879
17880                 unregister_netdev(dev);
17881                 if (tp->aperegs) {
17882                         iounmap(tp->aperegs);
17883                         tp->aperegs = NULL;
17884                 }
17885                 if (tp->regs) {
17886                         iounmap(tp->regs);
17887                         tp->regs = NULL;
17888                 }
17889                 free_netdev(dev);
17890                 pci_release_regions(pdev);
17891                 pci_disable_device(pdev);
17892         }
17893 }
17894
17895 #ifdef CONFIG_PM_SLEEP
17896 static int tg3_suspend(struct device *device)
17897 {
17898         struct pci_dev *pdev = to_pci_dev(device);
17899         struct net_device *dev = pci_get_drvdata(pdev);
17900         struct tg3 *tp = netdev_priv(dev);
17901         int err = 0;
17902
17903         rtnl_lock();
17904
17905         if (!netif_running(dev))
17906                 goto unlock;
17907
17908         tg3_reset_task_cancel(tp);
17909         tg3_phy_stop(tp);
17910         tg3_netif_stop(tp);
17911
17912         tg3_timer_stop(tp);
17913
17914         tg3_full_lock(tp, 1);
17915         tg3_disable_ints(tp);
17916         tg3_full_unlock(tp);
17917
17918         netif_device_detach(dev);
17919
17920         tg3_full_lock(tp, 0);
17921         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17922         tg3_flag_clear(tp, INIT_COMPLETE);
17923         tg3_full_unlock(tp);
17924
17925         err = tg3_power_down_prepare(tp);
17926         if (err) {
17927                 int err2;
17928
17929                 tg3_full_lock(tp, 0);
17930
17931                 tg3_flag_set(tp, INIT_COMPLETE);
17932                 err2 = tg3_restart_hw(tp, true);
17933                 if (err2)
17934                         goto out;
17935
17936                 tg3_timer_start(tp);
17937
17938                 netif_device_attach(dev);
17939                 tg3_netif_start(tp);
17940
17941 out:
17942                 tg3_full_unlock(tp);
17943
17944                 if (!err2)
17945                         tg3_phy_start(tp);
17946         }
17947
17948 unlock:
17949         rtnl_unlock();
17950         return err;
17951 }
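/* If tg3_power_down_prepare() fails, the block above restarts the
 * hardware and reattaches the netdev so the interface stays usable,
 * while the original error is still returned; a nonzero return from
 * a suspend callback makes the PM core abort the sleep transition.
 */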
17952
17953 static int tg3_resume(struct device *device)
17954 {
17955         struct pci_dev *pdev = to_pci_dev(device);
17956         struct net_device *dev = pci_get_drvdata(pdev);
17957         struct tg3 *tp = netdev_priv(dev);
17958         int err = 0;
17959
17960         rtnl_lock();
17961
17962         if (!netif_running(dev))
17963                 goto unlock;
17964
17965         netif_device_attach(dev);
17966
17967         tg3_full_lock(tp, 0);
17968
17969         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17970
17971         tg3_flag_set(tp, INIT_COMPLETE);
17972         err = tg3_restart_hw(tp,
17973                              !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17974         if (err)
17975                 goto out;
17976
17977         tg3_timer_start(tp);
17978
17979         tg3_netif_start(tp);
17980
17981 out:
17982         tg3_full_unlock(tp);
17983
17984         if (!err)
17985                 tg3_phy_start(tp);
17986
17987 unlock:
17988         rtnl_unlock();
17989         return err;
17990 }
17991 #endif /* CONFIG_PM_SLEEP */
17992
17993 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
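/* With the static qualifier above, SIMPLE_DEV_PM_OPS() from
 * <linux/pm.h> expands to roughly:
 *
 *      static const struct dev_pm_ops tg3_pm_ops = {
 *              .suspend  = tg3_suspend,  .resume  = tg3_resume,
 *              .freeze   = tg3_suspend,  .thaw    = tg3_resume,
 *              .poweroff = tg3_suspend,  .restore = tg3_resume,
 *      };
 *
 * When CONFIG_PM_SLEEP is not set the structure is left empty, which
 * is why tg3_suspend() and tg3_resume() sit under the same #ifdef.
 */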
17994
17995 static void tg3_shutdown(struct pci_dev *pdev)
17996 {
17997         struct net_device *dev = pci_get_drvdata(pdev);
17998         struct tg3 *tp = netdev_priv(dev);
17999
18000         rtnl_lock();
18001         netif_device_detach(dev);
18002
18003         if (netif_running(dev))
18004                 dev_close(dev);
18005
18006         if (system_state == SYSTEM_POWER_OFF)
18007                 tg3_power_down(tp);
18008
18009         rtnl_unlock();
18010 }
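/* The SYSTEM_POWER_OFF test keeps the chip fully functional across a
 * reboot or kexec; tg3_power_down(), which can arm Wake-on-LAN and
 * drop the device into a low-power state, is only wanted when the
 * machine is genuinely powering off.
 */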
18011
18012 /**
18013  * tg3_io_error_detected - called when a PCI error is detected
18014  * @pdev: Pointer to PCI device
18015  * @state: The current PCI connection state
18016  *
18017  * This function is called after a PCI bus error affecting
18018  * this device has been detected.
18019  */
18020 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18021                                               pci_channel_state_t state)
18022 {
18023         struct net_device *netdev = pci_get_drvdata(pdev);
18024         struct tg3 *tp = netdev_priv(netdev);
18025         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18026
18027         netdev_info(netdev, "PCI I/O error detected\n");
18028
18029         rtnl_lock();
18030
18031         /* We may not have a netdev yet, or it may not be running */
18032         if (!netdev || !netif_running(netdev))
18033                 goto done;
18034
18035         tg3_phy_stop(tp);
18036
18037         tg3_netif_stop(tp);
18038
18039         tg3_timer_stop(tp);
18040
18041         /* Make sure that the reset task doesn't run */
18042         tg3_reset_task_cancel(tp);
18043
18044         netif_device_detach(netdev);
18045
18046         /* Clean up software state, even if MMIO is blocked */
18047         tg3_full_lock(tp, 0);
18048         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18049         tg3_full_unlock(tp);
18050
18051 done:
18052         if (state == pci_channel_io_perm_failure) {
18053                 if (netdev) {
18054                         tg3_napi_enable(tp);
18055                         dev_close(netdev);
18056                 }
18057                 err = PCI_ERS_RESULT_DISCONNECT;
18058         } else {
18059                 pci_disable_device(pdev);
18060         }
18061
18062         rtnl_unlock();
18063
18064         return err;
18065 }
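/* The return value steers the PCI error recovery core:
 * PCI_ERS_RESULT_NEED_RESET requests a slot reset (after which
 * tg3_io_slot_reset() is invoked), while PCI_ERS_RESULT_DISCONNECT,
 * used here for pci_channel_io_perm_failure, tells the core the
 * device is lost and no recovery should be attempted.
 */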
18066
18067 /**
18068  * tg3_io_slot_reset - called after the PCI bus has been reset.
18069  * @pdev: Pointer to PCI device
18070  *
18071  * Restart the card from scratch, as if from a cold-boot.
18072  * At this point, the card has experienced a hard reset,
18073  * followed by fixups by BIOS, and has its config space
18074  * set up identically to what it was at cold boot.
18075  */
18076 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18077 {
18078         struct net_device *netdev = pci_get_drvdata(pdev);
18079         struct tg3 *tp = netdev_priv(netdev);
18080         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18081         int err;
18082
18083         rtnl_lock();
18084
18085         if (pci_enable_device(pdev)) {
18086                 dev_err(&pdev->dev,
18087                         "Cannot re-enable PCI device after reset.\n");
18088                 goto done;
18089         }
18090
18091         pci_set_master(pdev);
18092         pci_restore_state(pdev);
18093         pci_save_state(pdev);
18094
18095         if (!netdev || !netif_running(netdev)) {
18096                 rc = PCI_ERS_RESULT_RECOVERED;
18097                 goto done;
18098         }
18099
18100         err = tg3_power_up(tp);
18101         if (err)
18102                 goto done;
18103
18104         rc = PCI_ERS_RESULT_RECOVERED;
18105
18106 done:
18107         if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18108                 tg3_napi_enable(tp);
18109                 dev_close(netdev);
18110         }
18111         rtnl_unlock();
18112
18113         return rc;
18114 }
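/* pci_restore_state() writes back the config space captured by the
 * pci_save_state() call at the end of tg3_init_one(), and restoring
 * marks the saved copy as consumed; hence the immediate
 * pci_save_state() to keep a fresh copy for any later recovery pass.
 */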
18115
18116 /**
18117  * tg3_io_resume - called when traffic can start flowing again.
18118  * @pdev: Pointer to PCI device
18119  *
18120  * This callback is called when the error recovery driver tells
18121  * us that it's OK to resume normal operation.
18122  */
18123 static void tg3_io_resume(struct pci_dev *pdev)
18124 {
18125         struct net_device *netdev = pci_get_drvdata(pdev);
18126         struct tg3 *tp = netdev_priv(netdev);
18127         int err;
18128
18129         rtnl_lock();
18130
18131         if (!netif_running(netdev))
18132                 goto done;
18133
18134         tg3_full_lock(tp, 0);
18135         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18136         tg3_flag_set(tp, INIT_COMPLETE);
18137         err = tg3_restart_hw(tp, true);
18138         if (err) {
18139                 tg3_full_unlock(tp);
18140                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18141                 goto done;
18142         }
18143
18144         netif_device_attach(netdev);
18145
18146         tg3_timer_start(tp);
18147
18148         tg3_netif_start(tp);
18149
18150         tg3_full_unlock(tp);
18151
18152         tg3_phy_start(tp);
18153
18154 done:
18155         rtnl_unlock();
18156 }
18157
18158 static const struct pci_error_handlers tg3_err_handler = {
18159         .error_detected = tg3_io_error_detected,
18160         .slot_reset     = tg3_io_slot_reset,
18161         .resume         = tg3_io_resume
18162 };
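/* These three hooks implement the recovery sequence described in
 * Documentation/PCI/pci-error-recovery.txt: the core first calls
 * .error_detected on the affected drivers, then, if a reset was
 * requested, .slot_reset once the link is back up, and finally
 * .resume when traffic may flow again.
 */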
18163
18164 static struct pci_driver tg3_driver = {
18165         .name           = DRV_MODULE_NAME,
18166         .id_table       = tg3_pci_tbl,
18167         .probe          = tg3_init_one,
18168         .remove         = tg3_remove_one,
18169         .err_handler    = &tg3_err_handler,
18170         .driver.pm      = &tg3_pm_ops,
18171         .shutdown       = tg3_shutdown,
18172 };
18173
18174 module_pci_driver(tg3_driver);
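/* module_pci_driver() replaces the usual init/exit boilerplate; per
 * <linux/pci.h> it expands to roughly (generated names differ):
 *
 *      static int __init tg3_driver_init(void)
 *      {
 *              return pci_register_driver(&tg3_driver);
 *      }
 *      module_init(tg3_driver_init);
 *
 *      static void __exit tg3_driver_exit(void)
 *      {
 *              pci_unregister_driver(&tg3_driver);
 *      }
 *      module_exit(tg3_driver_exit);
 */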