/* drivers/net/ethernet/broadcom/tg3.c
 * (tg3: Add SGMII phy support for 5719/5718 serdes)
 */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2013 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
50
51 #include <net/checksum.h>
52 #include <net/ip.h>
53
54 #include <linux/io.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
57
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
60
61 #ifdef CONFIG_SPARC
62 #include <asm/idprom.h>
63 #include <asm/prom.h>
64 #endif
65
66 #define BAR_0   0
67 #define BAR_2   2
68
69 #include "tg3.h"
70
71 /* Functions & macros to verify TG3_FLAGS types */
72
/* Return nonzero if @flag is set in the driver flag bitmap @bits. */
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}
77
/* Atomically set @flag in the driver flag bitmap @bits. */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}
82
/* Atomically clear @flag in the driver flag bitmap @bits. */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}
87
88 #define tg3_flag(tp, flag)                              \
89         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_set(tp, flag)                          \
91         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
92 #define tg3_flag_clear(tp, flag)                        \
93         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
94
95 #define DRV_MODULE_NAME         "tg3"
96 #define TG3_MAJ_NUM                     3
97 #define TG3_MIN_NUM                     130
98 #define DRV_MODULE_VERSION      \
99         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100 #define DRV_MODULE_RELDATE      "February 14, 2013"
101
102 #define RESET_KIND_SHUTDOWN     0
103 #define RESET_KIND_INIT         1
104 #define RESET_KIND_SUSPEND      2
105
106 #define TG3_DEF_RX_MODE         0
107 #define TG3_DEF_TX_MODE         0
108 #define TG3_DEF_MSG_ENABLE        \
109         (NETIF_MSG_DRV          | \
110          NETIF_MSG_PROBE        | \
111          NETIF_MSG_LINK         | \
112          NETIF_MSG_TIMER        | \
113          NETIF_MSG_IFDOWN       | \
114          NETIF_MSG_IFUP         | \
115          NETIF_MSG_RX_ERR       | \
116          NETIF_MSG_TX_ERR)
117
118 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
119
120 /* length of time before we decide the hardware is borked,
121  * and dev->tx_timeout() should be called to fix the problem
122  */
123
124 #define TG3_TX_TIMEOUT                  (5 * HZ)
125
126 /* hardware minimum and maximum for a single frame's data payload */
127 #define TG3_MIN_MTU                     60
128 #define TG3_MAX_MTU(tp) \
129         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
130
131 /* These numbers seem to be hard coded in the NIC firmware somehow.
132  * You can't change the ring sizes, but you can change where you place
133  * them in the NIC onboard memory.
134  */
135 #define TG3_RX_STD_RING_SIZE(tp) \
136         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138 #define TG3_DEF_RX_RING_PENDING         200
139 #define TG3_RX_JMB_RING_SIZE(tp) \
140         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
143
144 /* Do not place this n-ring entries value into the tp struct itself,
145  * we really want to expose these constants to GCC so that modulo et
146  * al.  operations are done with shifts and masks instead of with
147  * hw multiply/modulo instructions.  Another solution would be to
148  * replace things like '% foo' with '& (foo - 1)'.
149  */
150
151 #define TG3_TX_RING_SIZE                512
152 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
153
154 #define TG3_RX_STD_RING_BYTES(tp) \
155         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156 #define TG3_RX_JMB_RING_BYTES(tp) \
157         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158 #define TG3_RX_RCB_RING_BYTES(tp) \
159         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
161                                  TG3_TX_RING_SIZE)
162 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
163
164 #define TG3_DMA_BYTE_ENAB               64
165
166 #define TG3_RX_STD_DMA_SZ               1536
167 #define TG3_RX_JMB_DMA_SZ               9046
168
169 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
170
171 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
173
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
176
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
179
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181  * that are at least dword aligned when used in PCIX mode.  The driver
182  * works around this bug by double copying the packet.  This workaround
183  * is built into the normal double copy length check for efficiency.
184  *
185  * However, the double copy is only necessary on those architectures
186  * where unaligned memory accesses are inefficient.  For those architectures
187  * where unaligned memory accesses incur little penalty, we can reintegrate
188  * the 5701 in the normal rx path.  Doing so saves a device structure
189  * dereference by hardcoding the double copy threshold in place.
190  */
191 #define TG3_RX_COPY_THRESHOLD           256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
194 #else
195         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
196 #endif
197
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
200 #else
201 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
202 #endif
203
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
206 #define TG3_TX_BD_DMA_MAX_2K            2048
207 #define TG3_TX_BD_DMA_MAX_4K            4096
208
209 #define TG3_RAW_IP_ALIGN 2
210
211 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
212 #define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
213
214 #define FIRMWARE_TG3            "tigon/tg3.bin"
215 #define FIRMWARE_TG357766       "tigon/tg357766.bin"
216 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
217 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
218
219 static char version[] =
220         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
221
222 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
223 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_MODULE_VERSION);
226 MODULE_FIRMWARE(FIRMWARE_TG3);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
229
230 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
231 module_param(tg3_debug, int, 0);
232 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
233
234 #define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
235 #define TG3_DRV_DATA_FLAG_5705_10_100   0x0002
236
237 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258                         TG3_DRV_DATA_FLAG_5705_10_100},
259         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261                         TG3_DRV_DATA_FLAG_5705_10_100},
262         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265                         TG3_DRV_DATA_FLAG_5705_10_100},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287                         PCI_VENDOR_ID_LENOVO,
288                         TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
341         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
342         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
343         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
344         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
345         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
346         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
347         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
348         {}
349 };
350
351 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
352
/* Statistic names reported via ethtool -S.
 * NOTE(review): entry order appears significant -- it presumably must
 * match the layout of the counters copied from the hardware statistics
 * block; confirm against the stats structures in tg3.h before reordering.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	/* receive-side counters */
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	/* transmit-side counters */
	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	/* DMA write / receive ring counters */
	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	/* DMA read / transmit completion counters */
	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	/* interrupt / mailbox counters */
	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
435
436 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
437 #define TG3_NVRAM_TEST          0
438 #define TG3_LINK_TEST           1
439 #define TG3_REGISTER_TEST       2
440 #define TG3_MEMORY_TEST         3
441 #define TG3_MAC_LOOPB_TEST      4
442 #define TG3_PHY_LOOPB_TEST      5
443 #define TG3_EXT_LOOPB_TEST      6
444 #define TG3_INTERRUPT_TEST      7
445
446
/* Self-test names reported via ethtool.  Indices are fixed by the
 * TG3_*_TEST defines above so each result lines up with its label.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};
459
460 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
461
462
/* Plain MMIO register write through the main register BAR (posted;
 * no read-back flush).
 */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
467
/* Plain MMIO register read through the main register BAR. */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}
472
/* MMIO write into the APE (management processor) register space. */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
477
/* MMIO read from the APE (management processor) register space. */
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
482
483 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
484 {
485         unsigned long flags;
486
487         spin_lock_irqsave(&tp->indirect_lock, flags);
488         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
489         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
490         spin_unlock_irqrestore(&tp->indirect_lock, flags);
491 }
492
493 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
494 {
495         writel(val, tp->regs + off);
496         readl(tp->regs + off);
497 }
498
499 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
500 {
501         unsigned long flags;
502         u32 val;
503
504         spin_lock_irqsave(&tp->indirect_lock, flags);
505         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
506         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
507         spin_unlock_irqrestore(&tp->indirect_lock, flags);
508         return val;
509 }
510
/* Write a mailbox register while operating in PCI indirect mode.
 *
 * Two mailboxes have dedicated shadow registers in config space and are
 * written there directly; everything else goes through the indirect
 * register window.  As a side effect, writing 1 to the interrupt
 * mailbox (i.e. disabling interrupts) also pokes GRC local control to
 * clear the interrupt line, which the comment below explains.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* RX return ring consumer index: dedicated config-space shadow. */
	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	/* RX std ring producer index: dedicated config-space shadow. */
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	/* 0x5600: offset of the mailbox region within register space
	 * (NOTE(review): presumably GRCMBOX base -- confirm against the
	 * TG3 register map in tg3.h).
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
540
541 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
542 {
543         unsigned long flags;
544         u32 val;
545
546         spin_lock_irqsave(&tp->indirect_lock, flags);
547         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
548         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
549         spin_unlock_irqrestore(&tp->indirect_lock, flags);
550         return val;
551 }
552
553 /* usec_wait specifies the wait time in usec when writing to certain registers
554  * where it is unsafe to read back the register without some delay.
555  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
556  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
557  */
558 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
559 {
560         if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
561                 /* Non-posted methods */
562                 tp->write32(tp, off, val);
563         else {
564                 /* Posted method */
565                 tg3_write32(tp, off, val);
566                 if (usec_wait)
567                         udelay(usec_wait);
568                 tp->read32(tp, off);
569         }
570         /* Wait again after the read for the posted method to guarantee that
571          * the wait time is met.
572          */
573         if (usec_wait)
574                 udelay(usec_wait);
575 }
576
/* Mailbox write that reads the mailbox back when a flush is required:
 * either posted writes must always be flushed (FLUSH_POSTED_WRITES),
 * or a read-back is safe because neither the mailbox-write-reorder nor
 * the ICH workarounds are in effect.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
585
/* Write a TX mailbox register.  On parts with the TXD mailbox hardware
 * bug the value is written twice; a read-back flush is added when
 * write reordering or posted-write flushing demands it.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);	/* workaround: repeat the write */
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);		/* flush the posted write(s) */
}
596
/* 5906 variant: mailboxes live at GRCMBOX_BASE within register space. */
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}
601
/* 5906 variant: mailbox write rebased to GRCMBOX_BASE. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
606
607 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
608 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
609 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
610 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
611 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
612
613 #define tw32(reg, val)                  tp->write32(tp, reg, val)
614 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
615 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
616 #define tr32(reg)                       tp->read32(tp, reg)
617
/* Write a word into NIC on-board SRAM through the memory window,
 * serialized by indirect_lock.  Depending on SRAM_USE_CONFIG the window
 * is driven via PCI config space or via MMIO (tw32_f).
 *
 * On 5906, writes into the stats-block SRAM range are silently dropped
 * (NOTE(review): presumably that range is not writable on this part --
 * confirm against the 5906 documentation).
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
642
/* Read a word from NIC on-board SRAM through the memory window,
 * serialized by indirect_lock; mirror of tg3_write_mem().
 *
 * On 5906, reads from the stats-block SRAM range return 0 instead of
 * touching the window (same range excluded as in tg3_write_mem()).
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
669
670 static void tg3_ape_lock_init(struct tg3 *tp)
671 {
672         int i;
673         u32 regbase, bit;
674
675         if (tg3_asic_rev(tp) == ASIC_REV_5761)
676                 regbase = TG3_APE_LOCK_GRANT;
677         else
678                 regbase = TG3_APE_PER_LOCK_GRANT;
679
680         /* Make sure the driver hasn't any stale locks. */
681         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
682                 switch (i) {
683                 case TG3_APE_LOCK_PHY0:
684                 case TG3_APE_LOCK_PHY1:
685                 case TG3_APE_LOCK_PHY2:
686                 case TG3_APE_LOCK_PHY3:
687                         bit = APE_LOCK_GRANT_DRIVER;
688                         break;
689                 default:
690                         if (!tp->pci_fn)
691                                 bit = APE_LOCK_GRANT_DRIVER;
692                         else
693                                 bit = 1 << tp->pci_fn;
694                 }
695                 tg3_ape_write32(tp, regbase + 4 * i, bit);
696         }
697
698 }
699
/* Acquire an APE hardware lock shared with the management firmware.
 *
 * Returns 0 on success (or when APE is absent / lock not applicable),
 * -EINVAL for an unknown lock number, -EBUSY if the grant is not seen
 * within ~1ms (the request is revoked in that case).
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock; nothing to acquire. */
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through: GPIO uses the per-function bit too */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	/* 5761 uses the legacy single request/grant register blocks. */
	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
758
/* Release an APE hardware mutex previously acquired with tg3_ape_lock().
 * Silently does nothing for unknown lock numbers or when the APE is not
 * enabled.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* The 5761 has no GPIO lock, so there is nothing to free. */
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through - GPIO is released like GRC/MEM */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Non-zero PCI functions release their own bit. */
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	/* Mirror the register selection in tg3_ape_lock(): the 5761 has a
	 * single grant register block, later chips are per-function.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	/* Writing the owner bit back to the grant register frees the lock. */
	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
794
795 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
796 {
797         u32 apedata;
798
799         while (timeout_us) {
800                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
801                         return -EBUSY;
802
803                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
804                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
805                         break;
806
807                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
808
809                 udelay(10);
810                 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
811         }
812
813         return timeout_us ? 0 : -EBUSY;
814 }
815
816 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
817 {
818         u32 i, apedata;
819
820         for (i = 0; i < timeout_us / 10; i++) {
821                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
822
823                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
824                         break;
825
826                 udelay(10);
827         }
828
829         return i == timeout_us / 10;
830 }
831
/* Read @len bytes from the APE scratchpad at offset @base_off into @data,
 * transferring in chunks limited by the APE's message buffer size.  Each
 * chunk is requested by posting a SCRTCHPD_READ driver event and then
 * copied out of APE shared memory once the event completes.
 *
 * Returns 0 on success (or when the APE has no NCSI support), -ENODEV if
 * the APE segment signature is absent, -EAGAIN if the firmware is not
 * ready or an event times out, or the error from tg3_ape_event_lock().
 *
 * NOTE(review): callers appear to pass @len as a multiple of 4 - the copy
 * loop below advances 4 bytes at a time; confirm against call sites.
 */
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Locate the message buffer in APE shared memory; the payload
	 * starts two words past the header (offset + length words).
	 */
	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		/* Post the scratchpad-read request for this chunk. */
		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		/* Release the MEM lock taken by tg3_ape_event_lock() and
		 * ring the APE doorbell.
		 */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		/* Copy the chunk out of the message buffer word by word. */
		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
895
896 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
897 {
898         int err;
899         u32 apedata;
900
901         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
902         if (apedata != APE_SEG_SIG_MAGIC)
903                 return -EAGAIN;
904
905         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
906         if (!(apedata & APE_FW_STATUS_READY))
907                 return -EAGAIN;
908
909         /* Wait for up to 1 millisecond for APE to service previous event. */
910         err = tg3_ape_event_lock(tp, 1000);
911         if (err)
912                 return err;
913
914         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
915                         event | APE_EVENT_STATUS_EVENT_PENDING);
916
917         tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
918         tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
919
920         return 0;
921 }
922
/* Inform the APE firmware of a driver state transition (@kind is one of
 * the RESET_KIND_* values).  Writes the matching host state registers and
 * then posts a STATE_CHNGE driver event.  No-op when the APE is disabled
 * or @kind is unrecognized.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish the host segment, bump the init counter, and
		 * identify this driver to the APE.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		/* Advertise WOL state so the APE keeps the link alive
		 * when wake-on-LAN is armed.
		 */
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
979
980 static void tg3_disable_ints(struct tg3 *tp)
981 {
982         int i;
983
984         tw32(TG3PCI_MISC_HOST_CTRL,
985              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
986         for (i = 0; i < tp->irq_max; i++)
987                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
988 }
989
/* Unmask chip interrupts and re-arm every active vector's mailbox, then
 * force servicing of any work that became pending while interrupts were
 * masked.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();	/* publish the irq_sync clear before unmasking */

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* In 1-shot MSI mode the mailbox is written twice. */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	/* NOTE(review): vectors 0 and 1 are removed from coal_now here -
	 * presumably they are serviced through another path; confirm
	 * against tg3_int_reenable().
	 */
	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
1020
1021 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1022 {
1023         struct tg3 *tp = tnapi->tp;
1024         struct tg3_hw_status *sblk = tnapi->hw_status;
1025         unsigned int work_exists = 0;
1026
1027         /* check for phy events */
1028         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1029                 if (sblk->status & SD_STATUS_LINK_CHG)
1030                         work_exists = 1;
1031         }
1032
1033         /* check for TX work to do */
1034         if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1035                 work_exists = 1;
1036
1037         /* check for RX work to do */
1038         if (tnapi->rx_rcb_prod_idx &&
1039             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1040                 work_exists = 1;
1041
1042         return work_exists;
1043 }
1044
1045 /* tg3_int_reenable
1046  *  similar to tg3_enable_ints, but it accurately determines whether there
1047  *  is new work pending and can return without flushing the PIO write
1048  *  which reenables interrupts
1049  */
1050 static void tg3_int_reenable(struct tg3_napi *tnapi)
1051 {
1052         struct tg3 *tp = tnapi->tp;
1053
1054         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1055         mmiowb();
1056
1057         /* When doing tagged status, this work check is unnecessary.
1058          * The last_tag we write above tells the chip which piece of
1059          * work we've completed.
1060          */
1061         if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1062                 tw32(HOSTCC_MODE, tp->coalesce_mode |
1063                      HOSTCC_MODE_ENABLE | tnapi->coal_now);
1064 }
1065
/* Step the chip core clock back to its base rate.  No-op on CPMU-equipped
 * and 5780-class devices.  The intermediate writes follow the sequence the
 * hardware requires: drop the fast-core bit (5705+) or step through the
 * ALTCLK states (older chips) before the final value is written.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	/* Keep only the CLKRUN bits and the low divider field. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
1098
1099 #define PHY_BUSY_LOOPS  5000
1100
/* Read PHY register @reg at MDIO address @phy_addr through the MAC's MI
 * interface, storing the 16-bit result in *@val.
 *
 * Temporarily disables MI auto-polling (restored on exit) and serializes
 * against APE firmware PHY access via tp->phy_ape_lock.  Returns 0 on
 * success or -EBUSY if the MI interface never went idle; *@val is left
 * zero on failure.
 */
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	/* Build the MI command: PHY address, register, read opcode. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Re-read after a short settle to pick up data. */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if we disabled it above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1154
/* Read @reg from the PHY at the device's configured PHY address. */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
1159
/* Write @val to PHY register @reg at MDIO address @phy_addr through the
 * MAC's MI interface.
 *
 * Silently succeeds on FET PHYs for CTRL1000/AUX_CTRL, which they lack.
 * Temporarily disables MI auto-polling (restored on exit) and serializes
 * against APE firmware PHY access via tp->phy_ape_lock.  Returns 0 on
 * success or -EBUSY if the MI interface never went idle.
 */
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	/* Build the MI command: PHY address, register, data, write opcode. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if we disabled it above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1213
/* Write @val to @reg on the PHY at the device's configured PHY address. */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
1218
1219 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1220 {
1221         int err;
1222
1223         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1224         if (err)
1225                 goto done;
1226
1227         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1228         if (err)
1229                 goto done;
1230
1231         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1232                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1233         if (err)
1234                 goto done;
1235
1236         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1237
1238 done:
1239         return err;
1240 }
1241
1242 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1243 {
1244         int err;
1245
1246         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1247         if (err)
1248                 goto done;
1249
1250         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1251         if (err)
1252                 goto done;
1253
1254         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1255                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1256         if (err)
1257                 goto done;
1258
1259         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1260
1261 done:
1262         return err;
1263 }
1264
1265 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1266 {
1267         int err;
1268
1269         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1270         if (!err)
1271                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1272
1273         return err;
1274 }
1275
1276 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1277 {
1278         int err;
1279
1280         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1281         if (!err)
1282                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1283
1284         return err;
1285 }
1286
1287 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1288 {
1289         int err;
1290
1291         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1292                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1293                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1294         if (!err)
1295                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1296
1297         return err;
1298 }
1299
1300 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1301 {
1302         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1303                 set |= MII_TG3_AUXCTL_MISC_WREN;
1304
1305         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1306 }
1307
1308 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1309 {
1310         u32 val;
1311         int err;
1312
1313         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1314
1315         if (err)
1316                 return err;
1317         if (enable)
1318
1319                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1320         else
1321                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1322
1323         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1324                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1325
1326         return err;
1327 }
1328
1329 static int tg3_bmcr_reset(struct tg3 *tp)
1330 {
1331         u32 phy_control;
1332         int limit, err;
1333
1334         /* OK, reset it, and poll the BMCR_RESET bit until it
1335          * clears or we time out.
1336          */
1337         phy_control = BMCR_RESET;
1338         err = tg3_writephy(tp, MII_BMCR, phy_control);
1339         if (err != 0)
1340                 return -EBUSY;
1341
1342         limit = 5000;
1343         while (limit--) {
1344                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1345                 if (err != 0)
1346                         return -EBUSY;
1347
1348                 if ((phy_control & BMCR_RESET) == 0) {
1349                         udelay(40);
1350                         break;
1351                 }
1352                 udelay(10);
1353         }
1354         if (limit < 0)
1355                 return -EBUSY;
1356
1357         return 0;
1358 }
1359
1360 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1361 {
1362         struct tg3 *tp = bp->priv;
1363         u32 val;
1364
1365         spin_lock_bh(&tp->lock);
1366
1367         if (tg3_readphy(tp, reg, &val))
1368                 val = -EIO;
1369
1370         spin_unlock_bh(&tp->lock);
1371
1372         return val;
1373 }
1374
1375 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1376 {
1377         struct tg3 *tp = bp->priv;
1378         u32 ret = 0;
1379
1380         spin_lock_bh(&tp->lock);
1381
1382         if (tg3_writephy(tp, reg, val))
1383                 ret = -EIO;
1384
1385         spin_unlock_bh(&tp->lock);
1386
1387         return ret;
1388 }
1389
/* mii_bus reset hook - nothing to do at the bus level for this device. */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1394
/* Program the 5785 MAC's PHY-facing configuration (LED modes, RGMII
 * in-band/out-of-band signalling, clock timeouts) to match the PHY that
 * phylib attached.  Returns silently for unrecognized PHYs.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	/* Pick the LED mode matching the attached PHY model. */
	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	/* Non-RGMII interfaces only need LED modes and clock timeouts. */
	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII: enable in-band status unless it is explicitly disabled. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	/* Select external in-band decode/send per the RGMII_EXT_IBND flags. */
	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Mirror the same external in-band choices in the RGMII mode reg. */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1475
/* Hand the MDIO interface to software: disable MI auto-polling, and if
 * the MDIO bus is already registered on a 5785, reapply its PHY config.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1486
/* Determine the device's PHY address and, when phylib is in use,
 * allocate/register the tg3 MDIO bus and configure the attached PHY's
 * interface mode and quirk flags.
 *
 * Returns 0 on success, -ENOMEM if the bus cannot be allocated, -ENODEV
 * if no usable PHY is found, or the mdiobus_register() error code.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* On 5717+ the PHY address is derived from the PCI
		 * function; serdes PHYs sit 7 addresses higher.
		 */
		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	/* Bus id encodes the PCI bus/devfn so it is unique per device. */
	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Per-PHY-model interface mode and quirk flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1591
1592 static void tg3_mdio_fini(struct tg3 *tp)
1593 {
1594         if (tg3_flag(tp, MDIOBUS_INITED)) {
1595                 tg3_flag_clear(tp, MDIOBUS_INITED);
1596                 mdiobus_unregister(tp->mdio_bus);
1597                 mdiobus_free(tp->mdio_bus);
1598         }
1599 }
1600
1601 /* tp->lock is held. */
1602 static inline void tg3_generate_fw_event(struct tg3 *tp)
1603 {
1604         u32 val;
1605
1606         val = tr32(GRC_RX_CPU_EVENT);
1607         val |= GRC_RX_CPU_DRIVER_EVENT;
1608         tw32_f(GRC_RX_CPU_EVENT, val);
1609
1610         tp->last_event_jiffies = jiffies;
1611 }
1612
1613 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1614
1615 /* tp->lock is held. */
1616 static void tg3_wait_for_event_ack(struct tg3 *tp)
1617 {
1618         int i;
1619         unsigned int delay_cnt;
1620         long time_remain;
1621
1622         /* If enough time has passed, no wait is necessary. */
1623         time_remain = (long)(tp->last_event_jiffies + 1 +
1624                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1625                       (long)jiffies;
1626         if (time_remain < 0)
1627                 return;
1628
1629         /* Check if we can shorten the wait time. */
1630         delay_cnt = jiffies_to_usecs(time_remain);
1631         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1632                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1633         delay_cnt = (delay_cnt >> 3) + 1;
1634
1635         for (i = 0; i < delay_cnt; i++) {
1636                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1637                         break;
1638                 udelay(8);
1639         }
1640 }
1641
1642 /* tp->lock is held. */
1643 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1644 {
1645         u32 reg, val;
1646
1647         val = 0;
1648         if (!tg3_readphy(tp, MII_BMCR, &reg))
1649                 val = reg << 16;
1650         if (!tg3_readphy(tp, MII_BMSR, &reg))
1651                 val |= (reg & 0xffff);
1652         *data++ = val;
1653
1654         val = 0;
1655         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1656                 val = reg << 16;
1657         if (!tg3_readphy(tp, MII_LPA, &reg))
1658                 val |= (reg & 0xffff);
1659         *data++ = val;
1660
1661         val = 0;
1662         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1663                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1664                         val = reg << 16;
1665                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1666                         val |= (reg & 0xffff);
1667         }
1668         *data++ = val;
1669
1670         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1671                 val = reg << 16;
1672         else
1673                 val = 0;
1674         *data++ = val;
1675 }
1676
1677 /* tp->lock is held. */
1678 static void tg3_ump_link_report(struct tg3 *tp)
1679 {
1680         u32 data[4];
1681
1682         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1683                 return;
1684
1685         tg3_phy_gather_ump_data(tp, data);
1686
1687         tg3_wait_for_event_ack(tp);
1688
1689         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1690         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1691         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1692         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1693         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1694         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1695
1696         tg3_generate_fw_event(tp);
1697 }
1698
1699 /* tp->lock is held. */
1700 static void tg3_stop_fw(struct tg3 *tp)
1701 {
1702         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1703                 /* Wait for RX cpu to ACK the previous event. */
1704                 tg3_wait_for_event_ack(tp);
1705
1706                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1707
1708                 tg3_generate_fw_event(tp);
1709
1710                 /* Wait for RX cpu to ACK this event. */
1711                 tg3_wait_for_event_ack(tp);
1712         }
1713 }
1714
1715 /* tp->lock is held. */
1716 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1717 {
1718         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1719                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1720
1721         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1722                 switch (kind) {
1723                 case RESET_KIND_INIT:
1724                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1725                                       DRV_STATE_START);
1726                         break;
1727
1728                 case RESET_KIND_SHUTDOWN:
1729                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1730                                       DRV_STATE_UNLOAD);
1731                         break;
1732
1733                 case RESET_KIND_SUSPEND:
1734                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1735                                       DRV_STATE_SUSPEND);
1736                         break;
1737
1738                 default:
1739                         break;
1740                 }
1741         }
1742
1743         if (kind == RESET_KIND_INIT ||
1744             kind == RESET_KIND_SUSPEND)
1745                 tg3_ape_driver_state_change(tp, kind);
1746 }
1747
1748 /* tp->lock is held. */
1749 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1750 {
1751         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1752                 switch (kind) {
1753                 case RESET_KIND_INIT:
1754                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755                                       DRV_STATE_START_DONE);
1756                         break;
1757
1758                 case RESET_KIND_SHUTDOWN:
1759                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1760                                       DRV_STATE_UNLOAD_DONE);
1761                         break;
1762
1763                 default:
1764                         break;
1765                 }
1766         }
1767
1768         if (kind == RESET_KIND_SHUTDOWN)
1769                 tg3_ape_driver_state_change(tp, kind);
1770 }
1771
1772 /* tp->lock is held. */
1773 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1774 {
1775         if (tg3_flag(tp, ENABLE_ASF)) {
1776                 switch (kind) {
1777                 case RESET_KIND_INIT:
1778                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1779                                       DRV_STATE_START);
1780                         break;
1781
1782                 case RESET_KIND_SHUTDOWN:
1783                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1784                                       DRV_STATE_UNLOAD);
1785                         break;
1786
1787                 case RESET_KIND_SUSPEND:
1788                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1789                                       DRV_STATE_SUSPEND);
1790                         break;
1791
1792                 default:
1793                         break;
1794                 }
1795         }
1796 }
1797
/* Wait for the on-chip bootcode/firmware to finish initializing after
 * a reset.
 *
 * Returns 0 on success or when firmware is absent/not used; returns
 * -ENODEV only when a 5906's VCPU never reports init-done.  A timeout
 * on other chips is deliberately NOT an error (see comment below).
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		/* Firmware signals completion by writing back the
		 * one's complement of the magic value.
		 */
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1846
1847 static void tg3_link_report(struct tg3 *tp)
1848 {
1849         if (!netif_carrier_ok(tp->dev)) {
1850                 netif_info(tp, link, tp->dev, "Link is down\n");
1851                 tg3_ump_link_report(tp);
1852         } else if (netif_msg_link(tp)) {
1853                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1854                             (tp->link_config.active_speed == SPEED_1000 ?
1855                              1000 :
1856                              (tp->link_config.active_speed == SPEED_100 ?
1857                               100 : 10)),
1858                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1859                              "full" : "half"));
1860
1861                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1862                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1863                             "on" : "off",
1864                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1865                             "on" : "off");
1866
1867                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1868                         netdev_info(tp->dev, "EEE is %s\n",
1869                                     tp->setlpicnt ? "enabled" : "disabled");
1870
1871                 tg3_ump_link_report(tp);
1872         }
1873
1874         tp->link_up = netif_carrier_ok(tp->dev);
1875 }
1876
1877 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1878 {
1879         u16 miireg;
1880
1881         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1882                 miireg = ADVERTISE_1000XPAUSE;
1883         else if (flow_ctrl & FLOW_CTRL_TX)
1884                 miireg = ADVERTISE_1000XPSE_ASYM;
1885         else if (flow_ctrl & FLOW_CTRL_RX)
1886                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1887         else
1888                 miireg = 0;
1889
1890         return miireg;
1891 }
1892
1893 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1894 {
1895         u8 cap = 0;
1896
1897         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1898                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1899         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1900                 if (lcladv & ADVERTISE_1000XPAUSE)
1901                         cap = FLOW_CTRL_RX;
1902                 if (rmtadv & ADVERTISE_1000XPAUSE)
1903                         cap = FLOW_CTRL_TX;
1904         }
1905
1906         return cap;
1907 }
1908
1909 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1910 {
1911         u8 autoneg;
1912         u8 flowctrl = 0;
1913         u32 old_rx_mode = tp->rx_mode;
1914         u32 old_tx_mode = tp->tx_mode;
1915
1916         if (tg3_flag(tp, USE_PHYLIB))
1917                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1918         else
1919                 autoneg = tp->link_config.autoneg;
1920
1921         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1922                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1923                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1924                 else
1925                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1926         } else
1927                 flowctrl = tp->link_config.flowctrl;
1928
1929         tp->link_config.active_flowctrl = flowctrl;
1930
1931         if (flowctrl & FLOW_CTRL_RX)
1932                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1933         else
1934                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1935
1936         if (old_rx_mode != tp->rx_mode)
1937                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1938
1939         if (flowctrl & FLOW_CTRL_TX)
1940                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1941         else
1942                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1943
1944         if (old_tx_mode != tp->tx_mode)
1945                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1946 }
1947
/* phylib link-change callback (registered through phy_connect() in
 * tg3_phy_init()).  Re-derives the MAC port mode, duplex, flow control
 * and inter-packet-gap settings from the PHY's current state, then
 * reports any link change after dropping tp->lock.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	/* Start from the current MAC mode with the port-mode and
	 * half-duplex bits cleared; they are recomputed below.
	 */
	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* 10/100 use MII port mode; gigabit (and non-5785
		 * chips at unknown speed) use GMII.
		 */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: resolve pause from our config
			 * and the partner's reported pause ability.
			 */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* 5785 MI status needs the 10 Mbps mode bit at
		 * SPEED_10.
		 */
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* Gigabit half-duplex gets a longer slot time (0xff vs 32). */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* Report outside tp->lock. */
	if (linkmesg)
		tg3_link_report(tp);
}
2031
/* Connect the MAC to its PHY through phylib.
 *
 * Resets the PHY to a known state, attaches tg3_adjust_link() as the
 * link-change callback, and masks the PHY's supported feature set down
 * to what the MAC can do.  Returns 0 if already connected or on
 * success, PTR_ERR from phy_connect() on attach failure, or -EINVAL
 * for an unsupported PHY interface mode.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only boards fall through to the MII mask. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
2079
2080 static void tg3_phy_start(struct tg3 *tp)
2081 {
2082         struct phy_device *phydev;
2083
2084         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2085                 return;
2086
2087         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2088
2089         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2090                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2091                 phydev->speed = tp->link_config.speed;
2092                 phydev->duplex = tp->link_config.duplex;
2093                 phydev->autoneg = tp->link_config.autoneg;
2094                 phydev->advertising = tp->link_config.advertising;
2095         }
2096
2097         phy_start(phydev);
2098
2099         phy_start_aneg(phydev);
2100 }
2101
2102 static void tg3_phy_stop(struct tg3 *tp)
2103 {
2104         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2105                 return;
2106
2107         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2108 }
2109
2110 static void tg3_phy_fini(struct tg3 *tp)
2111 {
2112         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2113                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2114                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2115         }
2116 }
2117
2118 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2119 {
2120         int err;
2121         u32 val;
2122
2123         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2124                 return 0;
2125
2126         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2127                 /* Cannot do read-modify-write on 5401 */
2128                 err = tg3_phy_auxctl_write(tp,
2129                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2130                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2131                                            0x4c20);
2132                 goto done;
2133         }
2134
2135         err = tg3_phy_auxctl_read(tp,
2136                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2137         if (err)
2138                 return err;
2139
2140         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2141         err = tg3_phy_auxctl_write(tp,
2142                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2143
2144 done:
2145         return err;
2146 }
2147
2148 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2149 {
2150         u32 phytest;
2151
2152         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2153                 u32 phy;
2154
2155                 tg3_writephy(tp, MII_TG3_FET_TEST,
2156                              phytest | MII_TG3_FET_SHADOW_EN);
2157                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2158                         if (enable)
2159                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2160                         else
2161                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2162                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2163                 }
2164                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2165         }
2166 }
2167
2168 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2169 {
2170         u32 reg;
2171
2172         if (!tg3_flag(tp, 5705_PLUS) ||
2173             (tg3_flag(tp, 5717_PLUS) &&
2174              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2175                 return;
2176
2177         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2178                 tg3_phy_fet_toggle_apd(tp, enable);
2179                 return;
2180         }
2181
2182         reg = MII_TG3_MISC_SHDW_WREN |
2183               MII_TG3_MISC_SHDW_SCR5_SEL |
2184               MII_TG3_MISC_SHDW_SCR5_LPED |
2185               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2186               MII_TG3_MISC_SHDW_SCR5_SDTL |
2187               MII_TG3_MISC_SHDW_SCR5_C125OE;
2188         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2189                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2190
2191         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2192
2193
2194         reg = MII_TG3_MISC_SHDW_WREN |
2195               MII_TG3_MISC_SHDW_APD_SEL |
2196               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2197         if (enable)
2198                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2199
2200         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2201 }
2202
/* Enable or disable automatic MDI/MDI-X crossover on the copper PHY.
 *
 * No-op on pre-5705 chips and on serdes boards, which have no copper
 * pairs to swap.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		/* FET PHYs keep the MDIX bit behind the shadow-enable
		 * gate in the test register: expose the shadow set,
		 * flip the bit, restore the test register.
		 */
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		/* Non-FET PHYs use the MISC shadow of the aux control
		 * register set; skip the write if the read fails.
		 */
		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
2243
2244 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2245 {
2246         int ret;
2247         u32 val;
2248
2249         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2250                 return;
2251
2252         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2253         if (!ret)
2254                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2255                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2256 }
2257
/* Program PHY DSP coefficients from the chip's one-time-programmable
 * (OTP) value cached in tp->phy_otp.  Each OTP field is masked,
 * shifted into position, and written to its DSP register.  Silently
 * does nothing if no OTP value is present or the DSP cannot be opened
 * for access.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* All DSP writes must happen between smdsp open and close. */
	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
2294
/* Track Energy Efficient Ethernet state after a link change.
 *
 * If autoneg produced an EEE-capable full-duplex 100/1000 link,
 * program the CPMU LPI exit timer and arm tp->setlpicnt so the timer
 * code can enable LPI later.  Otherwise clear the TAP26 DSP register
 * (when the link is up) and disable LPI in the CPMU.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit latency depends on link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Check the clause-45 EEE resolution status for an
		 * EEE-capable link partner.
		 */
		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up == 1 &&
		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2337
2338 static void tg3_phy_eee_enable(struct tg3 *tp)
2339 {
2340         u32 val;
2341
2342         if (tp->link_config.active_speed == SPEED_1000 &&
2343             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2344              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2345              tg3_flag(tp, 57765_CLASS)) &&
2346             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2347                 val = MII_TG3_DSP_TAP26_ALNOKO |
2348                       MII_TG3_DSP_TAP26_RMRXSTO;
2349                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2350                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2351         }
2352
2353         val = tr32(TG3_CPMU_EEE_MODE);
2354         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2355 }
2356
2357 static int tg3_wait_macro_done(struct tg3 *tp)
2358 {
2359         int limit = 100;
2360
2361         while (limit--) {
2362                 u32 tmp32;
2363
2364                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2365                         if ((tmp32 & 0x1000) == 0)
2366                                 break;
2367                 }
2368         }
2369         if (limit < 0)
2370                 return -EBUSY;
2371
2372         return 0;
2373 }
2374
/* Write a known test pattern into each of the four PHY DSP channels
 * and read it back to verify.  On a macro timeout or a readback
 * mismatch, request another PHY reset by setting *resetp and return
 * -EBUSY; return 0 when every channel verifies.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Six 16-bit tap values per channel: pairs of (low, high)
	 * words as written to the DSP read/write port.
	 */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select the channel's test block and write the
		 * pattern.
		 */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Switch to readback mode for the same channel. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back and compare each (low, high) pair. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2440
2441 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2442 {
2443         int chan;
2444
2445         for (chan = 0; chan < 4; chan++) {
2446                 int i;
2447
2448                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2449                              (chan * 0x2000) | 0x0200);
2450                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2451                 for (i = 0; i < 6; i++)
2452                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2453                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2454                 if (tg3_wait_macro_done(tp))
2455                         return -EBUSY;
2456         }
2457
2458         return 0;
2459 }
2460
2461 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2462 {
2463         u32 reg32, phy9_orig;
2464         int retries, do_phy_reset, err;
2465
2466         retries = 10;
2467         do_phy_reset = 1;
2468         do {
2469                 if (do_phy_reset) {
2470                         err = tg3_bmcr_reset(tp);
2471                         if (err)
2472                                 return err;
2473                         do_phy_reset = 0;
2474                 }
2475
2476                 /* Disable transmitter and interrupt.  */
2477                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2478                         continue;
2479
2480                 reg32 |= 0x3000;
2481                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2482
2483                 /* Set full-duplex, 1000 mbps.  */
2484                 tg3_writephy(tp, MII_BMCR,
2485                              BMCR_FULLDPLX | BMCR_SPEED1000);
2486
2487                 /* Set to master mode.  */
2488                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2489                         continue;
2490
2491                 tg3_writephy(tp, MII_CTRL1000,
2492                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2493
2494                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2495                 if (err)
2496                         return err;
2497
2498                 /* Block the PHY control access.  */
2499                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2500
2501                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2502                 if (!err)
2503                         break;
2504         } while (--retries);
2505
2506         err = tg3_phy_reset_chanpat(tp);
2507         if (err)
2508                 return err;
2509
2510         tg3_phydsp_write(tp, 0x8005, 0x0000);
2511
2512         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2513         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2514
2515         tg3_phy_toggle_auxctl_smdsp(tp, false);
2516
2517         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2518
2519         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2520                 reg32 &= ~0x3000;
2521                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2522         } else if (!err)
2523                 err = -EBUSY;
2524
2525         return err;
2526 }
2527
/* Mark the link down: tell the net stack the carrier is gone and
 * clear the driver's own link-state flag.
 */
static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}
2533
2534 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2535 {
2536         if (tg3_flag(tp, ENABLE_ASF))
2537                 netdev_warn(tp->dev,
2538                             "Management side-band traffic will be interrupted during phy settings change\n");
2539 }
2540
/* Reset the tigon3 PHY and apply all chip-specific post-reset
 * workarounds (DSP fixups, CPMU clock adjustments, jumbo-frame
 * settings).  Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Bring the 5906 ethernet PHY out of IDDQ (low-power)
		 * mode before touching it. */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Read BMSR twice: per MII convention the link-status bit is
	 * latched, so the first read may return stale state. */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	/* Report the link going down before the PHY is reset. */
	if (netif_running(tp->dev) && tp->link_up) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		/* These chips need the test-pattern reset workaround. */
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		/* Temporarily clear the 10Mb RX-only CPMU mode around
		 * the PHY reset; restored further below. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		/* Restore the CPMU control value saved above. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		/* Undo the 12.5MHz 1000Mb MAC clock if it was forced
		 * (set when powering down the PHY on these revs). */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	/* 5717+ devices with an MII serdes skip the fixups below. */
	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		/* ADC bug workaround (opaque vendor DSP values). */
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		/* 5704 A0 workaround; the duplicated write is
		 * intentional. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		/* Bit-error-rate bug workaround (opaque DSP values). */
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		/* Jitter bug workaround, with optional trim adjust. */
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set the Extended packet length bit (bit 14) on all chips
	 * that support jumbo frames.
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2684
/* Per-function GPIO power-source messages.  Each of the (up to four)
 * PCI functions owns a 4-bit field in the shared status word; the
 * *_ALL_* masks cover that bit across all four fields.
 */
#define TG3_GPIO_MSG_DRVR_PRES           0x00000001
#define TG3_GPIO_MSG_NEED_VAUX           0x00000002
#define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
                                          TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
        ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
         (TG3_GPIO_MSG_DRVR_PRES << 4) | \
         (TG3_GPIO_MSG_DRVR_PRES << 8) | \
         (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
        ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
         (TG3_GPIO_MSG_NEED_VAUX << 4) | \
         (TG3_GPIO_MSG_NEED_VAUX << 8) | \
         (TG3_GPIO_MSG_NEED_VAUX << 12))
2700
2701 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2702 {
2703         u32 status, shift;
2704
2705         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2706             tg3_asic_rev(tp) == ASIC_REV_5719)
2707                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2708         else
2709                 status = tr32(TG3_CPMU_DRV_STATUS);
2710
2711         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2712         status &= ~(TG3_GPIO_MSG_MASK << shift);
2713         status |= (newstat << shift);
2714
2715         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2716             tg3_asic_rev(tp) == ASIC_REV_5719)
2717                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2718         else
2719                 tw32(TG3_CPMU_DRV_STATUS, status);
2720
2721         return status >> TG3_APE_GPIO_MSG_SHIFT;
2722 }
2723
2724 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2725 {
2726         if (!tg3_flag(tp, IS_NIC))
2727                 return 0;
2728
2729         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2730             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2731             tg3_asic_rev(tp) == ASIC_REV_5720) {
2732                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2733                         return -EIO;
2734
2735                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2736
2737                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2738                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2739
2740                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2741         } else {
2742                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2743                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2744         }
2745
2746         return 0;
2747 }
2748
2749 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2750 {
2751         u32 grc_local_ctrl;
2752
2753         if (!tg3_flag(tp, IS_NIC) ||
2754             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2755             tg3_asic_rev(tp) == ASIC_REV_5701)
2756                 return;
2757
2758         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2759
2760         tw32_wait_f(GRC_LOCAL_CTRL,
2761                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2762                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2763
2764         tw32_wait_f(GRC_LOCAL_CTRL,
2765                     grc_local_ctrl,
2766                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2767
2768         tw32_wait_f(GRC_LOCAL_CTRL,
2769                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2770                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2771 }
2772
/* Switch the NIC's power source to the auxiliary (VAUX) supply by
 * programming GPIO pins.  The exact pin sequence is board-specific:
 * 5700/5701, 5761(-S), 5714 and NO_GPIO2 boards each need different
 * programming.  No-op for non-NIC (e.g. LOM) devices.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* 5700/5701: drive GPIO0..2 in a single write. */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Raise GPIO0, then (if usable) drop GPIO2; each step
		 * is a separate delayed write. */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2849
2850 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2851 {
2852         u32 msg = 0;
2853
2854         /* Serialize power state transitions */
2855         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2856                 return;
2857
2858         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2859                 msg = TG3_GPIO_MSG_NEED_VAUX;
2860
2861         msg = tg3_set_function_status(tp, msg);
2862
2863         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2864                 goto done;
2865
2866         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2867                 tg3_pwrsrc_switch_to_vaux(tp);
2868         else
2869                 tg3_pwrsrc_die_with_vmain(tp);
2870
2871 done:
2872         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2873 }
2874
2875 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2876 {
2877         bool need_vaux = false;
2878
2879         /* The GPIOs do something completely different on 57765. */
2880         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2881                 return;
2882
2883         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2884             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2885             tg3_asic_rev(tp) == ASIC_REV_5720) {
2886                 tg3_frob_aux_power_5717(tp, include_wol ?
2887                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2888                 return;
2889         }
2890
2891         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2892                 struct net_device *dev_peer;
2893
2894                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2895
2896                 /* remove_one() may have been run on the peer. */
2897                 if (dev_peer) {
2898                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2899
2900                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2901                                 return;
2902
2903                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2904                             tg3_flag(tp_peer, ENABLE_ASF))
2905                                 need_vaux = true;
2906                 }
2907         }
2908
2909         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2910             tg3_flag(tp, ENABLE_ASF))
2911                 need_vaux = true;
2912
2913         if (need_vaux)
2914                 tg3_pwrsrc_switch_to_vaux(tp);
2915         else
2916                 tg3_pwrsrc_die_with_vmain(tp);
2917 }
2918
2919 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2920 {
2921         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2922                 return 1;
2923         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2924                 if (speed != SPEED_10)
2925                         return 1;
2926         } else if (speed == SPEED_10)
2927                 return 1;
2928
2929         return 0;
2930 }
2931
/* Power down (or isolate) the PHY ahead of a low-power transition.
 * @do_low_power: also program the copper PHY's low-power/isolate mode.
 * Some chips must keep the PHY powered because of hardware bugs; they
 * return before the final BMCR_PDOWN write.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			/* Hold the serdes in HW-autoneg + soft reset;
			 * bit 15 of MAC_SERDES_CFG is an undocumented
			 * hardware bit. */
			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906: reset the PHY, then park it in IDDQ mode. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			/* Drop all advertised modes and restart
			 * autoneg before powering down. */
			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Via the shadow registers, enable standby
			 * power-down, then hide the shadows again. */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		/* Kill the LEDs and select the lowest-power copper
		 * settings (isolate, low-power 100TX, 1.1V regulator). */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    (tg3_asic_rev(tp) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
	    (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	     !tp->pci_fn))
		return;

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		/* Force the 1000Mb MAC clock down to 12.5MHz;
		 * tg3_phy_reset() undoes this on the way back up. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3006
3007 /* tp->lock is held. */
3008 static int tg3_nvram_lock(struct tg3 *tp)
3009 {
3010         if (tg3_flag(tp, NVRAM)) {
3011                 int i;
3012
3013                 if (tp->nvram_lock_cnt == 0) {
3014                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3015                         for (i = 0; i < 8000; i++) {
3016                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3017                                         break;
3018                                 udelay(20);
3019                         }
3020                         if (i == 8000) {
3021                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3022                                 return -ENODEV;
3023                         }
3024                 }
3025                 tp->nvram_lock_cnt++;
3026         }
3027         return 0;
3028 }
3029
3030 /* tp->lock is held. */
3031 static void tg3_nvram_unlock(struct tg3 *tp)
3032 {
3033         if (tg3_flag(tp, NVRAM)) {
3034                 if (tp->nvram_lock_cnt > 0)
3035                         tp->nvram_lock_cnt--;
3036                 if (tp->nvram_lock_cnt == 0)
3037                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3038         }
3039 }
3040
3041 /* tp->lock is held. */
3042 static void tg3_enable_nvram_access(struct tg3 *tp)
3043 {
3044         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3045                 u32 nvaccess = tr32(NVRAM_ACCESS);
3046
3047                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3048         }
3049 }
3050
3051 /* tp->lock is held. */
3052 static void tg3_disable_nvram_access(struct tg3 *tp)
3053 {
3054         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3055                 u32 nvaccess = tr32(NVRAM_ACCESS);
3056
3057                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3058         }
3059 }
3060
3061 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3062                                         u32 offset, u32 *val)
3063 {
3064         u32 tmp;
3065         int i;
3066
3067         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3068                 return -EINVAL;
3069
3070         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3071                                         EEPROM_ADDR_DEVID_MASK |
3072                                         EEPROM_ADDR_READ);
3073         tw32(GRC_EEPROM_ADDR,
3074              tmp |
3075              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3076              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3077               EEPROM_ADDR_ADDR_MASK) |
3078              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3079
3080         for (i = 0; i < 1000; i++) {
3081                 tmp = tr32(GRC_EEPROM_ADDR);
3082
3083                 if (tmp & EEPROM_ADDR_COMPLETE)
3084                         break;
3085                 msleep(1);
3086         }
3087         if (!(tmp & EEPROM_ADDR_COMPLETE))
3088                 return -EBUSY;
3089
3090         tmp = tr32(GRC_EEPROM_DATA);
3091
3092         /*
3093          * The data will always be opposite the native endian
3094          * format.  Perform a blind byteswap to compensate.
3095          */
3096         *val = swab32(tmp);
3097
3098         return 0;
3099 }
3100
3101 #define NVRAM_CMD_TIMEOUT 10000
3102
3103 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3104 {
3105         int i;
3106
3107         tw32(NVRAM_CMD, nvram_cmd);
3108         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3109                 udelay(10);
3110                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3111                         udelay(10);
3112                         break;
3113                 }
3114         }
3115
3116         if (i == NVRAM_CMD_TIMEOUT)
3117                 return -EBUSY;
3118
3119         return 0;
3120 }
3121
3122 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3123 {
3124         if (tg3_flag(tp, NVRAM) &&
3125             tg3_flag(tp, NVRAM_BUFFERED) &&
3126             tg3_flag(tp, FLASH) &&
3127             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3128             (tp->nvram_jedecnum == JEDEC_ATMEL))
3129
3130                 addr = ((addr / tp->nvram_pagesize) <<
3131                         ATMEL_AT45DB0X1B_PAGE_POS) +
3132                        (addr % tp->nvram_pagesize);
3133
3134         return addr;
3135 }
3136
3137 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3138 {
3139         if (tg3_flag(tp, NVRAM) &&
3140             tg3_flag(tp, NVRAM_BUFFERED) &&
3141             tg3_flag(tp, FLASH) &&
3142             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3143             (tp->nvram_jedecnum == JEDEC_ATMEL))
3144
3145                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3146                         tp->nvram_pagesize) +
3147                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3148
3149         return addr;
3150 }
3151
3152 /* NOTE: Data read in from NVRAM is byteswapped according to
3153  * the byteswapping settings for all other register accesses.
3154  * tg3 devices are BE devices, so on a BE machine, the data
3155  * returned will be exactly as it is seen in NVRAM.  On a LE
3156  * machine, the 32-bit value will be byteswapped.
3157  */
3158 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3159 {
3160         int ret;
3161
3162         if (!tg3_flag(tp, NVRAM))
3163                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3164
3165         offset = tg3_nvram_phys_addr(tp, offset);
3166
3167         if (offset > NVRAM_ADDR_MSK)
3168                 return -EINVAL;
3169
3170         ret = tg3_nvram_lock(tp);
3171         if (ret)
3172                 return ret;
3173
3174         tg3_enable_nvram_access(tp);
3175
3176         tw32(NVRAM_ADDR, offset);
3177         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3178                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3179
3180         if (ret == 0)
3181                 *val = tr32(NVRAM_RDDATA);
3182
3183         tg3_disable_nvram_access(tp);
3184
3185         tg3_nvram_unlock(tp);
3186
3187         return ret;
3188 }
3189
3190 /* Ensures NVRAM data is in bytestream format. */
3191 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3192 {
3193         u32 v;
3194         int res = tg3_nvram_read(tp, offset, &v);
3195         if (!res)
3196                 *val = cpu_to_be32(v);
3197         return res;
3198 }
3199
3200 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3201                                     u32 offset, u32 len, u8 *buf)
3202 {
3203         int i, j, rc = 0;
3204         u32 val;
3205
3206         for (i = 0; i < len; i += 4) {
3207                 u32 addr;
3208                 __be32 data;
3209
3210                 addr = offset + i;
3211
3212                 memcpy(&data, buf + i, 4);
3213
3214                 /*
3215                  * The SEEPROM interface expects the data to always be opposite
3216                  * the native endian format.  We accomplish this by reversing
3217                  * all the operations that would have been performed on the
3218                  * data from a call to tg3_nvram_read_be32().
3219                  */
3220                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3221
3222                 val = tr32(GRC_EEPROM_ADDR);
3223                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3224
3225                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3226                         EEPROM_ADDR_READ);
3227                 tw32(GRC_EEPROM_ADDR, val |
3228                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3229                         (addr & EEPROM_ADDR_ADDR_MASK) |
3230                         EEPROM_ADDR_START |
3231                         EEPROM_ADDR_WRITE);
3232
3233                 for (j = 0; j < 1000; j++) {
3234                         val = tr32(GRC_EEPROM_ADDR);
3235
3236                         if (val & EEPROM_ADDR_COMPLETE)
3237                                 break;
3238                         msleep(1);
3239                 }
3240                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3241                         rc = -EBUSY;
3242                         break;
3243                 }
3244         }
3245
3246         return rc;
3247 }
3248
/* Write a buffer to unbuffered (page-erase) flash: for each page
 * touched, read the page into a bounce buffer, merge in the caller's
 * data, erase the page, and rewrite it word by word.  offset and
 * length are dword aligned.  Returns 0 on success or a negative errno;
 * a partially-written block is possible on error.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Bounce buffer for read-modify-write of a full page. */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start of the page containing 'offset'. */
		phy_addr = offset & ~pagemask;

		/* Read the whole current page. */
		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		/* Merge the caller's data into the page image. */
		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		/* Advance to the start of the next page. */
		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Write the merged page image back, flagging the first
		 * and last words of the burst. */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	/* Always re-assert write-disable on the way out. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
3347
/* offset and length are dword aligned */
/* Write @len bytes from @buf to buffered (page-mode) NVRAM or EEPROM at
 * @offset, one 32-bit big-endian word per controller command.  FIRST/LAST
 * command flags are raised at page boundaries and at the start/end of the
 * transfer, and older ST flash parts get an explicit write-enable command
 * before each page.  Returns 0 on success or the error from
 * tg3_nvram_exec_cmd().
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		/* @buf holds the data as big-endian 32-bit words. */
		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Flag the first word of a page (or of the transfer) and
		 * the last word of a page or of the transfer.
		 */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* The address register only needs reloading at the start of
		 * a burst on 57765+ flash devices; everything else gets it
		 * on every word.
		 */
		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		/* Older ST-flash devices require a write-enable command
		 * before the first word of each page.
		 */
		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
3402
3403 /* offset and length are dword aligned */
3404 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3405 {
3406         int ret;
3407
3408         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3409                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3410                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3411                 udelay(40);
3412         }
3413
3414         if (!tg3_flag(tp, NVRAM)) {
3415                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3416         } else {
3417                 u32 grc_mode;
3418
3419                 ret = tg3_nvram_lock(tp);
3420                 if (ret)
3421                         return ret;
3422
3423                 tg3_enable_nvram_access(tp);
3424                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3425                         tw32(NVRAM_WRITE1, 0x406);
3426
3427                 grc_mode = tr32(GRC_MODE);
3428                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3429
3430                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3431                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3432                                 buf);
3433                 } else {
3434                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3435                                 buf);
3436                 }
3437
3438                 grc_mode = tr32(GRC_MODE);
3439                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3440
3441                 tg3_disable_nvram_access(tp);
3442                 tg3_nvram_unlock(tp);
3443         }
3444
3445         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3446                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3447                 udelay(40);
3448         }
3449
3450         return ret;
3451 }
3452
/* On-chip CPU scratch-memory windows used as firmware download targets
 * (see tg3_load_firmware_cpu() and friends below).
 */
#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000
3457
3458 /* tp->lock is held. */
3459 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3460 {
3461         int i;
3462         const int iters = 10000;
3463
3464         for (i = 0; i < iters; i++) {
3465                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3466                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3467                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3468                         break;
3469         }
3470
3471         return (i == iters) ? -EBUSY : 0;
3472 }
3473
/* tp->lock is held. */
/* Pause the RX CPU.  After the generic poll, one more halt request is
 * issued unconditionally (even if the poll timed out) followed by a
 * short settle delay.  Returns the result of the poll.
 */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
	udelay(10);

	return rc;
}
3485
3486 /* tp->lock is held. */
3487 static int tg3_txcpu_pause(struct tg3 *tp)
3488 {
3489         return tg3_pause_cpu(tp, TX_CPU_BASE);
3490 }
3491
/* tp->lock is held. */
/* Resume the CPU at @cpu_base: write all-ones to CPU_STATE (presumably
 * write-1-to-clear of pending events — matches the pause path) and then
 * clear CPU_MODE, releasing the halt bit.
 */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
}
3498
/* tp->lock is held. */
/* Restart the RX CPU after a pause. */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	tg3_resume_cpu(tp, RX_CPU_BASE);
}
3504
/* tp->lock is held. */
/* Halt the RX or TX on-chip CPU selected by @cpu_base.  On the 5906 the
 * virtual CPU is halted via GRC_VCPU_EXT_CTRL instead of the CPU_MODE
 * register.  Returns 0 on success or -ENODEV if the CPU did not
 * acknowledge the halt in time.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	/* 5705+ devices have no separately haltable TX CPU (see also the
	 * guard in tg3_load_firmware_cpu()).
	 */
	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3542
3543 static int tg3_fw_data_len(struct tg3 *tp,
3544                            const struct tg3_firmware_hdr *fw_hdr)
3545 {
3546         int fw_len;
3547
3548         /* Non fragmented firmware have one firmware header followed by a
3549          * contiguous chunk of data to be written. The length field in that
3550          * header is not the length of data to be written but the complete
3551          * length of the bss. The data length is determined based on
3552          * tp->fw->size minus headers.
3553          *
3554          * Fragmented firmware have a main header followed by multiple
3555          * fragments. Each fragment is identical to non fragmented firmware
3556          * with a firmware header followed by a contiguous chunk of data. In
3557          * the main header, the length field is unused and set to 0xffffffff.
3558          * In each fragment header the length is the entire size of that
3559          * fragment i.e. fragment data + header length. Data length is
3560          * therefore length field in the header minus TG3_FW_HDR_LEN.
3561          */
3562         if (tp->fw_len == 0xffffffff)
3563                 fw_len = be32_to_cpu(fw_hdr->len);
3564         else
3565                 fw_len = tp->fw->size;
3566
3567         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3568 }
3569
/* tp->lock is held. */
/* Copy the firmware described by @fw_hdr into the scratch memory of the
 * CPU at @cpu_base.  For every chip except the 57766 the CPU is halted
 * first (under the NVRAM lock, since bootcode may still be running) and
 * @cpu_scratch_size bytes of scratch are zeroed.  The 57766 path skips
 * the fragmented blob's main header instead; its CPU pause/resume is the
 * caller's responsibility.  Returns 0 on success or a negative errno.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 const struct tg3_firmware_hdr *fw_hdr)
{
	int err, i;
	void (*write_op)(struct tg3 *, u32, u32);
	int total_len = tp->fw->size;

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* Pick direct memory writes or indirect register writes depending
	 * on the chip generation.
	 */
	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
		/* It is possible that bootcode is still loading at this point.
		 * Get the nvram lock first before halting the cpu.
		 */
		int lock_err = tg3_nvram_lock(tp);
		err = tg3_halt_cpu(tp, cpu_base);
		if (!lock_err)
			tg3_nvram_unlock(tp);
		if (err)
			goto out;

		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
			write_op(tp, cpu_scratch_base + i, 0);
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,
		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	} else {
		/* Subtract additional main header for fragmented firmware and
		 * advance to the first fragment
		 */
		total_len -= TG3_FW_HDR_LEN;
		fw_hdr++;
	}

	/* Write each fragment's data words at the (low 16 bits of the)
	 * base address the fragment header specifies.
	 */
	do {
		u32 *fw_data = (u32 *)(fw_hdr + 1);
		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
			write_op(tp, cpu_scratch_base +
				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
				     (i * sizeof(u32)),
				 be32_to_cpu(fw_data[i]));

		total_len -= be32_to_cpu(fw_hdr->len);

		/* Advance to next fragment */
		fw_hdr = (struct tg3_firmware_hdr *)
			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
	} while (total_len > 0);

	err = 0;

out:
	return err;
}
3635
/* tp->lock is held. */
/* Set the program counter of the CPU at @cpu_base to @pc and verify the
 * value latched, re-halting the CPU and rewriting the PC between
 * attempts.  Returns 0 once the readback matches, or -EBUSY after the
 * retries are exhausted.
 */
static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
{
	int i;
	const int iters = 5;

	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, pc);

	for (i = 0; i < iters; i++) {
		if (tr32(cpu_base + CPU_PC) == pc)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, pc);
		udelay(1000);
	}

	return (i == iters) ? -EBUSY : 0;
}
3656
/* tp->lock is held. */
/* Load the 5701 A0 workaround firmware into both the RX and TX CPU
 * scratch areas, then start only the RX CPU at the firmware entry point.
 * Returns 0 on success or a negative errno.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
				be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_rxcpu_resume(tp);

	return 0;
}
3698
3699 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3700 {
3701         const int iters = 1000;
3702         int i;
3703         u32 val;
3704
3705         /* Wait for boot code to complete initialization and enter service
3706          * loop. It is then safe to download service patches
3707          */
3708         for (i = 0; i < iters; i++) {
3709                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3710                         break;
3711
3712                 udelay(10);
3713         }
3714
3715         if (i == iters) {
3716                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3717                 return -EBUSY;
3718         }
3719
3720         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3721         if (val & 0xff) {
3722                 netdev_warn(tp->dev,
3723                             "Other patches exist. Not downloading EEE patch\n");
3724                 return -EEXIST;
3725         }
3726
3727         return 0;
3728 }
3729
/* tp->lock is held. */
/* Download the 57766 EEE service patch.  Only devices without NVRAM take
 * this path; the boot code must be in its service loop and no other
 * patch may already be loaded.  Failures return silently because the
 * patch is optional.
 */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	if (!tg3_flag(tp, NO_NVRAM))
		return;

	if (tg3_validate_rxcpu_state(tp))
		return;

	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below. The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length. The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware. i.e. they have the
	 * firmware header and followed by data for that fragment. The version
	 * field of the individual fragment header is unused.
	 */

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	tg3_rxcpu_resume(tp);
}
3770
/* tp->lock is held. */
/* Download the TSO firmware and start the CPU that runs it.  On the 5705
 * the firmware runs on the RX CPU out of the MBUF pool; on other chips
 * it uses the TX CPU scratch area.  Returns 0 on success (or when
 * firmware TSO is not in use), or a negative errno on failure.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err;

	if (!tg3_flag(tp, FW_TSO))
		return 0;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	cpu_scratch_size = tp->fw_len;

	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup the cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_resume_cpu(tp, cpu_base);
	return 0;
}
3820
3821
3822 /* tp->lock is held. */
3823 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3824 {
3825         u32 addr_high, addr_low;
3826         int i;
3827
3828         addr_high = ((tp->dev->dev_addr[0] << 8) |
3829                      tp->dev->dev_addr[1]);
3830         addr_low = ((tp->dev->dev_addr[2] << 24) |
3831                     (tp->dev->dev_addr[3] << 16) |
3832                     (tp->dev->dev_addr[4] <<  8) |
3833                     (tp->dev->dev_addr[5] <<  0));
3834         for (i = 0; i < 4; i++) {
3835                 if (i == 1 && skip_mac_1)
3836                         continue;
3837                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3838                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3839         }
3840
3841         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3842             tg3_asic_rev(tp) == ASIC_REV_5704) {
3843                 for (i = 0; i < 12; i++) {
3844                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3845                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3846                 }
3847         }
3848
3849         addr_high = (tp->dev->dev_addr[0] +
3850                      tp->dev->dev_addr[1] +
3851                      tp->dev->dev_addr[2] +
3852                      tp->dev->dev_addr[3] +
3853                      tp->dev->dev_addr[4] +
3854                      tp->dev->dev_addr[5]) &
3855                 TX_BACKOFF_SEED_MASK;
3856         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3857 }
3858
/* Rewrite the cached TG3PCI_MISC_HOST_CTRL value into PCI config space
 * so that subsequent register accesses behave; called at the start of
 * the power-up/power-down paths below.
 */
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
3868
3869 static int tg3_power_up(struct tg3 *tp)
3870 {
3871         int err;
3872
3873         tg3_enable_register_access(tp);
3874
3875         err = pci_set_power_state(tp->pdev, PCI_D0);
3876         if (!err) {
3877                 /* Switch out of Vaux if it is a NIC */
3878                 tg3_pwrsrc_switch_to_vmain(tp);
3879         } else {
3880                 netdev_err(tp->dev, "Transition to D0 failed\n");
3881         }
3882
3883         return err;
3884 }
3885
3886 static int tg3_setup_phy(struct tg3 *, int);
3887
/* Prepare the device for a low-power state: mask PCI interrupts, push
 * the PHY into a WoL-capable low-power configuration, program the MAC
 * for magic-packet wake when needed, gate the core clocks, and signal
 * RESET_KIND_SHUTDOWN to the firmware.  Always returns 0.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG))
		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
					 PCI_EXP_LNKCTL_CLKREQ_EN);

	/* Mask PCI interrupts while powering down. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	/* Put the PHY into low-power mode.  With phylib, save the current
	 * link settings and restrict advertisement to what WoL needs;
	 * otherwise let the legacy path reconfigure the PHY directly.
	 */
	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.speed = phydev->speed;
			tp->link_config.duplex = phydev->duplex;
			tp->link_config.autoneg = phydev->autoneg;
			tp->link_config.advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Certain Broadcom PHY families still need the
			 * driver-side low-power sequence.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			tg3_setup_phy(tp, 0);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		/* Poll the firmware mailbox (up to ~200ms) before touching
		 * the WoL mailbox below.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	/* Configure the MAC for magic-packet reception while asleep. */
	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Gate/slow the core clocks as far as this chip family allows. */
	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
		    tg3_asic_rev(tp) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Apply the new clock bits in two steps. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* PHY power can only be removed when neither WoL nor ASF needs it. */
	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
4117
/* Final power-down path: finish the shutdown sequence started by
 * tg3_power_down_prepare(), arm PCI wake-from-D3 when the user has
 * enabled wake-on-LAN, then drop the device into D3hot.
 *
 * The order matters: the chip must be quiesced by the prepare step
 * before PCI power state is changed.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	/* WOL_ENABLE decides whether this device may wake the system. */
	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
4125
4126 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4127 {
4128         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4129         case MII_TG3_AUX_STAT_10HALF:
4130                 *speed = SPEED_10;
4131                 *duplex = DUPLEX_HALF;
4132                 break;
4133
4134         case MII_TG3_AUX_STAT_10FULL:
4135                 *speed = SPEED_10;
4136                 *duplex = DUPLEX_FULL;
4137                 break;
4138
4139         case MII_TG3_AUX_STAT_100HALF:
4140                 *speed = SPEED_100;
4141                 *duplex = DUPLEX_HALF;
4142                 break;
4143
4144         case MII_TG3_AUX_STAT_100FULL:
4145                 *speed = SPEED_100;
4146                 *duplex = DUPLEX_FULL;
4147                 break;
4148
4149         case MII_TG3_AUX_STAT_1000HALF:
4150                 *speed = SPEED_1000;
4151                 *duplex = DUPLEX_HALF;
4152                 break;
4153
4154         case MII_TG3_AUX_STAT_1000FULL:
4155                 *speed = SPEED_1000;
4156                 *duplex = DUPLEX_FULL;
4157                 break;
4158
4159         default:
4160                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4161                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4162                                  SPEED_10;
4163                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4164                                   DUPLEX_HALF;
4165                         break;
4166                 }
4167                 *speed = SPEED_UNKNOWN;
4168                 *duplex = DUPLEX_UNKNOWN;
4169                 break;
4170         }
4171 }
4172
/* Program the PHY's autonegotiation advertisement registers.
 *
 * @tp:        device private state
 * @advertise: ethtool ADVERTISED_* link-mode mask to advertise
 * @flowctrl:  FLOW_CTRL_TX/RX pause configuration to advertise
 *
 * Writes MII_ADVERTISE (10/100 + pause bits), MII_CTRL1000 (gigabit,
 * skipped on 10/100-only PHYs) and, on EEE-capable PHYs, the EEE
 * advertisement via clause-45 access plus per-ASIC DSP tweaks.
 *
 * Returns 0 on success or a PHY access error code.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		/* 5701 A0/B0 is forced to gigabit master mode; the same
		 * adjustment is mirrored in tg3_phy_copper_an_config_ok().
		 */
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* Disable LPI while the EEE advertisement is reconfigured. */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (tg3_asic_rev(tp)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
		case ASIC_REV_5762:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		/* Report the first error; the toggle-off error only if
		 * everything else succeeded.
		 */
		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}
4246
/* Kick off link bring-up on a copper PHY.
 *
 * With autoneg enabled (or with the PHY in the low-power WOL state)
 * this programs the advertisement registers and restarts
 * autonegotiation; otherwise it forces the configured speed/duplex
 * through BMCR.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
			/* Low-power (WOL) mode: advertise only 10Mb modes,
			 * plus 100Mb when 100Mb/s WOL was requested, with
			 * full flow control.
			 */
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			/* 10/100-only PHYs must not advertise gigabit. */
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, 5715 only links up when the
			 * advertisement register has the configured speed
			 * enabled.
			 */
			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
		}

		/* Build the forced-mode BMCR value (SPEED_10 == no bits). */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Force the link down via loopback and wait up to
			 * ~15ms for link loss before applying the new mode.
			 * BMSR latches link-down, hence the double read.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
4327
/* Load the DSP fix-up sequence for the BCM5401 PHY.
 *
 * Returns 0 on success; a non-zero value if any of the PHY writes
 * failed (errors are OR-ed together, individual failures are not
 * distinguished).
 */
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	/* Opaque vendor DSP register/value pairs — presumably an
	 * order-sensitive vendor-documented sequence; do not reorder.
	 */
	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}
4346
4347 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4348 {
4349         u32 advmsk, tgtadv, advertising;
4350
4351         advertising = tp->link_config.advertising;
4352         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4353
4354         advmsk = ADVERTISE_ALL;
4355         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4356                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4357                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4358         }
4359
4360         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4361                 return false;
4362
4363         if ((*lcladv & advmsk) != tgtadv)
4364                 return false;
4365
4366         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4367                 u32 tg3_ctrl;
4368
4369                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4370
4371                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4372                         return false;
4373
4374                 if (tgtadv &&
4375                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4376                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4377                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4378                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4379                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4380                 } else {
4381                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4382                 }
4383
4384                 if (tg3_ctrl != tgtadv)
4385                         return false;
4386         }
4387
4388         return true;
4389 }
4390
4391 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4392 {
4393         u32 lpeth = 0;
4394
4395         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4396                 u32 val;
4397
4398                 if (tg3_readphy(tp, MII_STAT1000, &val))
4399                         return false;
4400
4401                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4402         }
4403
4404         if (tg3_readphy(tp, MII_LPA, rmtadv))
4405                 return false;
4406
4407         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4408         tp->link_config.rmt_adv = lpeth;
4409
4410         return true;
4411 }
4412
4413 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4414 {
4415         if (curr_link_up != tp->link_up) {
4416                 if (curr_link_up) {
4417                         netif_carrier_on(tp->dev);
4418                 } else {
4419                         netif_carrier_off(tp->dev);
4420                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4421                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4422                 }
4423
4424                 tg3_link_report(tp);
4425                 return true;
4426         }
4427
4428         return false;
4429 }
4430
4431 static void tg3_clear_mac_status(struct tg3 *tp)
4432 {
4433         tw32(MAC_EVENT, 0);
4434
4435         tw32_f(MAC_STATUS,
4436                MAC_STATUS_SYNC_CHANGED |
4437                MAC_STATUS_CFG_CHANGED |
4438                MAC_STATUS_MI_COMPLETION |
4439                MAC_STATUS_LNKSTATE_CHANGED);
4440         udelay(40);
4441 }
4442
/* Evaluate and (re)establish the link on a copper PHY, then program
 * the MAC to match: port mode (MII/GMII), duplex, link polarity, LED
 * overrides and flow control.
 *
 * @tp:          device private state
 * @force_reset: non-zero to reset the PHY before link evaluation
 *
 * Returns 0, or a PHY error from the BCM5401 DSP re-init path.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tg3_clear_mac_status(tp);

	/* Stop hardware MII auto-polling while we access the PHY. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
	    tp->link_up) {
		/* BMSR link status latches low; read twice so the second
		 * read reflects the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* No link: reload the 5401 DSP patch, then wait up
			 * to ~10ms for link to come back.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit: still no link, so reset the
			 * PHY and re-apply the DSP patch once more.
			 */
			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	/* Unmask only link-change, or everything on non-FET PHYs. */
	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		/* Make sure bit 10 of the misc-test shadow register is
		 * set; if we just set it, skip straight to renegotiation.
		 */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	/* Poll for link (latched BMSR, hence the double read). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait up to ~20ms for aux status to report a valid
		 * speed/duplex resolution.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Read BMCR until it returns a stable, sane value
		 * (0x7fff looks like a failed/floating read).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Autoneg: the link only counts if the PHY is
			 * autonegotiating with our configured advertisement
			 * and the partner's abilities can be fetched.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = 1;
		} else {
			/* Forced mode: resolved speed/duplex must match
			 * the requested configuration.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			/* Record crossover (MDI-X) status; the register
			 * and bit differ between FET and regular PHYs.
			 */
			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		/* A Robo-switch-backed port always reports link up. */
		if (tg3_flag(tp, ROBOSWITCH)) {
			current_link_up = 1;
			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
			current_speed = SPEED_1000;
			current_duplex = DUPLEX_FULL;
			tp->link_config.active_speed = current_speed;
			tp->link_config.active_duplex = current_duplex;
		}

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	/* Program the MAC port mode to match the resolved speed. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* In order for the 5750 core in BCM4785 chip to work properly
	 * in RGMII mode, the Led Control Register must be set up.
	 */
	if (tg3_flag(tp, RGMII_MODE)) {
		u32 led_ctrl = tr32(MAC_LED_CTRL);
		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);

		if (tp->link_config.active_speed == SPEED_10)
			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
		else if (tp->link_config.active_speed == SPEED_100)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_100MBPS_ON);
		else if (tp->link_config.active_speed == SPEED_1000)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_1000MBPS_ON);

		tw32(MAC_LED_CTRL, led_ctrl);
		udelay(40);
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / high-speed PCI: signal link-up to
	 * the on-chip firmware via the firmware mailbox.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN);
		else
			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
						 PCI_EXP_LNKCTL_CLKREQ_EN);
	}

	tg3_test_and_report_link_chg(tp, current_link_up);

	return 0;
}
4741
/* Software state for the fiber (1000BASE-X) autonegotiation state
 * machine driven by tg3_fiber_aneg_smachine().  The MR_* flag names
 * appear to follow the autonegotiation management variables of
 * IEEE 802.3 clause 37 — confirm against the spec before relying on
 * exact semantics.
 */
struct tg3_fiber_aneginfo {
	int state;	/* current ANEG_STATE_* of the state machine */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;	/* MR_* control and link-partner ability bits */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick counters: cur_time advances once per state-machine call,
	 * link_time records when the current state was entered.
	 */
	unsigned long link_time, cur_time;

	/* Last received config word and how many consecutive times it
	 * repeated (ability match requires a stable value).
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* config words sent/received on the wire */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* State-settle threshold, in cur_time ticks of the state machine. */
#define ANEG_STATE_SETTLE_TIME  10000
4805
4806 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4807                                    struct tg3_fiber_aneginfo *ap)
4808 {
4809         u16 flowctrl;
4810         unsigned long delta;
4811         u32 rx_cfg_reg;
4812         int ret;
4813
4814         if (ap->state == ANEG_STATE_UNKNOWN) {
4815                 ap->rxconfig = 0;
4816                 ap->link_time = 0;
4817                 ap->cur_time = 0;
4818                 ap->ability_match_cfg = 0;
4819                 ap->ability_match_count = 0;
4820                 ap->ability_match = 0;
4821                 ap->idle_match = 0;
4822                 ap->ack_match = 0;
4823         }
4824         ap->cur_time++;
4825
4826         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4827                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4828
4829                 if (rx_cfg_reg != ap->ability_match_cfg) {
4830                         ap->ability_match_cfg = rx_cfg_reg;
4831                         ap->ability_match = 0;
4832                         ap->ability_match_count = 0;
4833                 } else {
4834                         if (++ap->ability_match_count > 1) {
4835                                 ap->ability_match = 1;
4836                                 ap->ability_match_cfg = rx_cfg_reg;
4837                         }
4838                 }
4839                 if (rx_cfg_reg & ANEG_CFG_ACK)
4840                         ap->ack_match = 1;
4841                 else
4842                         ap->ack_match = 0;
4843
4844                 ap->idle_match = 0;
4845         } else {
4846                 ap->idle_match = 1;
4847                 ap->ability_match_cfg = 0;
4848                 ap->ability_match_count = 0;
4849                 ap->ability_match = 0;
4850                 ap->ack_match = 0;
4851
4852                 rx_cfg_reg = 0;
4853         }
4854
4855         ap->rxconfig = rx_cfg_reg;
4856         ret = ANEG_OK;
4857
4858         switch (ap->state) {
4859         case ANEG_STATE_UNKNOWN:
4860                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4861                         ap->state = ANEG_STATE_AN_ENABLE;
4862
4863                 /* fallthru */
4864         case ANEG_STATE_AN_ENABLE:
4865                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4866                 if (ap->flags & MR_AN_ENABLE) {
4867                         ap->link_time = 0;
4868                         ap->cur_time = 0;
4869                         ap->ability_match_cfg = 0;
4870                         ap->ability_match_count = 0;
4871                         ap->ability_match = 0;
4872                         ap->idle_match = 0;
4873                         ap->ack_match = 0;
4874
4875                         ap->state = ANEG_STATE_RESTART_INIT;
4876                 } else {
4877                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4878                 }
4879                 break;
4880
4881         case ANEG_STATE_RESTART_INIT:
4882                 ap->link_time = ap->cur_time;
4883                 ap->flags &= ~(MR_NP_LOADED);
4884                 ap->txconfig = 0;
4885                 tw32(MAC_TX_AUTO_NEG, 0);
4886                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4887                 tw32_f(MAC_MODE, tp->mac_mode);
4888                 udelay(40);
4889
4890                 ret = ANEG_TIMER_ENAB;
4891                 ap->state = ANEG_STATE_RESTART;
4892
4893                 /* fallthru */
4894         case ANEG_STATE_RESTART:
4895                 delta = ap->cur_time - ap->link_time;
4896                 if (delta > ANEG_STATE_SETTLE_TIME)
4897                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4898                 else
4899                         ret = ANEG_TIMER_ENAB;
4900                 break;
4901
4902         case ANEG_STATE_DISABLE_LINK_OK:
4903                 ret = ANEG_DONE;
4904                 break;
4905
4906         case ANEG_STATE_ABILITY_DETECT_INIT:
4907                 ap->flags &= ~(MR_TOGGLE_TX);
4908                 ap->txconfig = ANEG_CFG_FD;
4909                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4910                 if (flowctrl & ADVERTISE_1000XPAUSE)
4911                         ap->txconfig |= ANEG_CFG_PS1;
4912                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4913                         ap->txconfig |= ANEG_CFG_PS2;
4914                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4915                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4916                 tw32_f(MAC_MODE, tp->mac_mode);
4917                 udelay(40);
4918
4919                 ap->state = ANEG_STATE_ABILITY_DETECT;
4920                 break;
4921
4922         case ANEG_STATE_ABILITY_DETECT:
4923                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4924                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4925                 break;
4926
4927         case ANEG_STATE_ACK_DETECT_INIT:
4928                 ap->txconfig |= ANEG_CFG_ACK;
4929                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4930                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4931                 tw32_f(MAC_MODE, tp->mac_mode);
4932                 udelay(40);
4933
4934                 ap->state = ANEG_STATE_ACK_DETECT;
4935
4936                 /* fallthru */
4937         case ANEG_STATE_ACK_DETECT:
4938                 if (ap->ack_match != 0) {
4939                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4940                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4941                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4942                         } else {
4943                                 ap->state = ANEG_STATE_AN_ENABLE;
4944                         }
4945                 } else if (ap->ability_match != 0 &&
4946                            ap->rxconfig == 0) {
4947                         ap->state = ANEG_STATE_AN_ENABLE;
4948                 }
4949                 break;
4950
4951         case ANEG_STATE_COMPLETE_ACK_INIT:
4952                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4953                         ret = ANEG_FAILED;
4954                         break;
4955                 }
4956                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4957                                MR_LP_ADV_HALF_DUPLEX |
4958                                MR_LP_ADV_SYM_PAUSE |
4959                                MR_LP_ADV_ASYM_PAUSE |
4960                                MR_LP_ADV_REMOTE_FAULT1 |
4961                                MR_LP_ADV_REMOTE_FAULT2 |
4962                                MR_LP_ADV_NEXT_PAGE |
4963                                MR_TOGGLE_RX |
4964                                MR_NP_RX);
4965                 if (ap->rxconfig & ANEG_CFG_FD)
4966                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4967                 if (ap->rxconfig & ANEG_CFG_HD)
4968                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4969                 if (ap->rxconfig & ANEG_CFG_PS1)
4970                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4971                 if (ap->rxconfig & ANEG_CFG_PS2)
4972                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4973                 if (ap->rxconfig & ANEG_CFG_RF1)
4974                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4975                 if (ap->rxconfig & ANEG_CFG_RF2)
4976                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4977                 if (ap->rxconfig & ANEG_CFG_NP)
4978                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4979
4980                 ap->link_time = ap->cur_time;
4981
4982                 ap->flags ^= (MR_TOGGLE_TX);
4983                 if (ap->rxconfig & 0x0008)
4984                         ap->flags |= MR_TOGGLE_RX;
4985                 if (ap->rxconfig & ANEG_CFG_NP)
4986                         ap->flags |= MR_NP_RX;
4987                 ap->flags |= MR_PAGE_RX;
4988
4989                 ap->state = ANEG_STATE_COMPLETE_ACK;
4990                 ret = ANEG_TIMER_ENAB;
4991                 break;
4992
4993         case ANEG_STATE_COMPLETE_ACK:
4994                 if (ap->ability_match != 0 &&
4995                     ap->rxconfig == 0) {
4996                         ap->state = ANEG_STATE_AN_ENABLE;
4997                         break;
4998                 }
4999                 delta = ap->cur_time - ap->link_time;
5000                 if (delta > ANEG_STATE_SETTLE_TIME) {
5001                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5002                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5003                         } else {
5004                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5005                                     !(ap->flags & MR_NP_RX)) {
5006                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5007                                 } else {
5008                                         ret = ANEG_FAILED;
5009                                 }
5010                         }
5011                 }
5012                 break;
5013
5014         case ANEG_STATE_IDLE_DETECT_INIT:
5015                 ap->link_time = ap->cur_time;
5016                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5017                 tw32_f(MAC_MODE, tp->mac_mode);
5018                 udelay(40);
5019
5020                 ap->state = ANEG_STATE_IDLE_DETECT;
5021                 ret = ANEG_TIMER_ENAB;
5022                 break;
5023
5024         case ANEG_STATE_IDLE_DETECT:
5025                 if (ap->ability_match != 0 &&
5026                     ap->rxconfig == 0) {
5027                         ap->state = ANEG_STATE_AN_ENABLE;
5028                         break;
5029                 }
5030                 delta = ap->cur_time - ap->link_time;
5031                 if (delta > ANEG_STATE_SETTLE_TIME) {
5032                         /* XXX another gem from the Broadcom driver :( */
5033                         ap->state = ANEG_STATE_LINK_OK;
5034                 }
5035                 break;
5036
5037         case ANEG_STATE_LINK_OK:
5038                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5039                 ret = ANEG_DONE;
5040                 break;
5041
5042         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5043                 /* ??? unimplemented */
5044                 break;
5045
5046         case ANEG_STATE_NEXT_PAGE_WAIT:
5047                 /* ??? unimplemented */
5048                 break;
5049
5050         default:
5051                 ret = ANEG_FAILED;
5052                 break;
5053         }
5054
5055         return ret;
5056 }
5057
5058 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5059 {
5060         int res = 0;
5061         struct tg3_fiber_aneginfo aninfo;
5062         int status = ANEG_FAILED;
5063         unsigned int tick;
5064         u32 tmp;
5065
5066         tw32_f(MAC_TX_AUTO_NEG, 0);
5067
5068         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5069         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5070         udelay(40);
5071
5072         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5073         udelay(40);
5074
5075         memset(&aninfo, 0, sizeof(aninfo));
5076         aninfo.flags |= MR_AN_ENABLE;
5077         aninfo.state = ANEG_STATE_UNKNOWN;
5078         aninfo.cur_time = 0;
5079         tick = 0;
5080         while (++tick < 195000) {
5081                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5082                 if (status == ANEG_DONE || status == ANEG_FAILED)
5083                         break;
5084
5085                 udelay(1);
5086         }
5087
5088         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5089         tw32_f(MAC_MODE, tp->mac_mode);
5090         udelay(40);
5091
5092         *txflags = aninfo.txconfig;
5093         *rxflags = aninfo.flags;
5094
5095         if (status == ANEG_DONE &&
5096             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5097                              MR_LP_ADV_FULL_DUPLEX)))
5098                 res = 1;
5099
5100         return res;
5101 }
5102
/* One-time bring-up of the external BCM8002 SERDES PHY: widen the PLL
 * lock range, soft-reset the PHY, enable auto-lock/comdet, and pulse
 * power-on-reset.  The register numbers and values are BCM8002-specific
 * magic inherited from Broadcom's reference code; the write ordering
 * matters and must not be changed.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
5152
/* Bring up a fiber link using the SG_DIG hardware autoneg block
 * (5704S-style serdes).  @mac_status is the MAC_STATUS value sampled
 * by the caller.  Returns nonzero when the link should be considered
 * up.  Falls back to parallel detection when autoneg times out.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* All revisions except 5704 A0/A1 need the MAC_SERDES_CFG
	 * workaround writes done below.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: tear down HW autoneg if it was active,
		 * then report link up whenever PCS is synced.
		 */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* Keep a parallel-detected link alive while the serdes
		 * timer runs down, as long as PCS stays synced and no
		 * config code words are being received.
		 */
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		/* (Re)start HW autoneg with a soft reset pulse. */
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Autoneg finished: derive pause advertisement
			 * for flow control resolution.
			 */
			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: drop back to the
				 * common setup and attempt parallel
				 * detection instead.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: rearm the autoneg timer. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
5297
5298 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5299 {
5300         int current_link_up = 0;
5301
5302         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5303                 goto out;
5304
5305         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5306                 u32 txflags, rxflags;
5307                 int i;
5308
5309                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5310                         u32 local_adv = 0, remote_adv = 0;
5311
5312                         if (txflags & ANEG_CFG_PS1)
5313                                 local_adv |= ADVERTISE_1000XPAUSE;
5314                         if (txflags & ANEG_CFG_PS2)
5315                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5316
5317                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5318                                 remote_adv |= LPA_1000XPAUSE;
5319                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5320                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5321
5322                         tp->link_config.rmt_adv =
5323                                            mii_adv_to_ethtool_adv_x(remote_adv);
5324
5325                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5326
5327                         current_link_up = 1;
5328                 }
5329                 for (i = 0; i < 30; i++) {
5330                         udelay(20);
5331                         tw32_f(MAC_STATUS,
5332                                (MAC_STATUS_SYNC_CHANGED |
5333                                 MAC_STATUS_CFG_CHANGED));
5334                         udelay(40);
5335                         if ((tr32(MAC_STATUS) &
5336                              (MAC_STATUS_SYNC_CHANGED |
5337                               MAC_STATUS_CFG_CHANGED)) == 0)
5338                                 break;
5339                 }
5340
5341                 mac_status = tr32(MAC_STATUS);
5342                 if (current_link_up == 0 &&
5343                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5344                     !(mac_status & MAC_STATUS_RCVD_CFG))
5345                         current_link_up = 1;
5346         } else {
5347                 tg3_setup_flow_control(tp, 0, 0);
5348
5349                 /* Forcing 1000FD link up. */
5350                 current_link_up = 1;
5351
5352                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5353                 udelay(40);
5354
5355                 tw32_f(MAC_MODE, tp->mac_mode);
5356                 udelay(40);
5357         }
5358
5359 out:
5360         return current_link_up;
5361 }
5362
/* Top-level link setup for TBI fiber ports.  Selects the SG_DIG
 * hardware autoneg path or the by-hand software path, then latches
 * the result into link_config and drives the link LEDs.  Always
 * returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember the pre-setup state so we only emit a link report
	 * when something actually changed.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: link already up and clean (synced, signal
	 * detected, no pending config change) - just ack the change
	 * bits and keep the current state.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the port into TBI mode before any negotiation. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the status block's link-changed indication. */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack the latched change bits until they stay clear. */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Once the autoneg timer has fully expired, nudge the
		 * peer by pulsing SEND_CONFIGS.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* A TBI fiber link is always 1000FD when up; set the LEDs to
	 * match the resolved state.
	 */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Even when the up/down state did not flip, report any change
	 * in pause, speed, or duplex.
	 */
	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5465
5466 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5467 {
5468         int current_link_up = 0, err = 0;
5469         u32 bmsr, bmcr;
5470         u16 current_speed = SPEED_UNKNOWN;
5471         u8 current_duplex = DUPLEX_UNKNOWN;
5472         u32 local_adv, remote_adv, sgsr;
5473
5474         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5475              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5476              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5477              (sgsr & SERDES_TG3_SGMII_MODE)) {
5478
5479                 if (force_reset)
5480                         tg3_phy_reset(tp);
5481
5482                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5483
5484                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5485                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5486                 } else {
5487                         current_link_up = 1;
5488                         if (sgsr & SERDES_TG3_SPEED_1000) {
5489                                 current_speed = SPEED_1000;
5490                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5491                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5492                                 current_speed = SPEED_100;
5493                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5494                         } else {
5495                                 current_speed = SPEED_10;
5496                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5497                         }
5498
5499                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5500                                 current_duplex = DUPLEX_FULL;
5501                         else
5502                                 current_duplex = DUPLEX_HALF;
5503                 }
5504
5505                 tw32_f(MAC_MODE, tp->mac_mode);
5506                 udelay(40);
5507
5508                 tg3_clear_mac_status(tp);
5509
5510                 goto fiber_setup_done;
5511         }
5512
5513         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5514         tw32_f(MAC_MODE, tp->mac_mode);
5515         udelay(40);
5516
5517         tg3_clear_mac_status(tp);
5518
5519         if (force_reset)
5520                 tg3_phy_reset(tp);
5521
5522         tp->link_config.rmt_adv = 0;
5523
5524         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5525         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5526         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5527                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5528                         bmsr |= BMSR_LSTATUS;
5529                 else
5530                         bmsr &= ~BMSR_LSTATUS;
5531         }
5532
5533         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5534
5535         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5536             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5537                 /* do nothing, just check for link up at the end */
5538         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5539                 u32 adv, newadv;
5540
5541                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5542                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5543                                  ADVERTISE_1000XPAUSE |
5544                                  ADVERTISE_1000XPSE_ASYM |
5545                                  ADVERTISE_SLCT);
5546
5547                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5548                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5549
5550                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5551                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5552                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5553                         tg3_writephy(tp, MII_BMCR, bmcr);
5554
5555                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5556                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5557                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5558
5559                         return err;
5560                 }
5561         } else {
5562                 u32 new_bmcr;
5563
5564                 bmcr &= ~BMCR_SPEED1000;
5565                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5566
5567                 if (tp->link_config.duplex == DUPLEX_FULL)
5568                         new_bmcr |= BMCR_FULLDPLX;
5569
5570                 if (new_bmcr != bmcr) {
5571                         /* BMCR_SPEED1000 is a reserved bit that needs
5572                          * to be set on write.
5573                          */
5574                         new_bmcr |= BMCR_SPEED1000;
5575
5576                         /* Force a linkdown */
5577                         if (tp->link_up) {
5578                                 u32 adv;
5579
5580                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5581                                 adv &= ~(ADVERTISE_1000XFULL |
5582                                          ADVERTISE_1000XHALF |
5583                                          ADVERTISE_SLCT);
5584                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5585                                 tg3_writephy(tp, MII_BMCR, bmcr |
5586                                                            BMCR_ANRESTART |
5587                                                            BMCR_ANENABLE);
5588                                 udelay(10);
5589                                 tg3_carrier_off(tp);
5590                         }
5591                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5592                         bmcr = new_bmcr;
5593                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5594                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5595                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5596                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5597                                         bmsr |= BMSR_LSTATUS;
5598                                 else
5599                                         bmsr &= ~BMSR_LSTATUS;
5600                         }
5601                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5602                 }
5603         }
5604
5605         if (bmsr & BMSR_LSTATUS) {
5606                 current_speed = SPEED_1000;
5607                 current_link_up = 1;
5608                 if (bmcr & BMCR_FULLDPLX)
5609                         current_duplex = DUPLEX_FULL;
5610                 else
5611                         current_duplex = DUPLEX_HALF;
5612
5613                 local_adv = 0;
5614                 remote_adv = 0;
5615
5616                 if (bmcr & BMCR_ANENABLE) {
5617                         u32 common;
5618
5619                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5620                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5621                         common = local_adv & remote_adv;
5622                         if (common & (ADVERTISE_1000XHALF |
5623                                       ADVERTISE_1000XFULL)) {
5624                                 if (common & ADVERTISE_1000XFULL)
5625                                         current_duplex = DUPLEX_FULL;
5626                                 else
5627                                         current_duplex = DUPLEX_HALF;
5628
5629                                 tp->link_config.rmt_adv =
5630                                            mii_adv_to_ethtool_adv_x(remote_adv);
5631                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5632                                 /* Link is up via parallel detect */
5633                         } else {
5634                                 current_link_up = 0;
5635                         }
5636                 }
5637         }
5638
5639 fiber_setup_done:
5640         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5641                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5642
5643         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5644         if (tp->link_config.active_duplex == DUPLEX_HALF)
5645                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5646
5647         tw32_f(MAC_MODE, tp->mac_mode);
5648         udelay(40);
5649
5650         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5651
5652         tp->link_config.active_speed = current_speed;
5653         tp->link_config.active_duplex = current_duplex;
5654
5655         tg3_test_and_report_link_chg(tp, current_link_up);
5656         return err;
5657 }
5658
/* Fiber "parallel detect" poller, run from the periodic timer.
 *
 * If autonegotiation is enabled but has not produced a link, probe the
 * PHY: when signal detect is present and no configuration code words
 * are being received, the partner is not autonegotiating, so force
 * 1000/full and mark the link as established by parallel detection.
 * Conversely, if a parallel-detected link later starts receiving
 * config words, re-enable autonegotiation.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* NOTE(review): read twice — presumably the first
			 * read clears latched status; confirm with the PHY
			 * datasheet.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
5718
/* Bring up whichever PHY variant this board uses, then apply the
 * link-state-dependent MAC fixups (tx IPG/slot time, statistics
 * coalescing, ASPM threshold workaround).
 *
 * Returns the status of the PHY-type-specific setup routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	/* Dispatch on PHY type: fiber SERDES, MII-based SERDES, or copper. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		/* 5784 AX: reprogram the GRC prescaler to match the
		 * current MAC clock frequency.
		 */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	/* Base inter-packet gap settings ... */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	/* ... preserving the jumbo-frame-length and countdown fields on
	 * chips that have them.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* Gigabit half-duplex gets a larger slot time. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Only coalesce statistics while the link is up. */
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		/* Program the PCIe L1 power-management threshold:
		 * tp->pwrmgmt_thresh while the link is down, the maximum
		 * field value while it is up.
		 */
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
5784
5785 /* tp->lock must be held */
5786 static u64 tg3_refclk_read(struct tg3 *tp)
5787 {
5788         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5789         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5790 }
5791
5792 /* tp->lock must be held */
5793 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5794 {
5795         tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5796         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5797         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5798         tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5799 }
5800
5801 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5802 static inline void tg3_full_unlock(struct tg3 *tp);
5803 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5804 {
5805         struct tg3 *tp = netdev_priv(dev);
5806
5807         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5808                                 SOF_TIMESTAMPING_RX_SOFTWARE |
5809                                 SOF_TIMESTAMPING_SOFTWARE    |
5810                                 SOF_TIMESTAMPING_TX_HARDWARE |
5811                                 SOF_TIMESTAMPING_RX_HARDWARE |
5812                                 SOF_TIMESTAMPING_RAW_HARDWARE;
5813
5814         if (tp->ptp_clock)
5815                 info->phc_index = ptp_clock_index(tp->ptp_clock);
5816         else
5817                 info->phc_index = -1;
5818
5819         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5820
5821         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5822                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5823                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5824                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5825         return 0;
5826 }
5827
5828 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5829 {
5830         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5831         bool neg_adj = false;
5832         u32 correction = 0;
5833
5834         if (ppb < 0) {
5835                 neg_adj = true;
5836                 ppb = -ppb;
5837         }
5838
5839         /* Frequency adjustment is performed using hardware with a 24 bit
5840          * accumulator and a programmable correction value. On each clk, the
5841          * correction value gets added to the accumulator and when it
5842          * overflows, the time counter is incremented/decremented.
5843          *
5844          * So conversion from ppb to correction value is
5845          *              ppb * (1 << 24) / 1000000000
5846          */
5847         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5848                      TG3_EAV_REF_CLK_CORRECT_MASK;
5849
5850         tg3_full_lock(tp, 0);
5851
5852         if (correction)
5853                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5854                      TG3_EAV_REF_CLK_CORRECT_EN |
5855                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5856         else
5857                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5858
5859         tg3_full_unlock(tp);
5860
5861         return 0;
5862 }
5863
5864 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5865 {
5866         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5867
5868         tg3_full_lock(tp, 0);
5869         tp->ptp_adjust += delta;
5870         tg3_full_unlock(tp);
5871
5872         return 0;
5873 }
5874
5875 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5876 {
5877         u64 ns;
5878         u32 remainder;
5879         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5880
5881         tg3_full_lock(tp, 0);
5882         ns = tg3_refclk_read(tp);
5883         ns += tp->ptp_adjust;
5884         tg3_full_unlock(tp);
5885
5886         ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5887         ts->tv_nsec = remainder;
5888
5889         return 0;
5890 }
5891
5892 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5893                            const struct timespec *ts)
5894 {
5895         u64 ns;
5896         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5897
5898         ns = timespec_to_ns(ts);
5899
5900         tg3_full_lock(tp, 0);
5901         tg3_refclk_write(tp, ns);
5902         tp->ptp_adjust = 0;
5903         tg3_full_unlock(tp);
5904
5905         return 0;
5906 }
5907
5908 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5909                           struct ptp_clock_request *rq, int on)
5910 {
5911         return -EOPNOTSUPP;
5912 }
5913
/* PTP clock capabilities registered with the PTP core.  Only time and
 * frequency adjustment are supported; tg3_ptp_enable rejects every
 * ancillary feature request.
 */
static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,	/* max frequency adjustment (ppb) */
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime	= tg3_ptp_gettime,
	.settime	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};
5928
5929 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5930                                      struct skb_shared_hwtstamps *timestamp)
5931 {
5932         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5933         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5934                                            tp->ptp_adjust);
5935 }
5936
5937 /* tp->lock must be held */
5938 static void tg3_ptp_init(struct tg3 *tp)
5939 {
5940         if (!tg3_flag(tp, PTP_CAPABLE))
5941                 return;
5942
5943         /* Initialize the hardware clock to the system time. */
5944         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5945         tp->ptp_adjust = 0;
5946         tp->ptp_info = tg3_ptp_caps;
5947 }
5948
5949 /* tp->lock must be held */
5950 static void tg3_ptp_resume(struct tg3 *tp)
5951 {
5952         if (!tg3_flag(tp, PTP_CAPABLE))
5953                 return;
5954
5955         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5956         tp->ptp_adjust = 0;
5957 }
5958
5959 static void tg3_ptp_fini(struct tg3 *tp)
5960 {
5961         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5962                 return;
5963
5964         ptp_clock_unregister(tp->ptp_clock);
5965         tp->ptp_clock = NULL;
5966         tp->ptp_adjust = 0;
5967 }
5968
5969 static inline int tg3_irq_sync(struct tg3 *tp)
5970 {
5971         return tp->irq_sync;
5972 }
5973
/* Bulk-read a register window for a state dump: copy 'len' bytes of
 * registers starting at offset 'off' into the dump buffer 'dst'.  Each
 * value lands at the buffer position matching its register offset —
 * note that dst is deliberately advanced by 'off' before the copy.
 */
static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	int i;

	/* Index the dump buffer by register offset. */
	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
5982
/* Dump the register blocks of non-PCIe devices into 'regs' (a zeroed
 * buffer of TG3_REG_BLK_SIZE bytes, see tg3_dump_state).  Each
 * tg3_rd32_loop call captures one hardware block as
 * (start offset, byte length); untouched offsets stay zero so
 * tg3_dump_state can skip them when printing.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Per-vector coalescing registers only exist with MSI-X. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* Older chips have a separate tx CPU as well. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
6032
/* Log a snapshot of the chip's registers and the per-vector status
 * block / NAPI state to the kernel log, for debugging wedged hardware.
 * The scratch buffer uses GFP_ATOMIC so the dump can run from any
 * context; if allocation fails the register dump is silently skipped.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four registers per line, skipping all-zero groups. */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	/* Dump software-visible per-vector state. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
6088
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround or indirect mailbox writes are
	 * already in effect, this path should be unreachable.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Flag the pending reset; the workqueue performs the chip reset. */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
6110
/* Number of free descriptors in the tx ring serviced by 'tnapi',
 * computed from the producer/consumer distance modulo the ring size.
 */
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
6118
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 *
 * Reclaims completed tx descriptors for one NAPI context: unmaps the
 * DMA buffers, frees the skbs, reports completions to the stack, and
 * wakes the tx queue if enough space opened up.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	/* Hardware's consumer index, from the status block. */
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* NOTE(review): with TSS the queue index is shifted down by one —
	 * presumably tx rings start at napi[1]; confirm against ring setup.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	/* Walk every descriptor the hardware has consumed. */
	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb indicates the mailbox
		 * write-reorder problem; schedule recovery and bail.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Deliver the hardware tx timestamp if one was requested. */
		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		/* Unmap the linear (head) portion of the skb. */
		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip over extra slots marked 'fragmented' — presumably
		 * descriptors split by a DMA workaround; TODO confirm.
		 */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap each page fragment of the skb. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* Fragment slots must be skb-less and must not
			 * run past the hardware index.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-check under the tx lock before waking the queue. */
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
6222
6223 static void tg3_frag_free(bool is_frag, void *data)
6224 {
6225         if (is_frag)
6226                 put_page(virt_to_head_page(data));
6227         else
6228                 kfree(data);
6229 }
6230
6231 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6232 {
6233         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6234                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6235
6236         if (!ri->data)
6237                 return;
6238
6239         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6240                          map_sz, PCI_DMA_FROMDEVICE);
6241         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6242         ri->data = NULL;
6243 }
6244
6245
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 *
 * @opaque_key selects the standard or jumbo producer ring;
 * @dest_idx_unmasked is the slot to fill (masked by the ring mask);
 * *@frag_size is set to the allocation size when a page fragment was
 * used, or 0 when the buffer came from kmalloc.
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	/* Resolve the descriptor, bookkeeping slot, and buffer size for
	 * the requested ring.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	/* Small buffers come from the page-fragment allocator, large
	 * ones from kmalloc; tg3_frag_free mirrors this choice.
	 */
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	/* Commit: record the buffer and publish its DMA address in the
	 * descriptor the chip reads.
	 */
	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
6321
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 *
 * Moves an already-allocated rx buffer from the source ring slot
 * 'src_idx' (always in napi[0]'s producer set) to 'dest_idx_unmasked'
 * in 'dpr', reposting it to the chip without a fresh allocation.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	/* Transfer buffer ownership and DMA address to the new slot. */
	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
6371
6372 /* The RX ring scheme is composed of multiple rings which post fresh
6373  * buffers to the chip, and one special ring the chip uses to report
6374  * status back to the host.
6375  *
6376  * The special ring reports the status of received packets to the
6377  * host.  The chip does not write into the original descriptor the
6378  * RX buffer was obtained from.  The chip simply takes the original
6379  * descriptor as provided by the host, updates the status and length
6380  * field, then writes this into the next status ring entry.
6381  *
6382  * Each ring the host uses to post buffers to the chip is described
6383  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6384  * it is first placed into the on-chip ram.  When the packet's length
6385  * is known, it walks down the TG3_BDINFO entries to select the ring.
6386  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6387  * which is within the range of the new packet's length is chosen.
6388  *
6389  * The "separate ring for rx status" scheme may sound queer, but it makes
6390  * sense from a cache coherency perspective.  If only the host writes
6391  * to the buffer post rings, and only the chip writes to the rx status
6392  * rings, then cache lines never move beyond shared-modified state.
6393  * If both the host and chip were to write into the same ring, cache line
6394  * eviction could occur since both entities want it in an exclusive state.
6395  */
/* Service completed RX packets on this vector's return ring, passing at
 * most @budget of them up the stack.  Returns the number received.
 * Consumer state lives in tnapi->rx_rcb_ptr; the hardware producer index
 * is read through tnapi->rx_rcb_prod_idx.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;
		u64 tstamp = 0;

		/* The opaque cookie identifies which producer ring (std or
		 * jumbo) the buffer came from and its index within it.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;	/* unknown ring: skip, nothing to repost */

		work_mask |= opaque_key;

		/* Drop errored frames, unless the error word is exactly the
		 * odd-nibble-received MII indication, which is tolerated.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		/* Latch the hardware receive timestamp for PTP frames. */
		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		/* Large frame: hand the DMA buffer itself up the stack and
		 * post a freshly allocated replacement.  Small frame: copy
		 * into a new skb and recycle the DMA buffer in place.
		 */
		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						    *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		/* Trust the chip's checksum only when it computed a full,
		 * correct TCP/UDP checksum (0xffff).
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop frames longer than the MTU unless they are VLAN
		 * tagged (the tag accounts for the extra length).
		 */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Don't let the std producer index run too far ahead of what
		 * the chip has been told; flush it to hardware early.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* With RSS, napi[1] owns hardware buffer replenishment;
		 * wake it so it transfers our recycled buffers.
		 */
		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
6601
6602 static void tg3_poll_link(struct tg3 *tp)
6603 {
6604         /* handle link change and other phy events */
6605         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6606                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6607
6608                 if (sblk->status & SD_STATUS_LINK_CHG) {
6609                         sblk->status = SD_STATUS_UPDATED |
6610                                        (sblk->status & ~SD_STATUS_LINK_CHG);
6611                         spin_lock(&tp->lock);
6612                         if (tg3_flag(tp, USE_PHYLIB)) {
6613                                 tw32_f(MAC_STATUS,
6614                                      (MAC_STATUS_SYNC_CHANGED |
6615                                       MAC_STATUS_CFG_CHANGED |
6616                                       MAC_STATUS_MI_COMPLETION |
6617                                       MAC_STATUS_LNKSTATE_CHANGED));
6618                                 udelay(40);
6619                         } else
6620                                 tg3_setup_phy(tp, 0);
6621                         spin_unlock(&tp->lock);
6622                 }
6623         }
6624 }
6625
/* Transfer recycled RX buffers from a source producer ring set (@spr)
 * to the destination set (@dpr) that is posted to hardware.  Copies
 * both the ring_info bookkeeping and the buffer descriptor addresses,
 * first for the standard ring, then for the jumbo ring.  Returns 0 on
 * success or -ENOSPC when a destination slot was still occupied and a
 * transfer stopped short.
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	/* First pass: standard-sized buffers. */
	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Length of the contiguous source run, accounting for ring
		 * wraparound on both source and destination.
		 */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Stop at the first destination slot still holding a buffer;
		 * transfer only the free prefix and report -ENOSPC.
		 */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		/* Mirror the DMA addresses into the destination BDs. */
		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Second pass: jumbo buffers, same algorithm as above. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
6751
/* Per-vector poll work: reap TX completions, then RX completions, then
 * (for the RSS reclaim vector, napi[1]) move recycled buffers back onto
 * the hardware-visible producer ring in napi[0].  Returns the updated
 * work_done count; callers compare it against @budget.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* This vector has no RX return ring; nothing more to do. */
	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		/* Clear the refill request before transferring so a new
		 * request arriving mid-transfer is not lost.
		 */
		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* Publish the transferred BDs before the mailbox writes. */
		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* err means a destination slot was still busy (-ENOSPC from
		 * tg3_rx_prodring_xfer).  NOTE(review): the HOSTCC_MODE
		 * write appears intended to force another interrupt so the
		 * transfer is retried - confirm against register docs.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
6802
6803 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6804 {
6805         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6806                 schedule_work(&tp->reset_task);
6807 }
6808
/* Synchronously cancel any pending/running reset task, then clear the
 * pending flags so a future tg3_reset_task_schedule() can queue it
 * again.  The flags are cleared only after cancel_work_sync() returns,
 * guaranteeing the task is no longer executing.
 */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
6815
/* NAPI poll handler for the extra MSI-X vectors (tp->napi[1..n]).
 * Link and chip-error events are handled only by vector 0's tg3_poll().
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6875
6876 static void tg3_process_error(struct tg3 *tp)
6877 {
6878         u32 val;
6879         bool real_error = false;
6880
6881         if (tg3_flag(tp, ERROR_PROCESSED))
6882                 return;
6883
6884         /* Check Flow Attention register */
6885         val = tr32(HOSTCC_FLOW_ATTN);
6886         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6887                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6888                 real_error = true;
6889         }
6890
6891         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6892                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6893                 real_error = true;
6894         }
6895
6896         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6897                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6898                 real_error = true;
6899         }
6900
6901         if (!real_error)
6902                 return;
6903
6904         tg3_dump_state(tp);
6905
6906         tg3_flag_set(tp, ERROR_PROCESSED);
6907         tg3_reset_task_schedule(tp);
6908 }
6909
/* NAPI poll handler for vector 0 (the only poller in INTx/MSI mode).
 * Handles chip error events and link changes in addition to TX/RX work.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6957
6958 static void tg3_napi_disable(struct tg3 *tp)
6959 {
6960         int i;
6961
6962         for (i = tp->irq_cnt - 1; i >= 0; i--)
6963                 napi_disable(&tp->napi[i].napi);
6964 }
6965
6966 static void tg3_napi_enable(struct tg3 *tp)
6967 {
6968         int i;
6969
6970         for (i = 0; i < tp->irq_cnt; i++)
6971                 napi_enable(&tp->napi[i].napi);
6972 }
6973
6974 static void tg3_napi_init(struct tg3 *tp)
6975 {
6976         int i;
6977
6978         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6979         for (i = 1; i < tp->irq_cnt; i++)
6980                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6981 }
6982
6983 static void tg3_napi_fini(struct tg3 *tp)
6984 {
6985         int i;
6986
6987         for (i = 0; i < tp->irq_cnt; i++)
6988                 netif_napi_del(&tp->napi[i].napi);
6989 }
6990
/* Quiesce the data path: stop NAPI processing, drop carrier, and
 * disable the TX queues.  trans_start is refreshed first so the netdev
 * watchdog does not fire while the queues are intentionally stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}
6998
/* Restart the data path after tg3_netif_stop()/chip reset: wake TX
 * queues, restore carrier, re-enable NAPI and interrupts.
 * tp->lock must be held.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	/* Mark the status block updated so any pending events are
	 * noticed once interrupts are re-enabled.
	 */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
7017
/* Tell the interrupt handlers to stand down (via tp->irq_sync, checked
 * by tg3_irq_sync()) and wait for any handler already running on
 * another CPU to finish.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make irq_sync visible before waiting on in-flight handlers. */
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
7030
7031 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7032  * If irq_sync is non-zero, then the IRQ handler must be synchronized
7033  * with as well.  Most of the time, this is not necessary except when
7034  * shutting down the device.
7035  */
/* Take tp->lock, optionally quiescing the IRQ handlers first (see the
 * comment above).  Paired with tg3_full_unlock().
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
7042
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
7047
7048 /* One-shot MSI handler - Chip automatically disables interrupt
7049  * after sending MSI so driver doesn't have to do it.
7050  */
7051 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7052 {
7053         struct tg3_napi *tnapi = dev_id;
7054         struct tg3 *tp = tnapi->tp;
7055
7056         prefetch(tnapi->hw_status);
7057         if (tnapi->rx_rcb)
7058                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7059
7060         if (likely(!tg3_irq_sync(tp)))
7061                 napi_schedule(&tnapi->napi);
7062
7063         return IRQ_HANDLED;
7064 }
7065
7066 /* MSI ISR - No need to check for interrupt sharing and no need to
7067  * flush status block and interrupt mailbox. PCI ordering rules
7068  * guarantee that MSI will arrive after the status block.
7069  */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the caches for the status block and next RX descriptor. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	/* MSI is not shared (see comment above), so always handled. */
	return IRQ_RETVAL(1);
}
7091
/* Legacy INTx interrupt handler.  The line may be shared with other
 * devices, so we must first decide whether the interrupt is ours.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
7140
/* INTx interrupt handler for chips running with tagged status blocks.
 * The status tag, rather than SD_STATUS_UPDATED, indicates whether new
 * work has been posted since the last acknowledged tag.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
7192
/* ISR for interrupt test.  Used only by the self-test code to verify
 * that an interrupt can be delivered on the requested vector.
 */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	/* The interrupt is ours if the status block was updated or the
	 * INTA# line is still asserted (reading TG3PCI_PCISTATE also
	 * flushes the status block).  One observed interrupt is enough
	 * for the test, so disable further interrupts immediately.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
7207
7208 #ifdef CONFIG_NET_POLL_CONTROLLER
7209 static void tg3_poll_controller(struct net_device *dev)
7210 {
7211         int i;
7212         struct tg3 *tp = netdev_priv(dev);
7213
7214         if (tg3_irq_sync(tp))
7215                 return;
7216
7217         for (i = 0; i < tp->irq_cnt; i++)
7218                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7219 }
7220 #endif
7221
7222 static void tg3_tx_timeout(struct net_device *dev)
7223 {
7224         struct tg3 *tp = netdev_priv(dev);
7225
7226         if (netif_msg_tx_err(tp)) {
7227                 netdev_err(dev, "transmit timed out, resetting\n");
7228                 tg3_dump_state(tp);
7229         }
7230
7231         tg3_reset_task_schedule(tp);
7232 }
7233
7234 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7235 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7236 {
7237         u32 base = (u32) mapping & 0xffffffff;
7238
7239         return (base > 0xffffdcc0) && (base + len + 8 < base);
7240 }
7241
7242 /* Test for DMA addresses > 40-bit */
7243 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7244                                           int len)
7245 {
7246 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7247         if (tg3_flag(tp, 40BIT_DMA_BUG))
7248                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7249         return 0;
7250 #else
7251         return 0;
7252 #endif
7253 }
7254
7255 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7256                                  dma_addr_t mapping, u32 len, u32 flags,
7257                                  u32 mss, u32 vlan)
7258 {
7259         txbd->addr_hi = ((u64) mapping >> 32);
7260         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7261         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7262         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7263 }
7264
/* Map one TX buffer into hardware descriptors, splitting it into pieces
 * when the chip has a DMA length limit (tp->dma_limit).  Advances
 * *entry and decrements *budget for each descriptor consumed.  Returns
 * true when a hardware DMA bug condition was hit or the descriptor
 * budget ran out, in which case the caller must fall back to
 * tigon3_dma_hwbug_workaround().
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
                            dma_addr_t map, u32 len, u32 flags,
                            u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	/* Transfers of 8 bytes or less trip a DMA bug on some chips. */
	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		/* Emit dma_limit-sized pieces; only the last piece may
		 * carry TXD_FLAG_END.
		 */
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			/* Mark intermediate pieces so the unmap path can
			 * recognize descriptors that share one mapping.
			 */
			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				/* Final piece keeps the caller's flags,
				 * including TXD_FLAG_END if set.
				 */
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				/* Ran out of descriptors mid-split: clear
				 * the fragmented mark on the last emitted
				 * piece so unwinding stops correctly.
				 */
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		/* No DMA limit: one descriptor covers the whole buffer. */
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
7324
/* Release the DMA mappings of a queued skb, starting at ring index
 * @entry.  @last is the index of the skb's final page fragment, or -1
 * when only the linear head was mapped.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	/* Unmap the linear head. */
	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	/* Skip over extra descriptors the head was split into by
	 * tg3_tx_frag_set(); they all share the single mapping above.
	 */
	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		/* Again skip any split descriptors for this fragment. */
		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
7362
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Copies the skb into a freshly allocated linear buffer, on the
 * assumption that the new allocation avoids the problematic address
 * range, and queues that copy instead.  The original skb is always
 * consumed; on success *pskb points at the replacement.  Returns 0 on
 * success, -1 on failure (caller drops the packet).
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
                                       struct sk_buff **pskb,
                                       u32 *entry, u32 *budget,
                                       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	/* On the 5701, also grow the headroom so the copied data starts
	 * on a 4-byte boundary (see the alignment math below).
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				/* The copy still hit a bug condition or
				 * ran out of descriptors: unwind the
				 * mapping and give up.
				 */
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
7417
7418 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7419
7420 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7421  * TSO header is greater than 80 bytes.
7422  */
7423 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7424 {
7425         struct sk_buff *segs, *nskb;
7426         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7427
7428         /* Estimate the number of fragments in the worst case */
7429         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7430                 netif_stop_queue(tp->dev);
7431
7432                 /* netif_tx_stop_queue() must be done before checking
7433                  * checking tx index in tg3_tx_avail() below, because in
7434                  * tg3_tx(), we update tx index before checking for
7435                  * netif_tx_queue_stopped().
7436                  */
7437                 smp_mb();
7438                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7439                         return NETDEV_TX_BUSY;
7440
7441                 netif_wake_queue(tp->dev);
7442         }
7443
7444         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7445         if (IS_ERR(segs))
7446                 goto tg3_tso_bug_end;
7447
7448         do {
7449                 nskb = segs;
7450                 segs = segs->next;
7451                 nskb->next = NULL;
7452                 tg3_start_xmit(nskb, tp->dev);
7453         } while (segs);
7454
7455 tg3_tso_bug_end:
7456         dev_kfree_skb(skb);
7457
7458         return NETDEV_TX_OK;
7459 }
7460
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	/* With TSS enabled, TX vectors start one past napi[0]. */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		/* We modify headers below, so make sure they are private. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		/* L3+L4 header length, excluding the Ethernet header. */
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		/* TSO_BUG chips cannot handle TSO headers bigger than 80
		 * bytes; segment in software instead.
		 */
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode the header length into mss/base_flags in the
		 * layout each hardware TSO generation expects.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	/* Request a hardware TX timestamp when asked for and enabled. */
	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;


	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	/* Queue the linear head; TXD_FLAG_END only if there are no frags. */
	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		/* Undo everything queued so far and retry with a bounce
		 * copy of the whole packet.
		 */
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	/* Unwind the fragments mapped before the failing one, then drop. */
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
7683
7684 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7685 {
7686         if (enable) {
7687                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7688                                   MAC_MODE_PORT_MODE_MASK);
7689
7690                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7691
7692                 if (!tg3_flag(tp, 5705_PLUS))
7693                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7694
7695                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7696                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7697                 else
7698                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7699         } else {
7700                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7701
7702                 if (tg3_flag(tp, 5705_PLUS) ||
7703                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7704                     tg3_asic_rev(tp) == ASIC_REV_5700)
7705                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7706         }
7707
7708         tw32(MAC_MODE, tp->mac_mode);
7709         udelay(40);
7710 }
7711
/* Configure the PHY for loopback at @speed.  When @extlpbk is set, use
 * external loopback via tg3_phy_set_extloopbk() instead of the BMCR
 * loopback bit.  Also programs the MAC port mode to match the chosen
 * speed.  Returns 0 on success, -EIO if external loopback setup fails.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	/* Build the BMCR value for the requested speed, full duplex. */
	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		/* FET PHYs top out at 100 Mbps. */
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			/* Force master mode for external loopback. */
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	/* Point the MAC at the right port mode for the chosen speed. */
	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		/* Link polarity on 5700 depends on the exact PHY model. */
		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
7804
7805 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7806 {
7807         struct tg3 *tp = netdev_priv(dev);
7808
7809         if (features & NETIF_F_LOOPBACK) {
7810                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7811                         return;
7812
7813                 spin_lock_bh(&tp->lock);
7814                 tg3_mac_loopback(tp, true);
7815                 netif_carrier_on(tp->dev);
7816                 spin_unlock_bh(&tp->lock);
7817                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7818         } else {
7819                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7820                         return;
7821
7822                 spin_lock_bh(&tp->lock);
7823                 tg3_mac_loopback(tp, false);
7824                 /* Force link status check */
7825                 tg3_setup_phy(tp, 1);
7826                 spin_unlock_bh(&tp->lock);
7827                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7828         }
7829 }
7830
7831 static netdev_features_t tg3_fix_features(struct net_device *dev,
7832         netdev_features_t features)
7833 {
7834         struct tg3 *tp = netdev_priv(dev);
7835
7836         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7837                 features &= ~NETIF_F_ALL_TSO;
7838
7839         return features;
7840 }
7841
7842 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7843 {
7844         netdev_features_t changed = dev->features ^ features;
7845
7846         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7847                 tg3_set_loopback(dev, features);
7848
7849         return 0;
7850 }
7851
/* Free the rx data buffers attached to a producer ring set. */
static void tg3_rx_prodring_free(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
{
	int i;

	/* For per-vector ring sets (anything but napi[0]'s), only the
	 * slots between the consumer and producer indices hold buffers,
	 * so walk just that window (with ring-mask wraparound).
	 */
	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	/* The default ring set may be fully populated: free every slot. */
	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					TG3_RX_JMB_MAP_SZ);
	}
}
7885
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, -ENOMEM if no rx buffers at all could be
 * allocated (partial allocations just shrink the pending counts).
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	/* Per-vector ring sets only need their buffer bookkeeping
	 * cleared; the descriptor rings belong to napi[0]'s set.
	 */
	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	/* 5780-class chips use jumbo-sized buffers in the standard ring
	 * when the MTU exceeds the standard Ethernet payload.
	 */
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			/* Nothing allocated at all is fatal; otherwise
			 * run with a shrunken ring.
			 */
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	/* Same invariant setup for the jumbo ring. */
	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
		       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
7994
7995 static void tg3_rx_prodring_fini(struct tg3 *tp,
7996                                  struct tg3_rx_prodring_set *tpr)
7997 {
7998         kfree(tpr->rx_std_buffers);
7999         tpr->rx_std_buffers = NULL;
8000         kfree(tpr->rx_jmb_buffers);
8001         tpr->rx_jmb_buffers = NULL;
8002         if (tpr->rx_std) {
8003                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8004                                   tpr->rx_std, tpr->rx_std_mapping);
8005                 tpr->rx_std = NULL;
8006         }
8007         if (tpr->rx_jmb) {
8008                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8009                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8010                 tpr->rx_jmb = NULL;
8011         }
8012 }
8013
8014 static int tg3_rx_prodring_init(struct tg3 *tp,
8015                                 struct tg3_rx_prodring_set *tpr)
8016 {
8017         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8018                                       GFP_KERNEL);
8019         if (!tpr->rx_std_buffers)
8020                 return -ENOMEM;
8021
8022         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8023                                          TG3_RX_STD_RING_BYTES(tp),
8024                                          &tpr->rx_std_mapping,
8025                                          GFP_KERNEL);
8026         if (!tpr->rx_std)
8027                 goto err_out;
8028
8029         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8030                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8031                                               GFP_KERNEL);
8032                 if (!tpr->rx_jmb_buffers)
8033                         goto err_out;
8034
8035                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8036                                                  TG3_RX_JMB_RING_BYTES(tp),
8037                                                  &tpr->rx_jmb_mapping,
8038                                                  GFP_KERNEL);
8039                 if (!tpr->rx_jmb)
8040                         goto err_out;
8041         }
8042
8043         return 0;
8044
8045 err_out:
8046         tg3_rx_prodring_fini(tp, tpr);
8047         return -ENOMEM;
8048 }
8049
8050 /* Free up pending packets in all rx/tx rings.
8051  *
8052  * The chip has been shut down and the driver detached from
8053  * the networking, so no interrupts or new tx packets will
8054  * end up in the driver.  tp->{tx,}lock is not held and we are not
8055  * in an interrupt context and thus may sleep.
8056  */
8057 static void tg3_free_rings(struct tg3 *tp)
8058 {
8059         int i, j;
8060
8061         for (j = 0; j < tp->irq_cnt; j++) {
8062                 struct tg3_napi *tnapi = &tp->napi[j];
8063
8064                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8065
8066                 if (!tnapi->tx_buffers)
8067                         continue;
8068
8069                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8070                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8071
8072                         if (!skb)
8073                                 continue;
8074
8075                         tg3_tx_skb_unmap(tnapi, i,
8076                                          skb_shinfo(skb)->nr_frags - 1);
8077
8078                         dev_kfree_skb_any(skb);
8079                 }
8080                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8081         }
8082 }
8083
8084 /* Initialize tx/rx rings for packet processing.
8085  *
8086  * The chip has been shut down and the driver detached from
8087  * the networking, so no interrupts or new tx packets will
8088  * end up in the driver.  tp->{tx,}lock are held and thus
8089  * we may not sleep.
8090  */
8091 static int tg3_init_rings(struct tg3 *tp)
8092 {
8093         int i;
8094
8095         /* Free up all the SKBs. */
8096         tg3_free_rings(tp);
8097
8098         for (i = 0; i < tp->irq_cnt; i++) {
8099                 struct tg3_napi *tnapi = &tp->napi[i];
8100
8101                 tnapi->last_tag = 0;
8102                 tnapi->last_irq_tag = 0;
8103                 tnapi->hw_status->status = 0;
8104                 tnapi->hw_status->status_tag = 0;
8105                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8106
8107                 tnapi->tx_prod = 0;
8108                 tnapi->tx_cons = 0;
8109                 if (tnapi->tx_ring)
8110                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8111
8112                 tnapi->rx_rcb_ptr = 0;
8113                 if (tnapi->rx_rcb)
8114                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8115
8116                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8117                         tg3_free_rings(tp);
8118                         return -ENOMEM;
8119                 }
8120         }
8121
8122         return 0;
8123 }
8124
8125 static void tg3_mem_tx_release(struct tg3 *tp)
8126 {
8127         int i;
8128
8129         for (i = 0; i < tp->irq_max; i++) {
8130                 struct tg3_napi *tnapi = &tp->napi[i];
8131
8132                 if (tnapi->tx_ring) {
8133                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8134                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8135                         tnapi->tx_ring = NULL;
8136                 }
8137
8138                 kfree(tnapi->tx_buffers);
8139                 tnapi->tx_buffers = NULL;
8140         }
8141 }
8142
8143 static int tg3_mem_tx_acquire(struct tg3 *tp)
8144 {
8145         int i;
8146         struct tg3_napi *tnapi = &tp->napi[0];
8147
8148         /* If multivector TSS is enabled, vector 0 does not handle
8149          * tx interrupts.  Don't allocate any resources for it.
8150          */
8151         if (tg3_flag(tp, ENABLE_TSS))
8152                 tnapi++;
8153
8154         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8155                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8156                                             TG3_TX_RING_SIZE, GFP_KERNEL);
8157                 if (!tnapi->tx_buffers)
8158                         goto err_out;
8159
8160                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8161                                                     TG3_TX_RING_BYTES,
8162                                                     &tnapi->tx_desc_mapping,
8163                                                     GFP_KERNEL);
8164                 if (!tnapi->tx_ring)
8165                         goto err_out;
8166         }
8167
8168         return 0;
8169
8170 err_out:
8171         tg3_mem_tx_release(tp);
8172         return -ENOMEM;
8173 }
8174
8175 static void tg3_mem_rx_release(struct tg3 *tp)
8176 {
8177         int i;
8178
8179         for (i = 0; i < tp->irq_max; i++) {
8180                 struct tg3_napi *tnapi = &tp->napi[i];
8181
8182                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8183
8184                 if (!tnapi->rx_rcb)
8185                         continue;
8186
8187                 dma_free_coherent(&tp->pdev->dev,
8188                                   TG3_RX_RCB_RING_BYTES(tp),
8189                                   tnapi->rx_rcb,
8190                                   tnapi->rx_rcb_mapping);
8191                 tnapi->rx_rcb = NULL;
8192         }
8193 }
8194
8195 static int tg3_mem_rx_acquire(struct tg3 *tp)
8196 {
8197         unsigned int i, limit;
8198
8199         limit = tp->rxq_cnt;
8200
8201         /* If RSS is enabled, we need a (dummy) producer ring
8202          * set on vector zero.  This is the true hw prodring.
8203          */
8204         if (tg3_flag(tp, ENABLE_RSS))
8205                 limit++;
8206
8207         for (i = 0; i < limit; i++) {
8208                 struct tg3_napi *tnapi = &tp->napi[i];
8209
8210                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8211                         goto err_out;
8212
8213                 /* If multivector RSS is enabled, vector 0
8214                  * does not handle rx or tx interrupts.
8215                  * Don't allocate any resources for it.
8216                  */
8217                 if (!i && tg3_flag(tp, ENABLE_RSS))
8218                         continue;
8219
8220                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8221                                                    TG3_RX_RCB_RING_BYTES(tp),
8222                                                    &tnapi->rx_rcb_mapping,
8223                                                    GFP_KERNEL | __GFP_ZERO);
8224                 if (!tnapi->rx_rcb)
8225                         goto err_out;
8226         }
8227
8228         return 0;
8229
8230 err_out:
8231         tg3_mem_rx_release(tp);
8232         return -ENOMEM;
8233 }
8234
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
8239 static void tg3_free_consistent(struct tg3 *tp)
8240 {
8241         int i;
8242
8243         for (i = 0; i < tp->irq_cnt; i++) {
8244                 struct tg3_napi *tnapi = &tp->napi[i];
8245
8246                 if (tnapi->hw_status) {
8247                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8248                                           tnapi->hw_status,
8249                                           tnapi->status_mapping);
8250                         tnapi->hw_status = NULL;
8251                 }
8252         }
8253
8254         tg3_mem_rx_release(tp);
8255         tg3_mem_tx_release(tp);
8256
8257         if (tp->hw_stats) {
8258                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8259                                   tp->hw_stats, tp->stats_mapping);
8260                 tp->hw_stats = NULL;
8261         }
8262 }
8263
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
8268 static int tg3_alloc_consistent(struct tg3 *tp)
8269 {
8270         int i;
8271
8272         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8273                                           sizeof(struct tg3_hw_stats),
8274                                           &tp->stats_mapping,
8275                                           GFP_KERNEL | __GFP_ZERO);
8276         if (!tp->hw_stats)
8277                 goto err_out;
8278
8279         for (i = 0; i < tp->irq_cnt; i++) {
8280                 struct tg3_napi *tnapi = &tp->napi[i];
8281                 struct tg3_hw_status *sblk;
8282
8283                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8284                                                       TG3_HW_STATUS_SIZE,
8285                                                       &tnapi->status_mapping,
8286                                                       GFP_KERNEL | __GFP_ZERO);
8287                 if (!tnapi->hw_status)
8288                         goto err_out;
8289
8290                 sblk = tnapi->hw_status;
8291
8292                 if (tg3_flag(tp, ENABLE_RSS)) {
8293                         u16 *prodptr = NULL;
8294
8295                         /*
8296                          * When RSS is enabled, the status block format changes
8297                          * slightly.  The "rx_jumbo_consumer", "reserved",
8298                          * and "rx_mini_consumer" members get mapped to the
8299                          * other three rx return ring producer indexes.
8300                          */
8301                         switch (i) {
8302                         case 1:
8303                                 prodptr = &sblk->idx[0].rx_producer;
8304                                 break;
8305                         case 2:
8306                                 prodptr = &sblk->rx_jumbo_consumer;
8307                                 break;
8308                         case 3:
8309                                 prodptr = &sblk->reserved;
8310                                 break;
8311                         case 4:
8312                                 prodptr = &sblk->rx_mini_consumer;
8313                                 break;
8314                         }
8315                         tnapi->rx_rcb_prod_idx = prodptr;
8316                 } else {
8317                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8318                 }
8319         }
8320
8321         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8322                 goto err_out;
8323
8324         return 0;
8325
8326 err_out:
8327         tg3_free_consistent(tp);
8328         return -ENOMEM;
8329 }
8330
8331 #define MAX_WAIT_CNT 1000
8332
8333 /* To stop a block, clear the enable bit and poll till it
8334  * clears.  tp->lock is held.
8335  */
8336 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8337 {
8338         unsigned int i;
8339         u32 val;
8340
8341         if (tg3_flag(tp, 5705_PLUS)) {
8342                 switch (ofs) {
8343                 case RCVLSC_MODE:
8344                 case DMAC_MODE:
8345                 case MBFREE_MODE:
8346                 case BUFMGR_MODE:
8347                 case MEMARB_MODE:
8348                         /* We can't enable/disable these bits of the
8349                          * 5705/5750, just say success.
8350                          */
8351                         return 0;
8352
8353                 default:
8354                         break;
8355                 }
8356         }
8357
8358         val = tr32(ofs);
8359         val &= ~enable_bit;
8360         tw32_f(ofs, val);
8361
8362         for (i = 0; i < MAX_WAIT_CNT; i++) {
8363                 udelay(100);
8364                 val = tr32(ofs);
8365                 if ((val & enable_bit) == 0)
8366                         break;
8367         }
8368
8369         if (i == MAX_WAIT_CNT && !silent) {
8370                 dev_err(&tp->pdev->dev,
8371                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8372                         ofs, enable_bit);
8373                 return -ENODEV;
8374         }
8375
8376         return 0;
8377 }
8378
8379 /* tp->lock is held. */
/* Quiesce the NIC by disabling interrupts and stopping every rx/tx
 * hardware state machine, rx path first, then tx, then host
 * coalescing and memory blocks.  tp->lock is held.
 *
 * Returns 0 on success; err accumulates -ENODEV from any block that
 * failed to stop (unless @silent suppressed the per-block failure).
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
        int i, err;

        tg3_disable_ints(tp);

        /* Stop receiving at the MAC first so no new frames enter the
         * pipeline while the individual blocks below are halted.
         */
        tp->rx_mode &= ~RX_MODE_ENABLE;
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);

        /* Rx-side blocks. */
        err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

        /* Tx-side blocks. */
        err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        /* MAC_TX_MODE has no tg3_stop_block() helper; poll it by hand. */
        tp->tx_mode &= ~TX_MODE_ENABLE;
        tw32_f(MAC_TX_MODE, tp->tx_mode);

        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
                        break;
        }
        if (i >= MAX_WAIT_CNT) {
                dev_err(&tp->pdev->dev,
                        "%s timed out, TX_MODE_ENABLE will not clear "
                        "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
                err |= -ENODEV;
        }

        err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

        /* Pulse the FTQ reset to flush the flow-through queues. */
        tw32(FTQ_RESET, 0xffffffff);
        tw32(FTQ_RESET, 0x00000000);

        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

        /* Wipe every vector's status block so stale state cannot be
         * observed after the hardware is brought back up.
         */
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                if (tnapi->hw_status)
                        memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
        }

        return err;
}
8442
8443 /* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
        /* Only PCI_COMMAND is captured here; the other registers that a
         * chip reset clobbers are rewritten from values already cached
         * in the driver state by tg3_restore_pci_state().
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
8448
8449 /* Restore PCI state after chip reset */
/* Rewrite the PCI configuration registers that a chip reset clears,
 * from values cached in the driver state and saved by
 * tg3_save_pci_state().  The write order matters: indirect register
 * access must be re-enabled before anything else is touched.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
        u32 val;

        /* Re-enable indirect register accesses. */
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        /* Set MAX PCI retry to zero. */
        val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
            tg3_flag(tp, PCIX_MODE))
                val |= PCISTATE_RETRY_SAME_DMA;
        /* Allow reads and writes to the APE register and memory space. */
        if (tg3_flag(tp, ENABLE_APE))
                val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
                       PCISTATE_ALLOW_APE_SHMEM_WR |
                       PCISTATE_ALLOW_APE_PSPACE_WR;
        pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

        pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

        /* Cache line size and latency timer only apply to conventional
         * PCI / PCI-X devices; PCIe manages these itself.
         */
        if (!tg3_flag(tp, PCI_EXPRESS)) {
                pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
                                      tp->pci_cacheline_sz);
                pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                                      tp->pci_lat_timer);
        }

        /* Make sure PCI-X relaxed ordering bit is clear. */
        if (tg3_flag(tp, PCIX_MODE)) {
                u16 pcix_cmd;

                pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                     &pcix_cmd);
                pcix_cmd &= ~PCI_X_CMD_ERO;
                pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                      pcix_cmd);
        }

        if (tg3_flag(tp, 5780_CLASS)) {

                /* Chip reset on 5780 will reset MSI enable bit,
                 * so need to restore it.
                 */
                if (tg3_flag(tp, USING_MSI)) {
                        u16 ctrl;

                        pci_read_config_word(tp->pdev,
                                             tp->msi_cap + PCI_MSI_FLAGS,
                                             &ctrl);
                        pci_write_config_word(tp->pdev,
                                              tp->msi_cap + PCI_MSI_FLAGS,
                                              ctrl | PCI_MSI_FLAGS_ENABLE);
                        val = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
                }
        }
}
8509
8510 /* tp->lock is held. */
/* Perform a full GRC core-clock reset of the chip and bring the basic
 * register interface back up.  tp->lock is held.
 *
 * The sequence is strictly ordered: save PCI state, quiesce irq-side
 * accesses, issue the reset, wait, restore PCI state, then re-enable
 * the memory arbiter and MAC before polling the firmware.  Returns 0
 * on success or the error from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
        u32 val;
        void (*write_op)(struct tg3 *, u32, u32);
        int i, err;

        tg3_nvram_lock(tp);

        tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

        /* No matching tg3_nvram_unlock() after this because
         * chip reset below will undo the nvram lock.
         */
        tp->nvram_lock_cnt = 0;

        /* GRC_MISC_CFG core clock reset will clear the memory
         * enable bit in PCI register 4 and the MSI enable bit
         * on some chips, so we save relevant registers here.
         */
        tg3_save_pci_state(tp);

        if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
            tg3_flag(tp, 5755_PLUS))
                tw32(GRC_FASTBOOT_PC, 0);

        /*
         * We must avoid the readl() that normally takes place.
         * It locks machines, causes machine checks, and other
         * fun things.  So, temporarily disable the 5701
         * hardware workaround, while we do the reset.
         */
        write_op = tp->write32;
        if (write_op == tg3_write_flush_reg32)
                tp->write32 = tg3_write32;

        /* Prevent the irq handler from reading or writing PCI registers
         * during chip reset when the memory enable bit in the PCI command
         * register may be cleared.  The chip does not generate interrupt
         * at this time, but the irq handler may still be called due to irq
         * sharing or irqpoll.
         */
        tg3_flag_set(tp, CHIP_RESETTING);
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                if (tnapi->hw_status) {
                        tnapi->hw_status->status = 0;
                        tnapi->hw_status->status_tag = 0;
                }
                tnapi->last_tag = 0;
                tnapi->last_irq_tag = 0;
        }
        smp_mb();

        /* Make sure no in-flight handler is still using the registers. */
        for (i = 0; i < tp->irq_cnt; i++)
                synchronize_irq(tp->napi[i].irq_vec);

        if (tg3_asic_rev(tp) == ASIC_REV_57780) {
                val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
                tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
        }

        /* do the reset */
        val = GRC_MISC_CFG_CORECLK_RESET;

        if (tg3_flag(tp, PCI_EXPRESS)) {
                /* Force PCIe 1.0a mode */
                if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
                    !tg3_flag(tp, 57765_PLUS) &&
                    tr32(TG3_PCIE_PHY_TSTCTL) ==
                    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
                        tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

                if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
                        tw32(GRC_MISC_CFG, (1 << 29));
                        val |= (1 << 29);
                }
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
                tw32(GRC_VCPU_EXT_CTRL,
                     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
        }

        /* Manage gphy power for all CPMU absent PCIe devices. */
        if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
                val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

        tw32(GRC_MISC_CFG, val);

        /* restore 5701 hardware bug workaround write method */
        tp->write32 = write_op;

        /* Unfortunately, we have to delay before the PCI read back.
         * Some 575X chips even will not respond to a PCI cfg access
         * when the reset command is given to the chip.
         *
         * How do these hardware designers expect things to work
         * properly if the PCI write is posted for a long period
         * of time?  It is always necessary to have some method by
         * which a register read back can occur to push the write
         * out which does the reset.
         *
         * For most tg3 variants the trick below was working.
         * Ho hum...
         */
        udelay(120);

        /* Flush PCI posted writes.  The normal MMIO registers
         * are inaccessible at this time so this is the only
         * way to make this reliably (actually, this is no longer
         * the case, see above).  I tried to use indirect
         * register read/write but this upset some 5701 variants.
         */
        pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

        udelay(120);

        if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
                u16 val16;

                if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
                        int j;
                        u32 cfg_val;

                        /* Wait for link training to complete.  */
                        for (j = 0; j < 5000; j++)
                                udelay(100);

                        pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
                        pci_write_config_dword(tp->pdev, 0xc4,
                                               cfg_val | (1 << 15));
                }

                /* Clear the "no snoop" and "relaxed ordering" bits. */
                val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
                /*
                 * Older PCIe devices only support the 128 byte
                 * MPS setting.  Enforce the restriction.
                 */
                if (!tg3_flag(tp, CPMU_PRESENT))
                        val16 |= PCI_EXP_DEVCTL_PAYLOAD;
                pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

                /* Clear error status */
                pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
                                      PCI_EXP_DEVSTA_CED |
                                      PCI_EXP_DEVSTA_NFED |
                                      PCI_EXP_DEVSTA_FED |
                                      PCI_EXP_DEVSTA_URD);
        }

        tg3_restore_pci_state(tp);

        tg3_flag_clear(tp, CHIP_RESETTING);
        tg3_flag_clear(tp, ERROR_PROCESSED);

        /* Re-enable the memory arbiter; on 5780-class parts preserve
         * the other mode bits that survive across the reset.
         */
        val = 0;
        if (tg3_flag(tp, 5780_CLASS))
                val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
                tg3_stop_fw(tp);
                tw32(0x5000, 0x400);
        }

        if (tg3_flag(tp, IS_SSB_CORE)) {
                /*
                 * BCM4785: In order to avoid repercussions from using
                 * potentially defective internal ROM, stop the Rx RISC CPU,
                 * which is not required.
                 */
                tg3_stop_fw(tp);
                tg3_halt_cpu(tp, RX_CPU_BASE);
        }

        tw32(GRC_MODE, tp->grc_mode);

        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
                val = tr32(0xc4);

                tw32(0xc4, val | (1 << 15));
        }

        if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
            tg3_asic_rev(tp) == ASIC_REV_5705) {
                tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
                if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
                        tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
                tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }

        /* Pick the MAC port mode that matches the PHY attachment. */
        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
                val = tp->mac_mode;
        } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
                val = tp->mac_mode;
        } else
                val = 0;

        tw32_f(MAC_MODE, val);
        udelay(40);

        tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

        err = tg3_poll_fw(tp);
        if (err)
                return err;

        tg3_mdio_start(tp);

        if (tg3_flag(tp, PCI_EXPRESS) &&
            tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
            tg3_asic_rev(tp) != ASIC_REV_5785 &&
            !tg3_flag(tp, 57765_PLUS)) {
                val = tr32(0x7c00);

                tw32(0x7c00, val | (1 << 25));
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5720) {
                val = tr32(TG3_CPMU_CLCK_ORIDE);
                tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
        }

        /* Reprobe ASF enable state.  */
        tg3_flag_clear(tp, ENABLE_ASF);
        tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tg3_flag_set(tp, ENABLE_ASF);
                        tp->last_event_jiffies = jiffies;
                        if (tg3_flag(tp, 5750_PLUS))
                                tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
                }
        }

        return 0;
}
8756
8757 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8758 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8759
8760 /* tp->lock is held. */
8761 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8762 {
8763         int err;
8764
8765         tg3_stop_fw(tp);
8766
8767         tg3_write_sig_pre_reset(tp, kind);
8768
8769         tg3_abort_hw(tp, silent);
8770         err = tg3_chip_reset(tp);
8771
8772         __tg3_set_mac_addr(tp, 0);
8773
8774         tg3_write_sig_legacy(tp, kind);
8775         tg3_write_sig_post_reset(tp, kind);
8776
8777         if (tp->hw_stats) {
8778                 /* Save the stats across chip resets... */
8779                 tg3_get_nstats(tp, &tp->net_stats_prev);
8780                 tg3_get_estats(tp, &tp->estats_prev);
8781
8782                 /* And make sure the next sample is new data */
8783                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8784         }
8785
8786         if (err)
8787                 return err;
8788
8789         return 0;
8790 }
8791
8792 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8793 {
8794         struct tg3 *tp = netdev_priv(dev);
8795         struct sockaddr *addr = p;
8796         int err = 0, skip_mac_1 = 0;
8797
8798         if (!is_valid_ether_addr(addr->sa_data))
8799                 return -EADDRNOTAVAIL;
8800
8801         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8802
8803         if (!netif_running(dev))
8804                 return 0;
8805
8806         if (tg3_flag(tp, ENABLE_ASF)) {
8807                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8808
8809                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8810                 addr0_low = tr32(MAC_ADDR_0_LOW);
8811                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8812                 addr1_low = tr32(MAC_ADDR_1_LOW);
8813
8814                 /* Skip MAC addr 1 if ASF is using it. */
8815                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8816                     !(addr1_high == 0 && addr1_low == 0))
8817                         skip_mac_1 = 1;
8818         }
8819         spin_lock_bh(&tp->lock);
8820         __tg3_set_mac_addr(tp, skip_mac_1);
8821         spin_unlock_bh(&tp->lock);
8822
8823         return err;
8824 }
8825
8826 /* tp->lock is held. */
8827 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8828                            dma_addr_t mapping, u32 maxlen_flags,
8829                            u32 nic_addr)
8830 {
8831         tg3_write_mem(tp,
8832                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8833                       ((u64) mapping >> 32));
8834         tg3_write_mem(tp,
8835                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8836                       ((u64) mapping & 0xffffffff));
8837         tg3_write_mem(tp,
8838                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8839                        maxlen_flags);
8840
8841         if (!tg3_flag(tp, 5705_PLUS))
8842                 tg3_write_mem(tp,
8843                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8844                               nic_addr);
8845 }
8846
8847
8848 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8849 {
8850         int i = 0;
8851
8852         if (!tg3_flag(tp, ENABLE_TSS)) {
8853                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8854                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8855                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8856         } else {
8857                 tw32(HOSTCC_TXCOL_TICKS, 0);
8858                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8859                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8860
8861                 for (; i < tp->txq_cnt; i++) {
8862                         u32 reg;
8863
8864                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8865                         tw32(reg, ec->tx_coalesce_usecs);
8866                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8867                         tw32(reg, ec->tx_max_coalesced_frames);
8868                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8869                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8870                 }
8871         }
8872
8873         for (; i < tp->irq_max - 1; i++) {
8874                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8875                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8876                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8877         }
8878 }
8879
8880 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8881 {
8882         int i = 0;
8883         u32 limit = tp->rxq_cnt;
8884
8885         if (!tg3_flag(tp, ENABLE_RSS)) {
8886                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8887                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8888                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8889                 limit--;
8890         } else {
8891                 tw32(HOSTCC_RXCOL_TICKS, 0);
8892                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8893                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8894         }
8895
8896         for (; i < limit; i++) {
8897                 u32 reg;
8898
8899                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8900                 tw32(reg, ec->rx_coalesce_usecs);
8901                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8902                 tw32(reg, ec->rx_max_coalesced_frames);
8903                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8904                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8905         }
8906
8907         for (; i < tp->irq_max - 1; i++) {
8908                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8909                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8910                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8911         }
8912 }
8913
8914 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8915 {
8916         tg3_coal_tx_init(tp, ec);
8917         tg3_coal_rx_init(tp, ec);
8918
8919         if (!tg3_flag(tp, 5705_PLUS)) {
8920                 u32 val = ec->stats_block_coalesce_usecs;
8921
8922                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8923                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8924
8925                 if (!tp->link_up)
8926                         val = 0;
8927
8928                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8929         }
8930 }
8931
/* tp->lock is held.
 *
 * Reset all send / receive-return ring control blocks and per-vector
 * status blocks after a chip reset: disable every ring beyond the
 * first, clear the interrupt mailboxes and software consumer/producer
 * state, then re-program the BDINFO blocks and status-block DMA
 * addresses for every active interrupt vector.  The number of rings
 * to disable depends on the chip family (the SRAM ring count differs
 * per ASIC generation).
 */
static void tg3_rings_reset(struct tg3 *tp)
{
        int i;
        u32 stblk, txrcb, rxrcb, limit;
        struct tg3_napi *tnapi = &tp->napi[0];

        /* Disable all transmit rings but the first. */
        if (!tg3_flag(tp, 5705_PLUS))
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
        else if (tg3_flag(tp, 5717_PLUS))
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
        else if (tg3_flag(tp, 57765_CLASS) ||
                 tg3_asic_rev(tp) == ASIC_REV_5762)
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
        else
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

        for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
             txrcb < limit; txrcb += TG3_BDINFO_SIZE)
                tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
                              BDINFO_FLAGS_DISABLED);


        /* Disable all receive return rings but the first. */
        if (tg3_flag(tp, 5717_PLUS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
        else if (!tg3_flag(tp, 5705_PLUS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
        else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
                 tg3_asic_rev(tp) == ASIC_REV_5762 ||
                 tg3_flag(tp, 57765_CLASS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
        else
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

        for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
             rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
                tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
                              BDINFO_FLAGS_DISABLED);

        /* Disable interrupts */
        tw32_mailbox_f(tp->napi[0].int_mbox, 1);
        tp->napi[0].chk_msi_cnt = 0;
        tp->napi[0].last_rx_cons = 0;
        tp->napi[0].last_tx_cons = 0;

        /* Zero mailbox registers. */
        if (tg3_flag(tp, SUPPORT_MSIX)) {
                for (i = 1; i < tp->irq_max; i++) {
                        tp->napi[i].tx_prod = 0;
                        tp->napi[i].tx_cons = 0;
                        if (tg3_flag(tp, ENABLE_TSS))
                                tw32_mailbox(tp->napi[i].prodmbox, 0);
                        tw32_rx_mbox(tp->napi[i].consmbox, 0);
                        tw32_mailbox_f(tp->napi[i].int_mbox, 1);
                        tp->napi[i].chk_msi_cnt = 0;
                        tp->napi[i].last_rx_cons = 0;
                        tp->napi[i].last_tx_cons = 0;
                }
                /* Without TSS, vector 0 owns the tx producer mailbox. */
                if (!tg3_flag(tp, ENABLE_TSS))
                        tw32_mailbox(tp->napi[0].prodmbox, 0);
        } else {
                tp->napi[0].tx_prod = 0;
                tp->napi[0].tx_cons = 0;
                tw32_mailbox(tp->napi[0].prodmbox, 0);
                tw32_rx_mbox(tp->napi[0].consmbox, 0);
        }

        /* Make sure the NIC-based send BD rings are disabled. */
        if (!tg3_flag(tp, 5705_PLUS)) {
                u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
                for (i = 0; i < 16; i++)
                        tw32_tx_mbox(mbox + i * 8, 0);
        }

        txrcb = NIC_SRAM_SEND_RCB;
        rxrcb = NIC_SRAM_RCV_RET_RCB;

        /* Clear status block in ram. */
        memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

        /* Set status block DMA address */
        tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
             ((u64) tnapi->status_mapping >> 32));
        tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
             ((u64) tnapi->status_mapping & 0xffffffff));

        /* Re-program vector 0's tx and rx-return BDINFO blocks. */
        if (tnapi->tx_ring) {
                tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
                               (TG3_TX_RING_SIZE <<
                                BDINFO_FLAGS_MAXLEN_SHIFT),
                               NIC_SRAM_TX_BUFFER_DESC);
                txrcb += TG3_BDINFO_SIZE;
        }

        if (tnapi->rx_rcb) {
                tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
                               (tp->rx_ret_ring_mask + 1) <<
                                BDINFO_FLAGS_MAXLEN_SHIFT, 0);
                rxrcb += TG3_BDINFO_SIZE;
        }

        /* Program the remaining vectors' status blocks and rings;
         * each vector's status block register pair is 8 bytes apart.
         */
        stblk = HOSTCC_STATBLCK_RING1;

        for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
                u64 mapping = (u64)tnapi->status_mapping;
                tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
                tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

                /* Clear status block in ram. */
                memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

                if (tnapi->tx_ring) {
                        tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
                                       (TG3_TX_RING_SIZE <<
                                        BDINFO_FLAGS_MAXLEN_SHIFT),
                                       NIC_SRAM_TX_BUFFER_DESC);
                        txrcb += TG3_BDINFO_SIZE;
                }

                tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
                               ((tp->rx_ret_ring_mask + 1) <<
                                BDINFO_FLAGS_MAXLEN_SHIFT), 0);

                stblk += 8;
                rxrcb += TG3_BDINFO_SIZE;
        }
}
9061
9062 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9063 {
9064         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9065
9066         if (!tg3_flag(tp, 5750_PLUS) ||
9067             tg3_flag(tp, 5780_CLASS) ||
9068             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9069             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9070             tg3_flag(tp, 57765_PLUS))
9071                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9072         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9073                  tg3_asic_rev(tp) == ASIC_REV_5787)
9074                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9075         else
9076                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9077
9078         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9079         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9080
9081         val = min(nic_rep_thresh, host_rep_thresh);
9082         tw32(RCVBDI_STD_THRESH, val);
9083
9084         if (tg3_flag(tp, 57765_PLUS))
9085                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9086
9087         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9088                 return;
9089
9090         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9091
9092         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9093
9094         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9095         tw32(RCVBDI_JUMBO_THRESH, val);
9096
9097         if (tg3_flag(tp, 57765_PLUS))
9098                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9099 }
9100
9101 static inline u32 calc_crc(unsigned char *buf, int len)
9102 {
9103         u32 reg;
9104         u32 tmp;
9105         int j, k;
9106
9107         reg = 0xffffffff;
9108
9109         for (j = 0; j < len; j++) {
9110                 reg ^= buf[j];
9111
9112                 for (k = 0; k < 8; k++) {
9113                         tmp = reg & 0x01;
9114
9115                         reg >>= 1;
9116
9117                         if (tmp)
9118                                 reg ^= 0xedb88320;
9119                 }
9120         }
9121
9122         return ~reg;
9123 }
9124
9125 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9126 {
9127         /* accept or reject all multicast frames */
9128         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9129         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9130         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9131         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9132 }
9133
9134 static void __tg3_set_rx_mode(struct net_device *dev)
9135 {
9136         struct tg3 *tp = netdev_priv(dev);
9137         u32 rx_mode;
9138
9139         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9140                                   RX_MODE_KEEP_VLAN_TAG);
9141
9142 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9143         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9144          * flag clear.
9145          */
9146         if (!tg3_flag(tp, ENABLE_ASF))
9147                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9148 #endif
9149
9150         if (dev->flags & IFF_PROMISC) {
9151                 /* Promiscuous mode. */
9152                 rx_mode |= RX_MODE_PROMISC;
9153         } else if (dev->flags & IFF_ALLMULTI) {
9154                 /* Accept all multicast. */
9155                 tg3_set_multi(tp, 1);
9156         } else if (netdev_mc_empty(dev)) {
9157                 /* Reject all multicast. */
9158                 tg3_set_multi(tp, 0);
9159         } else {
9160                 /* Accept one or more multicast(s). */
9161                 struct netdev_hw_addr *ha;
9162                 u32 mc_filter[4] = { 0, };
9163                 u32 regidx;
9164                 u32 bit;
9165                 u32 crc;
9166
9167                 netdev_for_each_mc_addr(ha, dev) {
9168                         crc = calc_crc(ha->addr, ETH_ALEN);
9169                         bit = ~crc & 0x7f;
9170                         regidx = (bit & 0x60) >> 5;
9171                         bit &= 0x1f;
9172                         mc_filter[regidx] |= (1 << bit);
9173                 }
9174
9175                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9176                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9177                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9178                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9179         }
9180
9181         if (rx_mode != tp->rx_mode) {
9182                 tp->rx_mode = rx_mode;
9183                 tw32_f(MAC_RX_MODE, rx_mode);
9184                 udelay(10);
9185         }
9186 }
9187
9188 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9189 {
9190         int i;
9191
9192         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9193                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9194 }
9195
9196 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9197 {
9198         int i;
9199
9200         if (!tg3_flag(tp, SUPPORT_MSIX))
9201                 return;
9202
9203         if (tp->rxq_cnt == 1) {
9204                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9205                 return;
9206         }
9207
9208         /* Validate table against current IRQ count */
9209         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9210                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9211                         break;
9212         }
9213
9214         if (i != TG3_RSS_INDIR_TBL_SIZE)
9215                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9216 }
9217
9218 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9219 {
9220         int i = 0;
9221         u32 reg = MAC_RSS_INDIR_TBL_0;
9222
9223         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9224                 u32 val = tp->rss_ind_tbl[i];
9225                 i++;
9226                 for (; i % 8; i++) {
9227                         val <<= 4;
9228                         val |= tp->rss_ind_tbl[i];
9229                 }
9230                 tw32(reg, val);
9231                 reg += 4;
9232         }
9233 }
9234
9235 /* tp->lock is held. */
9236 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9237 {
9238         u32 val, rdmac_mode;
9239         int i, err, limit;
9240         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9241
9242         tg3_disable_ints(tp);
9243
9244         tg3_stop_fw(tp);
9245
9246         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9247
9248         if (tg3_flag(tp, INIT_COMPLETE))
9249                 tg3_abort_hw(tp, 1);
9250
9251         /* Enable MAC control of LPI */
9252         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9253                 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9254                       TG3_CPMU_EEE_LNKIDL_UART_IDL;
9255                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9256                         val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9257
9258                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9259
9260                 tw32_f(TG3_CPMU_EEE_CTRL,
9261                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9262
9263                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9264                       TG3_CPMU_EEEMD_LPI_IN_TX |
9265                       TG3_CPMU_EEEMD_LPI_IN_RX |
9266                       TG3_CPMU_EEEMD_EEE_ENABLE;
9267
9268                 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9269                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9270
9271                 if (tg3_flag(tp, ENABLE_APE))
9272                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9273
9274                 tw32_f(TG3_CPMU_EEE_MODE, val);
9275
9276                 tw32_f(TG3_CPMU_EEE_DBTMR1,
9277                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9278                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9279
9280                 tw32_f(TG3_CPMU_EEE_DBTMR2,
9281                        TG3_CPMU_DBTMR2_APE_TX_2047US |
9282                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9283         }
9284
9285         if (reset_phy)
9286                 tg3_phy_reset(tp);
9287
9288         err = tg3_chip_reset(tp);
9289         if (err)
9290                 return err;
9291
9292         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9293
9294         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9295                 val = tr32(TG3_CPMU_CTRL);
9296                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9297                 tw32(TG3_CPMU_CTRL, val);
9298
9299                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9300                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9301                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9302                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9303
9304                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9305                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9306                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9307                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9308
9309                 val = tr32(TG3_CPMU_HST_ACC);
9310                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9311                 val |= CPMU_HST_ACC_MACCLK_6_25;
9312                 tw32(TG3_CPMU_HST_ACC, val);
9313         }
9314
9315         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9316                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9317                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9318                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9319                 tw32(PCIE_PWR_MGMT_THRESH, val);
9320
9321                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9322                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9323
9324                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9325
9326                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9327                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9328         }
9329
9330         if (tg3_flag(tp, L1PLLPD_EN)) {
9331                 u32 grc_mode = tr32(GRC_MODE);
9332
9333                 /* Access the lower 1K of PL PCIE block registers. */
9334                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9335                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9336
9337                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9338                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9339                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9340
9341                 tw32(GRC_MODE, grc_mode);
9342         }
9343
9344         if (tg3_flag(tp, 57765_CLASS)) {
9345                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9346                         u32 grc_mode = tr32(GRC_MODE);
9347
9348                         /* Access the lower 1K of PL PCIE block registers. */
9349                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9350                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9351
9352                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9353                                    TG3_PCIE_PL_LO_PHYCTL5);
9354                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9355                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9356
9357                         tw32(GRC_MODE, grc_mode);
9358                 }
9359
9360                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9361                         u32 grc_mode;
9362
9363                         /* Fix transmit hangs */
9364                         val = tr32(TG3_CPMU_PADRNG_CTL);
9365                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9366                         tw32(TG3_CPMU_PADRNG_CTL, val);
9367
9368                         grc_mode = tr32(GRC_MODE);
9369
9370                         /* Access the lower 1K of DL PCIE block registers. */
9371                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9372                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9373
9374                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9375                                    TG3_PCIE_DL_LO_FTSMAX);
9376                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9377                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9378                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9379
9380                         tw32(GRC_MODE, grc_mode);
9381                 }
9382
9383                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9384                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9385                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9386                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9387         }
9388
9389         /* This works around an issue with Athlon chipsets on
9390          * B3 tigon3 silicon.  This bit has no effect on any
9391          * other revision.  But do not set this on PCI Express
9392          * chips and don't even touch the clocks if the CPMU is present.
9393          */
9394         if (!tg3_flag(tp, CPMU_PRESENT)) {
9395                 if (!tg3_flag(tp, PCI_EXPRESS))
9396                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9397                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9398         }
9399
9400         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9401             tg3_flag(tp, PCIX_MODE)) {
9402                 val = tr32(TG3PCI_PCISTATE);
9403                 val |= PCISTATE_RETRY_SAME_DMA;
9404                 tw32(TG3PCI_PCISTATE, val);
9405         }
9406
9407         if (tg3_flag(tp, ENABLE_APE)) {
9408                 /* Allow reads and writes to the
9409                  * APE register and memory space.
9410                  */
9411                 val = tr32(TG3PCI_PCISTATE);
9412                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9413                        PCISTATE_ALLOW_APE_SHMEM_WR |
9414                        PCISTATE_ALLOW_APE_PSPACE_WR;
9415                 tw32(TG3PCI_PCISTATE, val);
9416         }
9417
9418         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9419                 /* Enable some hw fixes.  */
9420                 val = tr32(TG3PCI_MSI_DATA);
9421                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9422                 tw32(TG3PCI_MSI_DATA, val);
9423         }
9424
9425         /* Descriptor ring init may make accesses to the
9426          * NIC SRAM area to setup the TX descriptors, so we
9427          * can only do this after the hardware has been
9428          * successfully reset.
9429          */
9430         err = tg3_init_rings(tp);
9431         if (err)
9432                 return err;
9433
9434         if (tg3_flag(tp, 57765_PLUS)) {
9435                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9436                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9437                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9438                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9439                 if (!tg3_flag(tp, 57765_CLASS) &&
9440                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
9441                     tg3_asic_rev(tp) != ASIC_REV_5762)
9442                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9443                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9444         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9445                    tg3_asic_rev(tp) != ASIC_REV_5761) {
9446                 /* This value is determined during the probe time DMA
9447                  * engine test, tg3_test_dma.
9448                  */
9449                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9450         }
9451
9452         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9453                           GRC_MODE_4X_NIC_SEND_RINGS |
9454                           GRC_MODE_NO_TX_PHDR_CSUM |
9455                           GRC_MODE_NO_RX_PHDR_CSUM);
9456         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9457
9458         /* Pseudo-header checksum is done by hardware logic and not
9459          * the offload processers, so make the chip do the pseudo-
9460          * header checksums on receive.  For transmit it is more
9461          * convenient to do the pseudo-header checksum in software
9462          * as Linux does that on transmit for us in all cases.
9463          */
9464         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9465
9466         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9467         if (tp->rxptpctl)
9468                 tw32(TG3_RX_PTP_CTL,
9469                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9470
9471         if (tg3_flag(tp, PTP_CAPABLE))
9472                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9473
9474         tw32(GRC_MODE, tp->grc_mode | val);
9475
9476         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
9477         val = tr32(GRC_MISC_CFG);
9478         val &= ~0xff;
9479         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9480         tw32(GRC_MISC_CFG, val);
9481
9482         /* Initialize MBUF/DESC pool. */
9483         if (tg3_flag(tp, 5750_PLUS)) {
9484                 /* Do nothing.  */
9485         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9486                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9487                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9488                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9489                 else
9490                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9491                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9492                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9493         } else if (tg3_flag(tp, TSO_CAPABLE)) {
9494                 int fw_len;
9495
9496                 fw_len = tp->fw_len;
9497                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9498                 tw32(BUFMGR_MB_POOL_ADDR,
9499                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9500                 tw32(BUFMGR_MB_POOL_SIZE,
9501                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9502         }
9503
9504         if (tp->dev->mtu <= ETH_DATA_LEN) {
9505                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9506                      tp->bufmgr_config.mbuf_read_dma_low_water);
9507                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9508                      tp->bufmgr_config.mbuf_mac_rx_low_water);
9509                 tw32(BUFMGR_MB_HIGH_WATER,
9510                      tp->bufmgr_config.mbuf_high_water);
9511         } else {
9512                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9513                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9514                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9515                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9516                 tw32(BUFMGR_MB_HIGH_WATER,
9517                      tp->bufmgr_config.mbuf_high_water_jumbo);
9518         }
9519         tw32(BUFMGR_DMA_LOW_WATER,
9520              tp->bufmgr_config.dma_low_water);
9521         tw32(BUFMGR_DMA_HIGH_WATER,
9522              tp->bufmgr_config.dma_high_water);
9523
9524         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9525         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9526                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9527         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9528             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9529             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9530                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9531         tw32(BUFMGR_MODE, val);
9532         for (i = 0; i < 2000; i++) {
9533                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9534                         break;
9535                 udelay(10);
9536         }
9537         if (i >= 2000) {
9538                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9539                 return -ENODEV;
9540         }
9541
9542         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9543                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9544
9545         tg3_setup_rxbd_thresholds(tp);
9546
9547         /* Initialize TG3_BDINFO's at:
9548          *  RCVDBDI_STD_BD:     standard eth size rx ring
9549          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9550          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
9551          *
9552          * like so:
9553          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9554          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9555          *                              ring attribute flags
9556          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9557          *
9558          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9559          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9560          *
9561          * The size of each ring is fixed in the firmware, but the location is
9562          * configurable.
9563          */
9564         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9565              ((u64) tpr->rx_std_mapping >> 32));
9566         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9567              ((u64) tpr->rx_std_mapping & 0xffffffff));
9568         if (!tg3_flag(tp, 5717_PLUS))
9569                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9570                      NIC_SRAM_RX_BUFFER_DESC);
9571
9572         /* Disable the mini ring */
9573         if (!tg3_flag(tp, 5705_PLUS))
9574                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9575                      BDINFO_FLAGS_DISABLED);
9576
9577         /* Program the jumbo buffer descriptor ring control
9578          * blocks on those devices that have them.
9579          */
9580         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9581             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9582
9583                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9584                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9585                              ((u64) tpr->rx_jmb_mapping >> 32));
9586                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9587                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9588                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9589                               BDINFO_FLAGS_MAXLEN_SHIFT;
9590                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9591                              val | BDINFO_FLAGS_USE_EXT_RECV);
9592                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9593                             tg3_flag(tp, 57765_CLASS) ||
9594                             tg3_asic_rev(tp) == ASIC_REV_5762)
9595                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9596                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9597                 } else {
9598                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9599                              BDINFO_FLAGS_DISABLED);
9600                 }
9601
9602                 if (tg3_flag(tp, 57765_PLUS)) {
9603                         val = TG3_RX_STD_RING_SIZE(tp);
9604                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9605                         val |= (TG3_RX_STD_DMA_SZ << 2);
9606                 } else
9607                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9608         } else
9609                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9610
9611         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9612
9613         tpr->rx_std_prod_idx = tp->rx_pending;
9614         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9615
9616         tpr->rx_jmb_prod_idx =
9617                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9618         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9619
9620         tg3_rings_reset(tp);
9621
9622         /* Initialize MAC address and backoff seed. */
9623         __tg3_set_mac_addr(tp, 0);
9624
9625         /* MTU + ethernet header + FCS + optional VLAN tag */
9626         tw32(MAC_RX_MTU_SIZE,
9627              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9628
9629         /* The slot time is changed by tg3_setup_phy if we
9630          * run at gigabit with half duplex.
9631          */
9632         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9633               (6 << TX_LENGTHS_IPG_SHIFT) |
9634               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9635
9636         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9637             tg3_asic_rev(tp) == ASIC_REV_5762)
9638                 val |= tr32(MAC_TX_LENGTHS) &
9639                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9640                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9641
9642         tw32(MAC_TX_LENGTHS, val);
9643
9644         /* Receive rules. */
9645         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9646         tw32(RCVLPC_CONFIG, 0x0181);
9647
9648         /* Calculate RDMAC_MODE setting early, we need it to determine
9649          * the RCVLPC_STATE_ENABLE mask.
9650          */
9651         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9652                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9653                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9654                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9655                       RDMAC_MODE_LNGREAD_ENAB);
9656
9657         if (tg3_asic_rev(tp) == ASIC_REV_5717)
9658                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9659
9660         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9661             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9662             tg3_asic_rev(tp) == ASIC_REV_57780)
9663                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9664                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9665                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9666
9667         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9668             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9669                 if (tg3_flag(tp, TSO_CAPABLE) &&
9670                     tg3_asic_rev(tp) == ASIC_REV_5705) {
9671                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9672                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9673                            !tg3_flag(tp, IS_5788)) {
9674                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9675                 }
9676         }
9677
9678         if (tg3_flag(tp, PCI_EXPRESS))
9679                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9680
9681         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9682                 tp->dma_limit = 0;
9683                 if (tp->dev->mtu <= ETH_DATA_LEN) {
9684                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9685                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9686                 }
9687         }
9688
9689         if (tg3_flag(tp, HW_TSO_1) ||
9690             tg3_flag(tp, HW_TSO_2) ||
9691             tg3_flag(tp, HW_TSO_3))
9692                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9693
9694         if (tg3_flag(tp, 57765_PLUS) ||
9695             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9696             tg3_asic_rev(tp) == ASIC_REV_57780)
9697                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9698
9699         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9700             tg3_asic_rev(tp) == ASIC_REV_5762)
9701                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9702
9703         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9704             tg3_asic_rev(tp) == ASIC_REV_5784 ||
9705             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9706             tg3_asic_rev(tp) == ASIC_REV_57780 ||
9707             tg3_flag(tp, 57765_PLUS)) {
9708                 u32 tgtreg;
9709
9710                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9711                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9712                 else
9713                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
9714
9715                 val = tr32(tgtreg);
9716                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9717                     tg3_asic_rev(tp) == ASIC_REV_5762) {
9718                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9719                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9720                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9721                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9722                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9723                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9724                 }
9725                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9726         }
9727
9728         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9729             tg3_asic_rev(tp) == ASIC_REV_5720 ||
9730             tg3_asic_rev(tp) == ASIC_REV_5762) {
9731                 u32 tgtreg;
9732
9733                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9734                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9735                 else
9736                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9737
9738                 val = tr32(tgtreg);
9739                 tw32(tgtreg, val |
9740                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9741                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9742         }
9743
9744         /* Receive/send statistics. */
9745         if (tg3_flag(tp, 5750_PLUS)) {
9746                 val = tr32(RCVLPC_STATS_ENABLE);
9747                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9748                 tw32(RCVLPC_STATS_ENABLE, val);
9749         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9750                    tg3_flag(tp, TSO_CAPABLE)) {
9751                 val = tr32(RCVLPC_STATS_ENABLE);
9752                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9753                 tw32(RCVLPC_STATS_ENABLE, val);
9754         } else {
9755                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9756         }
9757         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9758         tw32(SNDDATAI_STATSENAB, 0xffffff);
9759         tw32(SNDDATAI_STATSCTRL,
9760              (SNDDATAI_SCTRL_ENABLE |
9761               SNDDATAI_SCTRL_FASTUPD));
9762
9763         /* Setup host coalescing engine. */
9764         tw32(HOSTCC_MODE, 0);
9765         for (i = 0; i < 2000; i++) {
9766                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9767                         break;
9768                 udelay(10);
9769         }
9770
9771         __tg3_set_coalesce(tp, &tp->coal);
9772
9773         if (!tg3_flag(tp, 5705_PLUS)) {
9774                 /* Status/statistics block address.  See tg3_timer,
9775                  * the tg3_periodic_fetch_stats call there, and
9776                  * tg3_get_stats to see how this works for 5705/5750 chips.
9777                  */
9778                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9779                      ((u64) tp->stats_mapping >> 32));
9780                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9781                      ((u64) tp->stats_mapping & 0xffffffff));
9782                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9783
9784                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9785
9786                 /* Clear statistics and status block memory areas */
9787                 for (i = NIC_SRAM_STATS_BLK;
9788                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9789                      i += sizeof(u32)) {
9790                         tg3_write_mem(tp, i, 0);
9791                         udelay(40);
9792                 }
9793         }
9794
9795         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9796
9797         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9798         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9799         if (!tg3_flag(tp, 5705_PLUS))
9800                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9801
9802         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9803                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9804                 /* reset to prevent losing 1st rx packet intermittently */
9805                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9806                 udelay(10);
9807         }
9808
9809         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9810                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9811                         MAC_MODE_FHDE_ENABLE;
9812         if (tg3_flag(tp, ENABLE_APE))
9813                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9814         if (!tg3_flag(tp, 5705_PLUS) &&
9815             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9816             tg3_asic_rev(tp) != ASIC_REV_5700)
9817                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9818         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9819         udelay(40);
9820
9821         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9822          * If TG3_FLAG_IS_NIC is zero, we should read the
9823          * register to preserve the GPIO settings for LOMs. The GPIOs,
9824          * whether used as inputs or outputs, are set by boot code after
9825          * reset.
9826          */
9827         if (!tg3_flag(tp, IS_NIC)) {
9828                 u32 gpio_mask;
9829
9830                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9831                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9832                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9833
9834                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
9835                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9836                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9837
9838                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
9839                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9840
9841                 tp->grc_local_ctrl &= ~gpio_mask;
9842                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9843
9844                 /* GPIO1 must be driven high for eeprom write protect */
9845                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9846                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9847                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9848         }
9849         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9850         udelay(100);
9851
9852         if (tg3_flag(tp, USING_MSIX)) {
9853                 val = tr32(MSGINT_MODE);
9854                 val |= MSGINT_MODE_ENABLE;
9855                 if (tp->irq_cnt > 1)
9856                         val |= MSGINT_MODE_MULTIVEC_EN;
9857                 if (!tg3_flag(tp, 1SHOT_MSI))
9858                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9859                 tw32(MSGINT_MODE, val);
9860         }
9861
9862         if (!tg3_flag(tp, 5705_PLUS)) {
9863                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9864                 udelay(40);
9865         }
9866
9867         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9868                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9869                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9870                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9871                WDMAC_MODE_LNGREAD_ENAB);
9872
9873         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9874             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9875                 if (tg3_flag(tp, TSO_CAPABLE) &&
9876                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
9877                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
9878                         /* nothing */
9879                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9880                            !tg3_flag(tp, IS_5788)) {
9881                         val |= WDMAC_MODE_RX_ACCEL;
9882                 }
9883         }
9884
9885         /* Enable host coalescing bug fix */
9886         if (tg3_flag(tp, 5755_PLUS))
9887                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9888
9889         if (tg3_asic_rev(tp) == ASIC_REV_5785)
9890                 val |= WDMAC_MODE_BURST_ALL_DATA;
9891
9892         tw32_f(WDMAC_MODE, val);
9893         udelay(40);
9894
9895         if (tg3_flag(tp, PCIX_MODE)) {
9896                 u16 pcix_cmd;
9897
9898                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9899                                      &pcix_cmd);
9900                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
9901                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9902                         pcix_cmd |= PCI_X_CMD_READ_2K;
9903                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
9904                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9905                         pcix_cmd |= PCI_X_CMD_READ_2K;
9906                 }
9907                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9908                                       pcix_cmd);
9909         }
9910
9911         tw32_f(RDMAC_MODE, rdmac_mode);
9912         udelay(40);
9913
9914         if (tg3_asic_rev(tp) == ASIC_REV_5719) {
9915                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9916                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9917                                 break;
9918                 }
9919                 if (i < TG3_NUM_RDMA_CHANNELS) {
9920                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9921                         val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9922                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9923                         tg3_flag_set(tp, 5719_RDMA_BUG);
9924                 }
9925         }
9926
9927         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9928         if (!tg3_flag(tp, 5705_PLUS))
9929                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9930
9931         if (tg3_asic_rev(tp) == ASIC_REV_5761)
9932                 tw32(SNDDATAC_MODE,
9933                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9934         else
9935                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9936
9937         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9938         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9939         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9940         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9941                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9942         tw32(RCVDBDI_MODE, val);
9943         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9944         if (tg3_flag(tp, HW_TSO_1) ||
9945             tg3_flag(tp, HW_TSO_2) ||
9946             tg3_flag(tp, HW_TSO_3))
9947                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9948         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9949         if (tg3_flag(tp, ENABLE_TSS))
9950                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9951         tw32(SNDBDI_MODE, val);
9952         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9953
9954         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
9955                 err = tg3_load_5701_a0_firmware_fix(tp);
9956                 if (err)
9957                         return err;
9958         }
9959
9960         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9961                 /* Ignore any errors for the firmware download. If download
9962                  * fails, the device will operate with EEE disabled
9963                  */
9964                 tg3_load_57766_firmware(tp);
9965         }
9966
9967         if (tg3_flag(tp, TSO_CAPABLE)) {
9968                 err = tg3_load_tso_firmware(tp);
9969                 if (err)
9970                         return err;
9971         }
9972
9973         tp->tx_mode = TX_MODE_ENABLE;
9974
9975         if (tg3_flag(tp, 5755_PLUS) ||
9976             tg3_asic_rev(tp) == ASIC_REV_5906)
9977                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9978
9979         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9980             tg3_asic_rev(tp) == ASIC_REV_5762) {
9981                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9982                 tp->tx_mode &= ~val;
9983                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9984         }
9985
9986         tw32_f(MAC_TX_MODE, tp->tx_mode);
9987         udelay(100);
9988
9989         if (tg3_flag(tp, ENABLE_RSS)) {
9990                 tg3_rss_write_indir_tbl(tp);
9991
9992                 /* Setup the "secret" hash key. */
9993                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9994                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9995                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9996                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9997                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9998                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9999                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10000                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10001                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10002                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10003         }
10004
10005         tp->rx_mode = RX_MODE_ENABLE;
10006         if (tg3_flag(tp, 5755_PLUS))
10007                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10008
10009         if (tg3_flag(tp, ENABLE_RSS))
10010                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10011                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10012                                RX_MODE_RSS_IPV6_HASH_EN |
10013                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10014                                RX_MODE_RSS_IPV4_HASH_EN |
10015                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10016
10017         tw32_f(MAC_RX_MODE, tp->rx_mode);
10018         udelay(10);
10019
10020         tw32(MAC_LED_CTRL, tp->led_ctrl);
10021
10022         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10023         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10024                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10025                 udelay(10);
10026         }
10027         tw32_f(MAC_RX_MODE, tp->rx_mode);
10028         udelay(10);
10029
10030         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10031                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10032                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10033                         /* Set drive transmission level to 1.2V  */
10034                         /* only if the signal pre-emphasis bit is not set  */
10035                         val = tr32(MAC_SERDES_CFG);
10036                         val &= 0xfffff000;
10037                         val |= 0x880;
10038                         tw32(MAC_SERDES_CFG, val);
10039                 }
10040                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10041                         tw32(MAC_SERDES_CFG, 0x616000);
10042         }
10043
10044         /* Prevent chip from dropping frames when flow control
10045          * is enabled.
10046          */
10047         if (tg3_flag(tp, 57765_CLASS))
10048                 val = 1;
10049         else
10050                 val = 2;
10051         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10052
10053         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10054             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10055                 /* Use hardware link auto-negotiation */
10056                 tg3_flag_set(tp, HW_AUTONEG);
10057         }
10058
10059         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10060             tg3_asic_rev(tp) == ASIC_REV_5714) {
10061                 u32 tmp;
10062
10063                 tmp = tr32(SERDES_RX_CTRL);
10064                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10065                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10066                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10067                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10068         }
10069
10070         if (!tg3_flag(tp, USE_PHYLIB)) {
10071                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10072                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10073
10074                 err = tg3_setup_phy(tp, 0);
10075                 if (err)
10076                         return err;
10077
10078                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10079                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10080                         u32 tmp;
10081
10082                         /* Clear CRC stats. */
10083                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10084                                 tg3_writephy(tp, MII_TG3_TEST1,
10085                                              tmp | MII_TG3_TEST1_CRC_EN);
10086                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10087                         }
10088                 }
10089         }
10090
10091         __tg3_set_rx_mode(tp->dev);
10092
10093         /* Initialize receive rules. */
10094         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10095         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10096         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10097         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10098
10099         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10100                 limit = 8;
10101         else
10102                 limit = 16;
10103         if (tg3_flag(tp, ENABLE_ASF))
10104                 limit -= 4;
10105         switch (limit) {
10106         case 16:
10107                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10108         case 15:
10109                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10110         case 14:
10111                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10112         case 13:
10113                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10114         case 12:
10115                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10116         case 11:
10117                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10118         case 10:
10119                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10120         case 9:
10121                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10122         case 8:
10123                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10124         case 7:
10125                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10126         case 6:
10127                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10128         case 5:
10129                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10130         case 4:
10131                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10132         case 3:
10133                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10134         case 2:
10135         case 1:
10136
10137         default:
10138                 break;
10139         }
10140
10141         if (tg3_flag(tp, ENABLE_APE))
10142                 /* Write our heartbeat update interval to APE. */
10143                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10144                                 APE_HOST_HEARTBEAT_INT_DISABLE);
10145
10146         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10147
10148         return 0;
10149 }
10150
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * Returns 0 on success or a negative errno from tg3_reset_hw().
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
        /* Select a working core clock before touching other registers. */
        tg3_switch_clocks(tp);

        /* Reset the PCI memory window base so subsequent indirect
         * register/memory accesses start from offset zero.
         */
        tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

        /* Full chip (re)initialization; reset_phy is passed through to
         * tg3_reset_hw() to optionally reset the PHY as well.
         */
        return tg3_reset_hw(tp, reset_phy);
}
10162
10163 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10164 {
10165         int i;
10166
10167         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10168                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10169
10170                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10171                 off += len;
10172
10173                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10174                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10175                         memset(ocir, 0, TG3_OCIR_LEN);
10176         }
10177 }
10178
10179 /* sysfs attributes for hwmon */
10180 static ssize_t tg3_show_temp(struct device *dev,
10181                              struct device_attribute *devattr, char *buf)
10182 {
10183         struct pci_dev *pdev = to_pci_dev(dev);
10184         struct net_device *netdev = pci_get_drvdata(pdev);
10185         struct tg3 *tp = netdev_priv(netdev);
10186         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10187         u32 temperature;
10188
10189         spin_lock_bh(&tp->lock);
10190         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10191                                 sizeof(temperature));
10192         spin_unlock_bh(&tp->lock);
10193         return sprintf(buf, "%u\n", temperature);
10194 }
10195
10196
/* One attribute per exported temperature value; the final argument is
 * the APE scratchpad offset passed to tg3_show_temp() via attr->index.
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
                          TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
                          TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
                          TG3_TEMP_MAX_OFFSET);

/* NULL-terminated list of all hwmon attributes for sysfs registration. */
static struct attribute *tg3_attributes[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp1_crit.dev_attr.attr,
        &sensor_dev_attr_temp1_max.dev_attr.attr,
        NULL
};

/* Attribute group created/removed in tg3_hwmon_open()/tg3_hwmon_close(). */
static const struct attribute_group tg3_group = {
        .attrs = tg3_attributes,
};
10214
10215 static void tg3_hwmon_close(struct tg3 *tp)
10216 {
10217         if (tp->hwmon_dev) {
10218                 hwmon_device_unregister(tp->hwmon_dev);
10219                 tp->hwmon_dev = NULL;
10220                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10221         }
10222 }
10223
10224 static void tg3_hwmon_open(struct tg3 *tp)
10225 {
10226         int i, err;
10227         u32 size = 0;
10228         struct pci_dev *pdev = tp->pdev;
10229         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10230
10231         tg3_sd_scan_scratchpad(tp, ocirs);
10232
10233         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10234                 if (!ocirs[i].src_data_length)
10235                         continue;
10236
10237                 size += ocirs[i].src_hdr_length;
10238                 size += ocirs[i].src_data_length;
10239         }
10240
10241         if (!size)
10242                 return;
10243
10244         /* Register hwmon sysfs hooks */
10245         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10246         if (err) {
10247                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10248                 return;
10249         }
10250
10251         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10252         if (IS_ERR(tp->hwmon_dev)) {
10253                 tp->hwmon_dev = NULL;
10254                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10255                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10256         }
10257 }
10258
10259
/* Accumulate the 32-bit hardware counter at REG into the 64-bit
 * software statistic PSTAT (a high/low u32 pair): add the register
 * value to ->low, and carry into ->high when ->low wraps (detected by
 * the sum being smaller than the addend).
 * NOTE(review): correctness presumes REG holds a delta since the last
 * fetch (clear-on-read or periodic reset) — confirm against the MAC
 * statistics block behavior.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
10266
/* Fold the 32-bit MAC TX/RX and receive-list-placement statistics
 * registers into the 64-bit accumulators in tp->hw_stats.  Called from
 * the once-per-second portion of tg3_timer(); does nothing while the
 * link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	/* 5719 RDMA workaround: once the total TX packet count exceeds
	 * TG3_NUM_RDMA_CHANNELS, clear the TX-length workaround bit in
	 * the LSO read-DMA control register and drop the flag so this
	 * runs only once.
	 */
	if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	/* On 5717 and the A0 steppings of 5719/5720, derive rx_discards
	 * from the host-coalescing MBUF low-watermark attention bit
	 * (cleared by writing it back) instead of RCVLPC_IN_DISCARDS_CNT,
	 * and mirror the count into mbuf_lwm_thresh_hit.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
10331
10332 static void tg3_chk_missed_msi(struct tg3 *tp)
10333 {
10334         u32 i;
10335
10336         for (i = 0; i < tp->irq_cnt; i++) {
10337                 struct tg3_napi *tnapi = &tp->napi[i];
10338
10339                 if (tg3_has_work(tnapi)) {
10340                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10341                             tnapi->last_tx_cons == tnapi->tx_cons) {
10342                                 if (tnapi->chk_msi_cnt < 1) {
10343                                         tnapi->chk_msi_cnt++;
10344                                         return;
10345                                 }
10346                                 tg3_msi(0, tnapi);
10347                         }
10348                 }
10349                 tnapi->chk_msi_cnt = 0;
10350                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10351                 tnapi->last_tx_cons = tnapi->tx_cons;
10352         }
10353 }
10354
/* Driver heartbeat, re-armed every tp->timer_offset jiffies.
 *
 * Per tick: run the missed-MSI check on chips that need it, flush
 * posted writes on BCM4785, and work around the non-tagged-status
 * mailbox race.  Once per second (timer_counter): fetch statistics,
 * finish deferred EEE enabling, and poll for link changes.  Every
 * asf_counter expiry: send the ASF firmware keep-alive.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Skip all work, but keep the timer running, while interrupts
	 * are being synced or a reset task is pending.
	 */
	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	/* 5717 and the 57765 class get the missed-MSI recovery check;
	 * these are also the chips kept on the 10 Hz tick by
	 * tg3_timer_init().
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			/* Write DMA engine stopped: schedule a full chip
			 * reset from process context.
			 */
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		/* Complete a deferred EEE enable when the countdown hits 0. */
		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			/* Poll MAC_STATUS for MI-interrupt or link-state
			 * change events and re-run phy setup on either.
			 */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link was up and the state changed... */
			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			/* ...or link was down and PCS sync / signal
			 * detect appeared.
			 */
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Bounce the MAC port mode before phy
				 * setup; skipped while tp->serdes_counter
				 * is still counting down.
				 */
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
10486
10487 static void tg3_timer_init(struct tg3 *tp)
10488 {
10489         if (tg3_flag(tp, TAGGED_STATUS) &&
10490             tg3_asic_rev(tp) != ASIC_REV_5717 &&
10491             !tg3_flag(tp, 57765_CLASS))
10492                 tp->timer_offset = HZ;
10493         else
10494                 tp->timer_offset = HZ / 10;
10495
10496         BUG_ON(tp->timer_offset > HZ);
10497
10498         tp->timer_multiplier = (HZ / tp->timer_offset);
10499         tp->asf_multiplier = (HZ / tp->timer_offset) *
10500                              TG3_FW_UPDATE_FREQ_SEC;
10501
10502         init_timer(&tp->timer);
10503         tp->timer.data = (unsigned long) tp;
10504         tp->timer.function = tg3_timer;
10505 }
10506
10507 static void tg3_timer_start(struct tg3 *tp)
10508 {
10509         tp->asf_counter   = tp->asf_multiplier;
10510         tp->timer_counter = tp->timer_multiplier;
10511
10512         tp->timer.expires = jiffies + tp->timer_offset;
10513         add_timer(&tp->timer);
10514 }
10515
/* Stop the heartbeat timer, waiting for a concurrently-executing
 * tg3_timer() on another CPU to finish before returning.
 */
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
10520
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On failure the chip is halted and the device is closed; tp->lock is
 * dropped across the teardown and dev_close() (hence the
 * __releases/__acquires annotations) and re-acquired before returning.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
10544
/* Process-context worker behind tg3_reset_task_schedule(): quiesce the
 * phy and netif, halt the chip, re-initialize the hardware, and
 * restart.  Clears RESET_TASK_PENDING when done (or when the interface
 * turned out to be down).
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	/* Nothing to do if the interface was closed in the meantime. */
	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* TX recovery: switch the TX mailbox writer to the non-flushing
	 * variant, the RX mailbox writer to the flushing variant, and
	 * enable the mailbox write-reorder workaround before re-init.
	 */
	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
10588
10589 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10590 {
10591         irq_handler_t fn;
10592         unsigned long flags;
10593         char *name;
10594         struct tg3_napi *tnapi = &tp->napi[irq_num];
10595
10596         if (tp->irq_cnt == 1)
10597                 name = tp->dev->name;
10598         else {
10599                 name = &tnapi->irq_lbl[0];
10600                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10601                 name[IFNAMSIZ-1] = 0;
10602         }
10603
10604         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10605                 fn = tg3_msi;
10606                 if (tg3_flag(tp, 1SHOT_MSI))
10607                         fn = tg3_msi_1shot;
10608                 flags = 0;
10609         } else {
10610                 fn = tg3_interrupt;
10611                 if (tg3_flag(tp, TAGGED_STATUS))
10612                         fn = tg3_interrupt_tagged;
10613                 flags = IRQF_SHARED;
10614         }
10615
10616         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10617 }
10618
/* Verify the hardware can actually deliver an interrupt.
 *
 * Temporarily swaps in tg3_test_isr on vector 0, disables MSI one-shot
 * mode (so delivery is observable), forces an immediate coalescing
 * interrupt, and polls for up to ~50ms (5 x 10ms) for evidence that it
 * arrived.  The normal handler is reinstalled before returning.
 *
 * Returns 0 on success, -EIO if no interrupt was observed, or a
 * negative errno from request_irq()/tg3_request_irq().
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	/* NOTE(review): on failure here the normal handler freed above is
	 * never reinstalled, leaving the device with no IRQ handler —
	 * confirm callers tolerate this.
	 */
	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force an immediate interrupt via coalescing "now" mode. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* A non-zero interrupt mailbox or a masked PCI interrupt
		 * both indicate the interrupt fired.
		 */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		/* Ack the outstanding status tag so another interrupt
		 * can be generated while we keep polling.
		 */
		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/* Reinstall the normal handler for vector 0. */
	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
10692
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.
 *
 * Called from tg3_start() when test_irq is set and MSI is in use.  On
 * MSI failure (-EIO from tg3_test_interrupt()) this disables MSI,
 * re-requests the legacy PCI IRQ, and resets/re-initializes the chip
 * in case the MSI cycle ended with a Master Abort.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word. */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	/* Switch vector 0 to the legacy PCI function IRQ. */
	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
10753
10754 static int tg3_request_firmware(struct tg3 *tp)
10755 {
10756         const struct tg3_firmware_hdr *fw_hdr;
10757
10758         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10759                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10760                            tp->fw_needed);
10761                 return -ENOENT;
10762         }
10763
10764         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
10765
10766         /* Firmware blob starts with version numbers, followed by
10767          * start address and _full_ length including BSS sections
10768          * (which must be longer than the actual data, of course
10769          */
10770
10771         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
10772         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
10773                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10774                            tp->fw_len, tp->fw_needed);
10775                 release_firmware(tp->fw);
10776                 tp->fw = NULL;
10777                 return -EINVAL;
10778         }
10779
10780         /* We no longer need firmware; we have it. */
10781         tp->fw_needed = NULL;
10782         return 0;
10783 }
10784
10785 static u32 tg3_irq_count(struct tg3 *tp)
10786 {
10787         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10788
10789         if (irq_cnt > 1) {
10790                 /* We want as many rx rings enabled as there are cpus.
10791                  * In multiqueue MSI-X mode, the first MSI-X vector
10792                  * only deals with link interrupts, etc, so we add
10793                  * one to the number of vectors we are requesting.
10794                  */
10795                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10796         }
10797
10798         return irq_cnt;
10799 }
10800
/* Try to enable MSI-X with one vector per ring (plus one for link
 * interrupts in multiqueue mode — see tg3_irq_count()).  If the PCI
 * core grants fewer vectors than requested, retry with the granted
 * count and shrink the ring counts to match.  Returns true when MSI-X
 * is enabled and the queue counts are committed.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	/* Start from the user-requested queue counts; default the rx
	 * count to the RSS heuristic, capped at the hardware maximum.
	 */
	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* Fewer vectors than requested are available: retry with
		 * exactly that many and scale the rings down (vector 0
		 * is reserved for link interrupts).
		 */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
10861
/* Choose and enable the interrupt mode (MSI-X, then MSI, then INTx)
 * and record the resulting vector count and vector numbers in the NAPI
 * contexts.  Queue counts are forced to 1 when only one vector is
 * available.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	/* Program the chip's message-interrupt mode to match. */
	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	/* Anything but MSI-X ends up as a single vector: the PCI
	 * function's IRQ (covers both MSI and legacy INTx).
	 */
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
10900
/* Undo tg3_ints_init(): disable whichever of MSI-X/MSI was enabled and
 * clear the interrupt-mode and RSS/TSS feature flags.
 */
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
10912
/* Bring the device up: configure interrupt vectors, allocate DMA
 * memory and NAPI contexts, request the IRQs, initialize the hardware,
 * optionally verify MSI delivery, then start the phy, hwmon, heartbeat
 * timer, PTP and TX queues.
 *
 * @reset_phy: passed through to tg3_init_hw()
 * @test_irq:  when true and MSI is in use, run tg3_test_msi()
 * @init:      true for first-time bring-up (tg3_ptp_init() instead of
 *             tg3_ptp_resume())
 */
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	/* Request one IRQ per vector, unwinding already-requested ones
	 * on failure.
	 */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		/* tg3_test_msi() may have fallen back to INTx, so only
		 * enable 1-shot MSI if MSI is still in use.
		 */
		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	if (init)
		tg3_ptp_init(tp);
	else
		tg3_ptp_resume(tp);


	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);

	return err;
}
11028
/* Tear down everything tg3_start() set up: cancel any pending reset
 * work, quiesce netif/timer/hwmon/phy, halt the chip under the full
 * lock, then release the IRQs, interrupt vectors, NAPI contexts and
 * DMA memory.
 */
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/* Free IRQs in the reverse order they were requested. */
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
11063
/* net_device open hook: load firmware if needed, power the chip up,
 * bring the device up via tg3_start(), and register the PTP clock on
 * PTP-capable hardware.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Firmware load failure is fatal only on 5701 A0.  On 57766 it
	 * disables EEE instead; on other chips it disables TSO.  A later
	 * successful load restores the corresponding capability.
	 */
	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
			if (err) {
				netdev_warn(tp->dev, "EEE capability disabled\n");
				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
				netdev_warn(tp->dev, "EEE capability restored\n");
				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
			}
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp, true, true, true);
	if (err) {
		/* Start failed: drop auxiliary power and put the chip
		 * into D3hot.
		 */
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	/* NOTE(review): the PTP clock is registered even when tg3_start()
	 * failed above — confirm that is intentional.
	 */
	if (tg3_flag(tp, PTP_CAPABLE)) {
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	return err;
}
11119
/* net_device close hook: unregister PTP, tear the device down via
 * tg3_stop(), reset the cumulative statistics snapshots, and power the
 * chip down.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_ptp_fini(tp);

	tg3_stop(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_power_down(tp);

	tg3_carrier_off(tp);

	return 0;
}
11138
11139 static inline u64 get_stat64(tg3_stat64_t *val)
11140 {
11141        return ((u64)val->high << 32) | ((u64)val->low);
11142 }
11143
/* Return the cumulative RX CRC error count.
 *
 * On 5700/5701 copper (non-SerDes) parts the count is maintained in
 * software from the PHY's CRC counter register rather than the MAC's
 * rx_fcs_errors statistic; every other device reports the MAC counter
 * directly.
 */
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		/* Enable the PHY CRC counter, then fetch its current value
		 * and fold it into the running software total.  A failed
		 * TEST1 read contributes nothing.
		 */
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
11167
/* Store into the caller's @estats the sum of the previously accumulated
 * value (old_estats) and the live hardware counter for @member.  Relies
 * on locals named estats, old_estats and hw_stats in the expansion scope.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)
11171
/* Fill @estats with cumulative ethtool statistics: each live hardware
 * counter is added to the value previously accumulated in
 * tp->estats_prev (see ESTAT_ADD).  Assumes tp->hw_stats is valid —
 * NOTE(review): no NULL check here; confirm callers guarantee it.
 */
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	/* Receive-side MAC counters. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit-side MAC counters. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* DMA write engine / RX resource counters. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	/* DMA read engine counters. */
	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	/* Host interface / interrupt counters. */
	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
11255
/* Fill the standard netdev statistics from the hardware counters,
 * adding the totals accumulated in tp->net_stats_prev so counts survive
 * chip resets.  Assumes tp->hw_stats is valid.
 */
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	/* Packet totals are the sum of the per-cast-type counters. */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	/* tx_errors aggregates every distinct TX failure counter. */
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on older chips. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	/* Driver-maintained software drop counters. */
	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
11311
/* ethtool get_regs_len: the register dump is always a fixed-size block. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
11316
11317 static void tg3_get_regs(struct net_device *dev,
11318                 struct ethtool_regs *regs, void *_p)
11319 {
11320         struct tg3 *tp = netdev_priv(dev);
11321
11322         regs->version = 0;
11323
11324         memset(_p, 0, TG3_REG_BLK_SIZE);
11325
11326         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11327                 return;
11328
11329         tg3_full_lock(tp, 0);
11330
11331         tg3_dump_legacy_regs(tp, (u32 *)_p);
11332
11333         tg3_full_unlock(tp);
11334 }
11335
11336 static int tg3_get_eeprom_len(struct net_device *dev)
11337 {
11338         struct tg3 *tp = netdev_priv(dev);
11339
11340         return tp->nvram_size;
11341 }
11342
/* ethtool get_eeprom: copy eeprom->len bytes of NVRAM starting at
 * eeprom->offset into @data.  NVRAM is read in big-endian 32-bit words,
 * so an unaligned head or tail is handled by reading the enclosing word
 * and copying out only the requested bytes.  On return eeprom->len holds
 * the number of bytes actually copied, including on partial failure.
 * Returns 0 or a negative errno.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	/* NVRAM is inaccessible while the chip is powered down. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* Report how much was copied before the failure. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
11405
/* ethtool set_eeprom: write eeprom->len bytes of @data to NVRAM at
 * eeprom->offset.  NVRAM writes are whole big-endian 32-bit words, so a
 * misaligned start or end widens the span: the bordering words are read
 * first and the caller's bytes are merged into a scratch buffer before
 * the single block write.  Returns 0 or a negative errno.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	/* NVRAM is inaccessible while the chip is powered down. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Build a word-aligned image: bordering word(s) first,
		 * then the caller's payload at its byte offset.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
11464
/* ethtool get_settings: report supported modes, advertised modes and
 * current link state.  When phylib manages the PHY the query is delegated
 * to it wholesale.  Returns 0, or -EAGAIN if the phylib device is not yet
 * connected.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	/* Gigabit modes unless the part is 10/100-only. */
	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper parts add the 10/100 TP modes; SerDes is fibre-only. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Map the RX/TX flow-control configuration onto the
		 * standard Pause/Asym_Pause advertisement bits.
		 */
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && tp->link_up) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		/* MDI-X status is only meaningful on copper links. */
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		/* No link: report unknown speed/duplex/MDI state. */
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
11530
/* ethtool set_settings: validate and apply the requested link
 * configuration (autoneg/speed/duplex/advertisement), then renegotiate if
 * the interface is running.  Delegates to phylib when it owns the PHY.
 * Returns 0, -EINVAL on an unsupported combination, or -EAGAIN if the
 * phylib device is not yet connected.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Build the set of advertisement bits this device can
		 * honour and reject anything outside it.
		 */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		/* Keep only the speed/duplex bits when storing the
		 * caller's advertisement.
		 */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		/* Forced mode: SerDes parts only support 1000/full;
		 * copper forced mode only supports 10 or 100.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
11619
/* ethtool get_drvinfo: report driver name/version, firmware version and
 * PCI bus address.
 */
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
11629
11630 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11631 {
11632         struct tg3 *tp = netdev_priv(dev);
11633
11634         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11635                 wol->supported = WAKE_MAGIC;
11636         else
11637                 wol->supported = 0;
11638         wol->wolopts = 0;
11639         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11640                 wol->wolopts = WAKE_MAGIC;
11641         memset(&wol->sopass, 0, sizeof(wol->sopass));
11642 }
11643
11644 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11645 {
11646         struct tg3 *tp = netdev_priv(dev);
11647         struct device *dp = &tp->pdev->dev;
11648
11649         if (wol->wolopts & ~WAKE_MAGIC)
11650                 return -EINVAL;
11651         if ((wol->wolopts & WAKE_MAGIC) &&
11652             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11653                 return -EINVAL;
11654
11655         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11656
11657         spin_lock_bh(&tp->lock);
11658         if (device_may_wakeup(dp))
11659                 tg3_flag_set(tp, WOL_ENABLE);
11660         else
11661                 tg3_flag_clear(tp, WOL_ENABLE);
11662         spin_unlock_bh(&tp->lock);
11663
11664         return 0;
11665 }
11666
11667 static u32 tg3_get_msglevel(struct net_device *dev)
11668 {
11669         struct tg3 *tp = netdev_priv(dev);
11670         return tp->msg_enable;
11671 }
11672
11673 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11674 {
11675         struct tg3 *tp = netdev_priv(dev);
11676         tp->msg_enable = value;
11677 }
11678
/* ethtool nway_reset: restart autonegotiation.  Not supported on SerDes
 * PHYs.  With phylib the restart is delegated; otherwise BMCR is poked
 * directly.  Returns 0 on success, -EAGAIN if the device is down or the
 * phylib PHY is not connected, -EINVAL if a restart makes no sense.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* BMCR is read twice; the first read's result is discarded.
		 * NOTE(review): presumably this flushes a stale/latched
		 * value before the real read — confirm against PHY docs.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			/* Restart (and force-enable) autonegotiation. */
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
11714
11715 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11716 {
11717         struct tg3 *tp = netdev_priv(dev);
11718
11719         ering->rx_max_pending = tp->rx_std_ring_mask;
11720         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11721                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11722         else
11723                 ering->rx_jumbo_max_pending = 0;
11724
11725         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11726
11727         ering->rx_pending = tp->rx_pending;
11728         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11729                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11730         else
11731                 ering->rx_jumbo_pending = 0;
11732
11733         ering->tx_pending = tp->napi[0].tx_pending;
11734 }
11735
/* ethtool set_ringparam: validate and apply new RX/RX-jumbo/TX ring
 * sizes.  If the interface is running the hardware is halted, the new
 * sizes applied, and the hardware restarted.  Returns 0 on success,
 * -EINVAL for out-of-range sizes, or the restart error.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	/* TX must be large enough to hold a maximally-fragmented skb
	 * (and three of them on TSO_BUG chips).
	 */
	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries. */
	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* The TX size applies to every queue. */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 0);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
11781
11782 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11783 {
11784         struct tg3 *tp = netdev_priv(dev);
11785
11786         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11787
11788         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11789                 epause->rx_pause = 1;
11790         else
11791                 epause->rx_pause = 0;
11792
11793         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11794                 epause->tx_pause = 1;
11795         else
11796                 epause->tx_pause = 0;
11797 }
11798
/* ethtool set_pauseparam: apply new flow-control settings.  With phylib
 * the pause advertisement is pushed into the phy_device and, if needed,
 * autonegotiation is restarted; otherwise the hardware is halted and
 * restarted with the new configuration.  Returns 0, a negative errno
 * from the restart, or -EINVAL for an unsupportable combination.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE)
		tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* Asymmetric pause requires PHY support for it. */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Translate rx/tx pause into flowctrl bits and the
		 * corresponding Pause/Asym_Pause advertisement.
		 */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not connected yet: just record the
			 * advertisement for when it is.
			 */
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		/* Legacy path: apply flowctrl bits and restart the
		 * hardware if the interface is up.
		 */
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 0);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
11903
11904 static int tg3_get_sset_count(struct net_device *dev, int sset)
11905 {
11906         switch (sset) {
11907         case ETH_SS_TEST:
11908                 return TG3_NUM_TEST;
11909         case ETH_SS_STATS:
11910                 return TG3_NUM_STATS;
11911         default:
11912                 return -EOPNOTSUPP;
11913         }
11914 }
11915
11916 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11917                          u32 *rules __always_unused)
11918 {
11919         struct tg3 *tp = netdev_priv(dev);
11920
11921         if (!tg3_flag(tp, SUPPORT_MSIX))
11922                 return -EOPNOTSUPP;
11923
11924         switch (info->cmd) {
11925         case ETHTOOL_GRXRINGS:
11926                 if (netif_running(tp->dev))
11927                         info->data = tp->rxq_cnt;
11928                 else {
11929                         info->data = num_online_cpus();
11930                         if (info->data > TG3_RSS_MAX_NUM_QS)
11931                                 info->data = TG3_RSS_MAX_NUM_QS;
11932                 }
11933
11934                 /* The first interrupt vector only
11935                  * handles link interrupts.
11936                  */
11937                 info->data -= 1;
11938                 return 0;
11939
11940         default:
11941                 return -EOPNOTSUPP;
11942         }
11943 }
11944
11945 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11946 {
11947         u32 size = 0;
11948         struct tg3 *tp = netdev_priv(dev);
11949
11950         if (tg3_flag(tp, SUPPORT_MSIX))
11951                 size = TG3_RSS_INDIR_TBL_SIZE;
11952
11953         return size;
11954 }
11955
11956 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11957 {
11958         struct tg3 *tp = netdev_priv(dev);
11959         int i;
11960
11961         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11962                 indir[i] = tp->rss_ind_tbl[i];
11963
11964         return 0;
11965 }
11966
11967 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11968 {
11969         struct tg3 *tp = netdev_priv(dev);
11970         size_t i;
11971
11972         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11973                 tp->rss_ind_tbl[i] = indir[i];
11974
11975         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11976                 return 0;
11977
11978         /* It is legal to write the indirection
11979          * table while the device is running.
11980          */
11981         tg3_full_lock(tp, 0);
11982         tg3_rss_write_indir_tbl(tp);
11983         tg3_full_unlock(tp);
11984
11985         return 0;
11986 }
11987
11988 static void tg3_get_channels(struct net_device *dev,
11989                              struct ethtool_channels *channel)
11990 {
11991         struct tg3 *tp = netdev_priv(dev);
11992         u32 deflt_qs = netif_get_num_default_rss_queues();
11993
11994         channel->max_rx = tp->rxq_max;
11995         channel->max_tx = tp->txq_max;
11996
11997         if (netif_running(dev)) {
11998                 channel->rx_count = tp->rxq_cnt;
11999                 channel->tx_count = tp->txq_cnt;
12000         } else {
12001                 if (tp->rxq_req)
12002                         channel->rx_count = tp->rxq_req;
12003                 else
12004                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12005
12006                 if (tp->txq_req)
12007                         channel->tx_count = tp->txq_req;
12008                 else
12009                         channel->tx_count = min(deflt_qs, tp->txq_max);
12010         }
12011 }
12012
12013 static int tg3_set_channels(struct net_device *dev,
12014                             struct ethtool_channels *channel)
12015 {
12016         struct tg3 *tp = netdev_priv(dev);
12017
12018         if (!tg3_flag(tp, SUPPORT_MSIX))
12019                 return -EOPNOTSUPP;
12020
12021         if (channel->rx_count > tp->rxq_max ||
12022             channel->tx_count > tp->txq_max)
12023                 return -EINVAL;
12024
12025         tp->rxq_req = channel->rx_count;
12026         tp->txq_req = channel->tx_count;
12027
12028         if (!netif_running(dev))
12029                 return 0;
12030
12031         tg3_stop(tp);
12032
12033         tg3_carrier_off(tp);
12034
12035         tg3_start(tp, true, false, false);
12036
12037         return 0;
12038 }
12039
12040 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12041 {
12042         switch (stringset) {
12043         case ETH_SS_STATS:
12044                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12045                 break;
12046         case ETH_SS_TEST:
12047                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12048                 break;
12049         default:
12050                 WARN_ON(1);     /* we need a WARN() */
12051                 break;
12052         }
12053 }
12054
12055 static int tg3_set_phys_id(struct net_device *dev,
12056                             enum ethtool_phys_id_state state)
12057 {
12058         struct tg3 *tp = netdev_priv(dev);
12059
12060         if (!netif_running(tp->dev))
12061                 return -EAGAIN;
12062
12063         switch (state) {
12064         case ETHTOOL_ID_ACTIVE:
12065                 return 1;       /* cycle on/off once per second */
12066
12067         case ETHTOOL_ID_ON:
12068                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12069                      LED_CTRL_1000MBPS_ON |
12070                      LED_CTRL_100MBPS_ON |
12071                      LED_CTRL_10MBPS_ON |
12072                      LED_CTRL_TRAFFIC_OVERRIDE |
12073                      LED_CTRL_TRAFFIC_BLINK |
12074                      LED_CTRL_TRAFFIC_LED);
12075                 break;
12076
12077         case ETHTOOL_ID_OFF:
12078                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12079                      LED_CTRL_TRAFFIC_OVERRIDE);
12080                 break;
12081
12082         case ETHTOOL_ID_INACTIVE:
12083                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12084                 break;
12085         }
12086
12087         return 0;
12088 }
12089
12090 static void tg3_get_ethtool_stats(struct net_device *dev,
12091                                    struct ethtool_stats *estats, u64 *tmp_stats)
12092 {
12093         struct tg3 *tp = netdev_priv(dev);
12094
12095         if (tp->hw_stats)
12096                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12097         else
12098                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12099 }
12100
/* Locate and read the device's VPD (Vital Product Data) image.
 *
 * For legacy EEPROM images the NVRAM directory is searched for an
 * extended-VPD entry; if none is found (or the image is not legacy
 * EEPROM), the fixed default VPD offset/length are used.  Non-EEPROM
 * devices are read through the PCI VPD capability instead of NVRAM.
 *
 * Returns a kmalloc()ed buffer holding the VPD data (caller must
 * kfree() it) with its length stored in *vpdlen, or NULL on any
 * failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Scan the NVRAM directory for an extended-VPD entry. */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Entry found: length field is in 4-byte words;
			 * the data pointer lives in the next word.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		/* No extended-VPD entry; fall back to the default block. */
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		/* Read via the PCI VPD capability, retrying up to three
		 * times when the read is interrupted or times out.
		 */
		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
12176
/* Byte sizes of the NVRAM image variants verified by tg3_test_nvram():
 * the legacy EEPROM test region, the selfboot format-1 images by
 * revision, and the selfboot hardware image (with its data-byte count).
 */
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12186
/* NVRAM self-test: read the NVRAM image and verify its integrity.
 *
 * Three image layouts are handled:
 *  - selfboot firmware images (TG3_EEPROM_MAGIC_FW): 8-bit additive
 *    checksum over the image must be zero (rev 2 excludes one word);
 *  - selfboot hardware images (TG3_EEPROM_MAGIC_HW): per-byte parity
 *    bits packed alongside the data bytes;
 *  - legacy EEPROM images (TG3_EEPROM_MAGIC): CRC of the bootstrap and
 *    manufacturing blocks, plus an optional VPD RO-section checksum.
 *
 * Returns 0 on success, -EIO on a verification/read failure, or
 * -ENOMEM if a buffer cannot be allocated.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	/* Choose how many bytes to read based on the image format
	 * encoded in the magic word.
	 */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			/* Unknown selfboot format: nothing to verify. */
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Pull the whole image into memory, one big-endian word at a
	 * time; bail out if any read fails.
	 */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		/* A valid image sums to zero modulo 256. */
		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 each hold 7 parity bits. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				/* Bytes 16-17 hold the remaining 14 bits. */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Verify each data byte: the combined set-bit count of
		 * the byte plus its parity bit must be odd.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	/* Finally verify the VPD read-only section checksum, if one is
	 * present in the VPD block.
	 */
	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* Sum from the start of VPD through the checksum
			 * byte itself; the result must be zero.
			 */
			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
12365
/* Seconds tg3_test_link() will wait for link-up, by PHY type. */
#define TG3_SERDES_TIMEOUT_SEC  2
#define TG3_COPPER_TIMEOUT_SEC  6
12368
12369 static int tg3_test_link(struct tg3 *tp)
12370 {
12371         int i, max;
12372
12373         if (!netif_running(tp->dev))
12374                 return -ENODEV;
12375
12376         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12377                 max = TG3_SERDES_TIMEOUT_SEC;
12378         else
12379                 max = TG3_COPPER_TIMEOUT_SEC;
12380
12381         for (i = 0; i < max; i++) {
12382                 if (tp->link_up)
12383                         return 0;
12384
12385                 if (msleep_interruptible(1000))
12386                         break;
12387         }
12388
12389         return -EIO;
12390 }
12391
/* Only test the commonly used registers.
 *
 * For each table entry (filtered by ASIC-generation flags), save the
 * register, write all-zeros and then all-ones, and verify that the
 * read-only bits never change while the read/write bits take the
 * written value.  The original value is restored afterwards.
 * Returns 0 on success, -EIO on the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705     0x1
#define TG3_FL_NOT_5705 0x2
#define TG3_FL_NOT_5788 0x4
#define TG3_FL_NOT_5750 0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Table terminator. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this ASIC family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
12612
12613 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12614 {
12615         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12616         int i;
12617         u32 j;
12618
12619         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12620                 for (j = 0; j < len; j += 4) {
12621                         u32 val;
12622
12623                         tg3_write_mem(tp, offset + j, test_pattern[i]);
12624                         tg3_read_mem(tp, offset + j, &val);
12625                         if (val != test_pattern[i])
12626                                 return -EIO;
12627                 }
12628         }
12629         return 0;
12630 }
12631
/* Memory self-test: pattern-test each internal-memory region of the
 * chip.  The region list is chosen by ASIC generation; each table ends
 * with an offset of 0xffffffff.  Returns 0 on success or the first
 * error from tg3_do_mem_test().
 */
static int tg3_test_memory(struct tg3 *tp)
{
	/* Per-generation { offset, len } region tables. */
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Pick the region table matching this chip, newest first. */
	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
12701
/* TSO loopback test parameters: segment size and the header-length
 * components used to build the template packet below.
 */
#define TG3_TSO_MSS             500

#define TG3_TSO_IP_HDR_LEN      20
#define TG3_TSO_TCP_HDR_LEN     20
#define TG3_TSO_TCP_OPT_LEN     12

/* Template header bytes for the TSO loopback self-test, copied into
 * the test frame after the two MAC addresses.  NOTE(review): layout
 * appears to be EtherType followed by IPv4 + TCP headers sized per the
 * TG3_TSO_* constants above — confirm against tg3_run_loopback().
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
12724
/* Send one self-test frame through the currently configured loopback path
 * and verify it is received back intact.
 *
 * @tp:           device state
 * @pktsz:        total frame length to transmit (excluding FCS)
 * @tso_loopback: if true, build a canned TCP/IPv4 frame (tg3_tso_header)
 *                to exercise the TSO engine; otherwise send a single
 *                plain frame
 *
 * Returns 0 when every expected packet came back with the expected
 * payload; -ENOMEM on skb allocation failure; -EIO on mapping failure,
 * timeout, descriptor error, or payload mismatch.  The caller must have
 * placed the device in a loopback-enabled state beforehand.
 */
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
        u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
        u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
        u32 budget;
        struct sk_buff *skb;
        u8 *tx_data, *rx_data;
        dma_addr_t map;
        int num_pkts, tx_len, rx_len, i, err;
        struct tg3_rx_buffer_desc *desc;
        struct tg3_napi *tnapi, *rnapi;
        struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

        /* With RSS/TSS enabled, the test traffic flows through ring 1
         * rather than ring 0.
         */
        tnapi = &tp->napi[0];
        rnapi = &tp->napi[0];
        if (tp->irq_cnt > 1) {
                if (tg3_flag(tp, ENABLE_RSS))
                        rnapi = &tp->napi[1];
                if (tg3_flag(tp, ENABLE_TSS))
                        tnapi = &tp->napi[1];
        }
        coal_now = tnapi->coal_now | rnapi->coal_now;

        err = -EIO;

        tx_len = pktsz;
        skb = netdev_alloc_skb(tp->dev, tx_len);
        if (!skb)
                return -ENOMEM;

        /* Destination MAC is our own address; the next 8 bytes (source
         * MAC + ethertype) are zeroed.  In the TSO case the ethertype is
         * overwritten below by the tg3_tso_header copy.
         */
        tx_data = skb_put(skb, tx_len);
        memcpy(tx_data, tp->dev->dev_addr, 6);
        memset(tx_data + 6, 0x0, 8);

        /* Make sure the MAC accepts a frame of this size. */
        tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

        if (tso_loopback) {
                struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

                u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
                              TG3_TSO_TCP_OPT_LEN;

                /* Lay the canned IPv4/TCP template over the frame. */
                memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
                       sizeof(tg3_tso_header));
                mss = TG3_TSO_MSS;

                /* The payload will be segmented into ceil(payload/MSS)
                 * packets; that many rx completions are expected below.
                 */
                val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
                num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

                /* Set the total length field in the IP header */
                iph->tot_len = htons((u16)(mss + hdr_len));

                base_flags = (TXD_FLAG_CPU_PRE_DMA |
                              TXD_FLAG_CPU_POST_DMA);

                /* HW TSO engines need the TCP checksum field pre-zeroed;
                 * otherwise request checksum offload via the BD flag.
                 */
                if (tg3_flag(tp, HW_TSO_1) ||
                    tg3_flag(tp, HW_TSO_2) ||
                    tg3_flag(tp, HW_TSO_3)) {
                        struct tcphdr *th;
                        val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
                        th = (struct tcphdr *)&tx_data[val];
                        th->check = 0;
                } else
                        base_flags |= TXD_FLAG_TCPUDP_CSUM;

                /* Each TSO hardware generation encodes the header length
                 * into the mss/base_flags descriptor fields differently.
                 */
                if (tg3_flag(tp, HW_TSO_3)) {
                        mss |= (hdr_len & 0xc) << 12;
                        if (hdr_len & 0x10)
                                base_flags |= 0x00000010;
                        base_flags |= (hdr_len & 0x3e0) << 5;
                } else if (tg3_flag(tp, HW_TSO_2))
                        mss |= hdr_len << 9;
                else if (tg3_flag(tp, HW_TSO_1) ||
                         tg3_asic_rev(tp) == ASIC_REV_5705) {
                        mss |= (TG3_TSO_TCP_OPT_LEN << 9);
                } else {
                        base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
                }

                data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
        } else {
                num_pkts = 1;
                data_off = ETH_HLEN;

                if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
                    tx_len > VLAN_ETH_FRAME_LEN)
                        base_flags |= TXD_FLAG_JMB_PKT;
        }

        /* Fill the payload with a counting byte pattern so the receive
         * side can verify it byte for byte.
         */
        for (i = data_off; i < tx_len; i++)
                tx_data[i] = (u8) (i & 0xff);

        map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(tp->pdev, map)) {
                dev_kfree_skb(skb);
                return -EIO;
        }

        val = tnapi->tx_prod;
        tnapi->tx_buffers[val].skb = skb;
        dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
               rnapi->coal_now);

        udelay(10);

        /* Snapshot the rx producer so new arrivals can be detected. */
        rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

        budget = tg3_tx_avail(tnapi);
        if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
                            base_flags | TXD_FLAG_END, mss, 0)) {
                tnapi->tx_buffers[val].skb = NULL;
                dev_kfree_skb(skb);
                return -EIO;
        }

        tnapi->tx_prod++;

        /* Sync BD data before updating mailbox */
        wmb();

        tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
        tr32_mailbox(tnapi->prodmbox);

        udelay(10);

        /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
        for (i = 0; i < 35; i++) {
                tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
                       coal_now);

                udelay(10);

                tx_idx = tnapi->hw_status->idx[0].tx_consumer;
                rx_idx = rnapi->hw_status->idx[0].rx_producer;
                if ((tx_idx == tnapi->tx_prod) &&
                    (rx_idx == (rx_start_idx + num_pkts)))
                        break;
        }

        /* The tx skb is unmapped and freed whether or not we passed. */
        tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
        dev_kfree_skb(skb);

        if (tx_idx != tnapi->tx_prod)
                goto out;

        if (rx_idx != rx_start_idx + num_pkts)
                goto out;

        /* Walk each received completion: check error bits, length, the
         * ring it landed in (or the hardware checksum for TSO), then
         * verify the counting payload pattern.  val carries the expected
         * pattern byte across packets.
         */
        val = data_off;
        while (rx_idx != rx_start_idx) {
                desc = &rnapi->rx_rcb[rx_start_idx++];
                desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

                if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
                    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
                        goto out;

                rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
                         - ETH_FCS_LEN;

                if (!tso_loopback) {
                        if (rx_len != tx_len)
                                goto out;

                        /* The packet must have landed in the ring that
                         * matches its size.
                         */
                        if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
                                if (opaque_key != RXD_OPAQUE_RING_STD)
                                        goto out;
                        } else {
                                if (opaque_key != RXD_OPAQUE_RING_JUMBO)
                                        goto out;
                        }
                } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
                           (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
                            >> RXD_TCPCSUM_SHIFT != 0xffff) {
                        goto out;
                }

                if (opaque_key == RXD_OPAQUE_RING_STD) {
                        rx_data = tpr->rx_std_buffers[desc_idx].data;
                        map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
                                             mapping);
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
                        rx_data = tpr->rx_jmb_buffers[desc_idx].data;
                        map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
                                             mapping);
                } else
                        goto out;

                pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
                                            PCI_DMA_FROMDEVICE);

                rx_data += TG3_RX_OFFSET(tp);
                for (i = data_off; i < rx_len; i++, val++) {
                        if (*(rx_data + i) != (u8) (val & 0xff))
                                goto out;
                }
        }

        err = 0;

        /* tg3_free_rings will unmap and free the rx_data */
out:
        return err;
}
12932
/* Per-packet-type failure bits OR'd into the ethtool loopback results. */
#define TG3_STD_LOOPBACK_FAILED         1
#define TG3_JMB_LOOPBACK_FAILED         2
#define TG3_TSO_LOOPBACK_FAILED         4
/* All of the above: used when a test could not be attempted at all. */
#define TG3_LOOPBACK_FAILED \
        (TG3_STD_LOOPBACK_FAILED | \
         TG3_JMB_LOOPBACK_FAILED | \
         TG3_TSO_LOOPBACK_FAILED)
12940
/* Run the MAC-, PHY- and (optionally) external-cable loopback tests.
 *
 * @tp:         device state
 * @data:       ethtool results array; TG3_*_LOOPB_TEST slots collect
 *              TG3_*_LOOPBACK_FAILED bits
 * @do_extlpbk: also run the external loopback test
 *
 * Returns 0 if every attempted loopback passed, -EIO otherwise (or the
 * tg3_reset_hw() error if the reset itself failed).
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
        int err = -EIO;
        u32 eee_cap;
        u32 jmb_pkt_sz = 9000;

        if (tp->dma_limit)
                jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

        /* The EEE capability flag is saved and cleared here for the
         * duration of the test, and restored at "done".
         */
        eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
        tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

        if (!netif_running(tp->dev)) {
                data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
                data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
                if (do_extlpbk)
                        data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
                goto done;
        }

        err = tg3_reset_hw(tp, 1);
        if (err) {
                data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
                data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
                if (do_extlpbk)
                        data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
                goto done;
        }

        if (tg3_flag(tp, ENABLE_RSS)) {
                int i;

                /* Reroute all rx packets to the 1st queue */
                for (i = MAC_RSS_INDIR_TBL_0;
                     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
                        tw32(i, 0x0);
        }

        /* HW errata - mac loopback fails in some cases on 5780.
         * Normal traffic and PHY loopback are not affected by
         * errata.  Also, the MAC loopback test is deprecated for
         * all newer ASIC revisions.
         */
        if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
            !tg3_flag(tp, CPMU_PRESENT)) {
                tg3_mac_loopback(tp, true);

                if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
                        data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

                if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
                    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
                        data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

                tg3_mac_loopback(tp, false);
        }

        /* Internal (and optionally external) PHY loopback, only when the
         * driver is managing the PHY itself.
         */
        if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
            !tg3_flag(tp, USE_PHYLIB)) {
                int i;

                tg3_phy_lpbk_set(tp, 0, false);

                /* Wait for link */
                for (i = 0; i < 100; i++) {
                        if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
                                break;
                        mdelay(1);
                }

                if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
                        data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
                if (tg3_flag(tp, TSO_CAPABLE) &&
                    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
                        data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
                if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
                    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
                        data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

                if (do_extlpbk) {
                        tg3_phy_lpbk_set(tp, 0, true);

                        /* All link indications report up, but the hardware
                         * isn't really ready for about 20 msec.  Double it
                         * to be sure.
                         */
                        mdelay(40);

                        if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
                                data[TG3_EXT_LOOPB_TEST] |=
                                                        TG3_STD_LOOPBACK_FAILED;
                        if (tg3_flag(tp, TSO_CAPABLE) &&
                            tg3_run_loopback(tp, ETH_FRAME_LEN, true))
                                data[TG3_EXT_LOOPB_TEST] |=
                                                        TG3_TSO_LOOPBACK_FAILED;
                        if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
                            tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
                                data[TG3_EXT_LOOPB_TEST] |=
                                                        TG3_JMB_LOOPBACK_FAILED;
                }

                /* Re-enable gphy autopowerdown. */
                if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
                        tg3_phy_toggle_apd(tp, true);
        }

        err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
               data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
        tp->phy_flags |= eee_cap;

        return err;
}
13055
/* ethtool self_test handler.  Runs the NVRAM, link, register, memory,
 * loopback and interrupt tests; each failing test sets its slot in
 * @data to a nonzero value and ETH_TEST_FL_FAILED in etest->flags.
 * The offline tests halt the chip and restore it afterwards.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
                          u64 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

        /* If the chip cannot be brought out of low power, mark every
         * test failed and bail out.
         */
        if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
            tg3_power_up(tp)) {
                etest->flags |= ETH_TEST_FL_FAILED;
                memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
                return;
        }

        memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

        if (tg3_test_nvram(tp) != 0) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[TG3_NVRAM_TEST] = 1;
        }
        if (!doextlpbk && tg3_test_link(tp)) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[TG3_LINK_TEST] = 1;
        }
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int err, err2 = 0, irq_sync = 0;

                /* Quiesce the interface before taking the chip down. */
                if (netif_running(dev)) {
                        tg3_phy_stop(tp);
                        tg3_netif_stop(tp);
                        irq_sync = 1;
                }

                tg3_full_lock(tp, irq_sync);
                tg3_halt(tp, RESET_KIND_SUSPEND, 1);
                err = tg3_nvram_lock(tp);
                tg3_halt_cpu(tp, RX_CPU_BASE);
                if (!tg3_flag(tp, 5705_PLUS))
                        tg3_halt_cpu(tp, TX_CPU_BASE);
                if (!err)
                        tg3_nvram_unlock(tp);

                if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
                        tg3_phy_reset(tp);

                if (tg3_test_registers(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[TG3_REGISTER_TEST] = 1;
                }

                if (tg3_test_memory(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[TG3_MEMORY_TEST] = 1;
                }

                if (doextlpbk)
                        etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

                if (tg3_test_loopback(tp, data, doextlpbk))
                        etest->flags |= ETH_TEST_FL_FAILED;

                tg3_full_unlock(tp);

                /* The interrupt test needs the lock dropped. */
                if (tg3_test_interrupt(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[TG3_INTERRUPT_TEST] = 1;
                }

                tg3_full_lock(tp, 0);

                /* Bring the chip back up for normal operation. */
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                if (netif_running(dev)) {
                        tg3_flag_set(tp, INIT_COMPLETE);
                        err2 = tg3_restart_hw(tp, 1);
                        if (!err2)
                                tg3_netif_start(tp);
                }

                tg3_full_unlock(tp);

                if (irq_sync && !err2)
                        tg3_phy_start(tp);
        }
        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                tg3_power_down(tp);

}
13142
/* SIOCSHWTSTAMP handler: configure hardware packet timestamping.
 *
 * Copies a struct hwtstamp_config from user space, validates it,
 * enables/disables tx timestamping, translates the requested rx filter
 * into TG3_RX_PTP_CTL_* bits, programs the register if the interface is
 * running, and echoes the accepted config back to user space.
 *
 * Returns 0 on success; -EINVAL if the device is not PTP capable or the
 * flags field is nonzero; -ERANGE for an unsupported tx type or rx
 * filter; -EFAULT on a user-memory fault.
 */
static int tg3_hwtstamp_ioctl(struct net_device *dev,
                              struct ifreq *ifr, int cmd)
{
        struct tg3 *tp = netdev_priv(dev);
        struct hwtstamp_config stmpconf;

        if (!tg3_flag(tp, PTP_CAPABLE))
                return -EINVAL;

        if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
                return -EFAULT;

        /* No flag bits are defined; reject reserved values. */
        if (stmpconf.flags)
                return -EINVAL;

        switch (stmpconf.tx_type) {
        case HWTSTAMP_TX_ON:
                tg3_flag_set(tp, TX_TSTAMP_EN);
                break;
        case HWTSTAMP_TX_OFF:
                tg3_flag_clear(tp, TX_TSTAMP_EN);
                break;
        default:
                return -ERANGE;
        }

        /* Map each supported rx filter to the PTP engine enable bit
         * (V1, V2, V2-over-L2, V2-over-L4) plus an event mask.
         */
        switch (stmpconf.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                tp->rxptpctl = 0;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
                               TG3_RX_PTP_CTL_ALL_V1_EVENTS;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
                               TG3_RX_PTP_CTL_SYNC_EVNT;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
                               TG3_RX_PTP_CTL_DELAY_REQ;
                break;
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
                               TG3_RX_PTP_CTL_ALL_V2_EVENTS;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
                               TG3_RX_PTP_CTL_ALL_V2_EVENTS;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
                               TG3_RX_PTP_CTL_ALL_V2_EVENTS;
                break;
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
                               TG3_RX_PTP_CTL_SYNC_EVNT;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
                               TG3_RX_PTP_CTL_SYNC_EVNT;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
                               TG3_RX_PTP_CTL_SYNC_EVNT;
                break;
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
                               TG3_RX_PTP_CTL_DELAY_REQ;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
                               TG3_RX_PTP_CTL_DELAY_REQ;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
                               TG3_RX_PTP_CTL_DELAY_REQ;
                break;
        default:
                return -ERANGE;
        }

        if (netif_running(dev) && tp->rxptpctl)
                tw32(TG3_RX_PTP_CTL,
                     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

        return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
                -EFAULT : 0;
}
13232
/* ndo_do_ioctl handler: MII register access (SIOCGMIIPHY/SIOCGMIIREG/
 * SIOCSMIIREG) and hardware timestamping (SIOCSHWTSTAMP).  When phylib
 * manages the PHY, MII ioctls are delegated to phy_mii_ioctl().
 * SerDes devices have no MII PHY and fall through to -EOPNOTSUPP.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (tg3_flag(tp, USE_PHYLIB)) {
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
                return phy_mii_ioctl(phydev, ifr, cmd);
        }

        switch (cmd) {
        case SIOCGMIIPHY:
                data->phy_id = tp->phy_addr;

                /* fallthru */
        case SIOCGMIIREG: {
                u32 mii_regval;

                if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                        break;                  /* We have no PHY */

                if (!netif_running(dev))
                        return -EAGAIN;

                /* tp->lock serializes PHY register access. */
                spin_lock_bh(&tp->lock);
                err = __tg3_readphy(tp, data->phy_id & 0x1f,
                                    data->reg_num & 0x1f, &mii_regval);
                spin_unlock_bh(&tp->lock);

                data->val_out = mii_regval;

                return err;
        }

        case SIOCSMIIREG:
                if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                        break;                  /* We have no PHY */

                if (!netif_running(dev))
                        return -EAGAIN;

                spin_lock_bh(&tp->lock);
                err = __tg3_writephy(tp, data->phy_id & 0x1f,
                                     data->reg_num & 0x1f, data->val_in);
                spin_unlock_bh(&tp->lock);

                return err;

        case SIOCSHWTSTAMP:
                return tg3_hwtstamp_ioctl(dev, ifr, cmd);

        default:
                /* do nothing */
                break;
        }
        return -EOPNOTSUPP;
}
13294
13295 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13296 {
13297         struct tg3 *tp = netdev_priv(dev);
13298
13299         memcpy(ec, &tp->coal, sizeof(*ec));
13300         return 0;
13301 }
13302
13303 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13304 {
13305         struct tg3 *tp = netdev_priv(dev);
13306         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13307         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13308
13309         if (!tg3_flag(tp, 5705_PLUS)) {
13310                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13311                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13312                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13313                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13314         }
13315
13316         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13317             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13318             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13319             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13320             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13321             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13322             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13323             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13324             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13325             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13326                 return -EINVAL;
13327
13328         /* No rx interrupts will be generated if both are zero */
13329         if ((ec->rx_coalesce_usecs == 0) &&
13330             (ec->rx_max_coalesced_frames == 0))
13331                 return -EINVAL;
13332
13333         /* No tx interrupts will be generated if both are zero */
13334         if ((ec->tx_coalesce_usecs == 0) &&
13335             (ec->tx_max_coalesced_frames == 0))
13336                 return -EINVAL;
13337
13338         /* Only copy relevant parameters, ignore all others. */
13339         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13340         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13341         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13342         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13343         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13344         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13345         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13346         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13347         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13348
13349         if (netif_running(dev)) {
13350                 tg3_full_lock(tp, 0);
13351                 __tg3_set_coalesce(tp, &tp->coal);
13352                 tg3_full_unlock(tp);
13353         }
13354         return 0;
13355 }
13356
/* ethtool entry points exported by this driver. */
static const struct ethtool_ops tg3_ethtool_ops = {
        .get_settings           = tg3_get_settings,
        .set_settings           = tg3_set_settings,
        .get_drvinfo            = tg3_get_drvinfo,
        .get_regs_len           = tg3_get_regs_len,
        .get_regs               = tg3_get_regs,
        .get_wol                = tg3_get_wol,
        .set_wol                = tg3_set_wol,
        .get_msglevel           = tg3_get_msglevel,
        .set_msglevel           = tg3_set_msglevel,
        .nway_reset             = tg3_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = tg3_get_eeprom_len,
        .get_eeprom             = tg3_get_eeprom,
        .set_eeprom             = tg3_set_eeprom,
        .get_ringparam          = tg3_get_ringparam,
        .set_ringparam          = tg3_set_ringparam,
        .get_pauseparam         = tg3_get_pauseparam,
        .set_pauseparam         = tg3_set_pauseparam,
        .self_test              = tg3_self_test,
        .get_strings            = tg3_get_strings,
        .set_phys_id            = tg3_set_phys_id,
        .get_ethtool_stats      = tg3_get_ethtool_stats,
        .get_coalesce           = tg3_get_coalesce,
        .set_coalesce           = tg3_set_coalesce,
        .get_sset_count         = tg3_get_sset_count,
        .get_rxnfc              = tg3_get_rxnfc,
        .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
        .get_rxfh_indir         = tg3_get_rxfh_indir,
        .set_rxfh_indir         = tg3_set_rxfh_indir,
        .get_channels           = tg3_get_channels,
        .set_channels           = tg3_set_channels,
        .get_ts_info            = tg3_get_ts_info,
};
13391
13392 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13393                                                 struct rtnl_link_stats64 *stats)
13394 {
13395         struct tg3 *tp = netdev_priv(dev);
13396
13397         spin_lock_bh(&tp->lock);
13398         if (!tp->hw_stats) {
13399                 spin_unlock_bh(&tp->lock);
13400                 return &tp->net_stats_prev;
13401         }
13402
13403         tg3_get_nstats(tp, stats);
13404         spin_unlock_bh(&tp->lock);
13405
13406         return stats;
13407 }
13408
/* ndo_set_rx_mode handler: reprogram the rx filters under the full
 * device lock.  A no-op while the interface is down; the filters are
 * programmed during bring-up instead.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        if (netif_running(dev)) {
                tg3_full_lock(tp, 0);
                __tg3_set_rx_mode(dev);
                tg3_full_unlock(tp);
        }
}
13420
13421 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13422                                int new_mtu)
13423 {
13424         dev->mtu = new_mtu;
13425
13426         if (new_mtu > ETH_DATA_LEN) {
13427                 if (tg3_flag(tp, 5780_CLASS)) {
13428                         netdev_update_features(dev);
13429                         tg3_flag_clear(tp, TSO_CAPABLE);
13430                 } else {
13431                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
13432                 }
13433         } else {
13434                 if (tg3_flag(tp, 5780_CLASS)) {
13435                         tg3_flag_set(tp, TSO_CAPABLE);
13436                         netdev_update_features(dev);
13437                 }
13438                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13439         }
13440 }
13441
/* ndo_change_mtu handler.  Validates the requested MTU; if the interface
 * is down just records it, otherwise stops traffic, halts the chip,
 * applies the MTU, and restarts the hardware.  Returns 0 on success,
 * -EINVAL for an out-of-range MTU, or the tg3_restart_hw() error.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
        struct tg3 *tp = netdev_priv(dev);
        int err, reset_phy = 0;

        if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                tg3_set_mtu(dev, tp, new_mtu);
                return 0;
        }

        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

        tg3_set_mtu(dev, tp, new_mtu);

        /* Reset PHY, otherwise the read DMA engine will be in a mode that
         * breaks all requests to 256 bytes.
         */
        if (tg3_asic_rev(tp) == ASIC_REV_57766)
                reset_phy = 1;

        err = tg3_restart_hw(tp, reset_phy);

        if (!err)
                tg3_netif_start(tp);

        tg3_full_unlock(tp);

        /* Restart PHY state machinery outside the full lock. */
        if (!err)
                tg3_phy_start(tp);

        return err;
}
13486
/* net_device_ops vtable wiring the tg3 entry points into the core
 * networking stack.
 */
static const struct net_device_ops tg3_netdev_ops = {
        .ndo_open               = tg3_open,
        .ndo_stop               = tg3_close,
        .ndo_start_xmit         = tg3_start_xmit,
        .ndo_get_stats64        = tg3_get_stats64,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = tg3_set_rx_mode,
        .ndo_set_mac_address    = tg3_set_mac_addr,
        .ndo_do_ioctl           = tg3_ioctl,
        .ndo_tx_timeout         = tg3_tx_timeout,
        .ndo_change_mtu         = tg3_change_mtu,
        .ndo_fix_features       = tg3_fix_features,
        .ndo_set_features       = tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = tg3_poll_controller,
#endif
};
13504
13505 static void tg3_get_eeprom_size(struct tg3 *tp)
13506 {
13507         u32 cursize, val, magic;
13508
13509         tp->nvram_size = EEPROM_CHIP_SIZE;
13510
13511         if (tg3_nvram_read(tp, 0, &magic) != 0)
13512                 return;
13513
13514         if ((magic != TG3_EEPROM_MAGIC) &&
13515             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13516             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13517                 return;
13518
13519         /*
13520          * Size the chip by reading offsets at increasing powers of two.
13521          * When we encounter our validation signature, we know the addressing
13522          * has wrapped around, and thus have our chip size.
13523          */
13524         cursize = 0x10;
13525
13526         while (cursize < tp->nvram_size) {
13527                 if (tg3_nvram_read(tp, cursize, &val) != 0)
13528                         return;
13529
13530                 if (val == magic)
13531                         break;
13532
13533                 cursize <<= 1;
13534         }
13535
13536         tp->nvram_size = cursize;
13537 }
13538
13539 static void tg3_get_nvram_size(struct tg3 *tp)
13540 {
13541         u32 val;
13542
13543         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13544                 return;
13545
13546         /* Selfboot format */
13547         if (val != TG3_EEPROM_MAGIC) {
13548                 tg3_get_eeprom_size(tp);
13549                 return;
13550         }
13551
13552         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13553                 if (val != 0) {
13554                         /* This is confusing.  We want to operate on the
13555                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
13556                          * call will read from NVRAM and byteswap the data
13557                          * according to the byteswapping settings for all
13558                          * other register accesses.  This ensures the data we
13559                          * want will always reside in the lower 16-bits.
13560                          * However, the data in NVRAM is in LE format, which
13561                          * means the data from the NVRAM read will always be
13562                          * opposite the endianness of the CPU.  The 16-bit
13563                          * byteswap then brings the data to CPU endianness.
13564                          */
13565                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13566                         return;
13567                 }
13568         }
13569         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13570 }
13571
/* Decode NVRAM_CFG1 on legacy (pre-5752) parts and record the flash
 * vendor, page size and buffering mode in *tp.  Used for 5750 and the
 * 5780 class; anything else defaults to a buffered Atmel AT45DB0X1B.
 */
static void tg3_get_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);
        if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
                tg3_flag_set(tp, FLASH);
        } else {
                /* No flash interface: drop compatibility bypass so the
                 * EEPROM is accessed through the normal FSM.
                 */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
            tg3_flag(tp, 5780_CLASS)) {
                switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
                case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
                        break;
                case FLASH_VENDOR_ATMEL_EEPROM:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_ST:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_SAIFUN:
                        tp->nvram_jedecnum = JEDEC_SAIFUN;
                        tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
                        break;
                case FLASH_VENDOR_SST_SMALL:
                case FLASH_VENDOR_SST_LARGE:
                        tp->nvram_jedecnum = JEDEC_SST;
                        tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
                        break;
                }
        } else {
                /* Default assumption for other ASIC revs. */
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                tg3_flag_set(tp, NVRAM_BUFFERED);
        }
}
13622
13623 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13624 {
13625         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13626         case FLASH_5752PAGE_SIZE_256:
13627                 tp->nvram_pagesize = 256;
13628                 break;
13629         case FLASH_5752PAGE_SIZE_512:
13630                 tp->nvram_pagesize = 512;
13631                 break;
13632         case FLASH_5752PAGE_SIZE_1K:
13633                 tp->nvram_pagesize = 1024;
13634                 break;
13635         case FLASH_5752PAGE_SIZE_2K:
13636                 tp->nvram_pagesize = 2048;
13637                 break;
13638         case FLASH_5752PAGE_SIZE_4K:
13639                 tp->nvram_pagesize = 4096;
13640                 break;
13641         case FLASH_5752PAGE_SIZE_264:
13642                 tp->nvram_pagesize = 264;
13643                 break;
13644         case FLASH_5752PAGE_SIZE_528:
13645                 tp->nvram_pagesize = 528;
13646                 break;
13647         }
13648 }
13649
/* Decode NVRAM_CFG1 on 5752 parts: record flash vendor, buffering and
 * page size, and note TPM write protection.
 */
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27))
                tg3_flag_set(tp, PROTECTED_NVRAM);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
        case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                break;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                break;
        }

        if (tg3_flag(tp, FLASH)) {
                tg3_nvram_get_pagesize(tp, nvcfg1);
        } else {
                /* For eeprom, set pagesize to maximum eeprom size */
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                /* EEPROM path: disable compatibility bypass. */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }
}
13690
/* Decode NVRAM_CFG1 on 5755 parts: record flash vendor, buffering,
 * page size and total size.  When TPM protection is strapped in
 * (bit 27), the usable size reported is reduced to the unprotected
 * region.
 */
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tg3_flag_set(tp, PROTECTED_NVRAM);
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
        case FLASH_5755VENDOR_ATMEL_FLASH_1:
        case FLASH_5755VENDOR_ATMEL_FLASH_2:
        case FLASH_5755VENDOR_ATMEL_FLASH_3:
        case FLASH_5755VENDOR_ATMEL_FLASH_5:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 264;
                if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
                    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
                        tp->nvram_size = (protect ? 0x3e200 :
                                          TG3_NVRAM_SIZE_512KB);
                else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
                        tp->nvram_size = (protect ? 0x1f200 :
                                          TG3_NVRAM_SIZE_256KB);
                else
                        tp->nvram_size = (protect ? 0x1f200 :
                                          TG3_NVRAM_SIZE_128KB);
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
                        tp->nvram_size = (protect ?
                                          TG3_NVRAM_SIZE_64KB :
                                          TG3_NVRAM_SIZE_128KB);
                else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
                        tp->nvram_size = (protect ?
                                          TG3_NVRAM_SIZE_64KB :
                                          TG3_NVRAM_SIZE_256KB);
                else
                        tp->nvram_size = (protect ?
                                          TG3_NVRAM_SIZE_128KB :
                                          TG3_NVRAM_SIZE_512KB);
                break;
        }
}
13746
/* Decode NVRAM_CFG1 on 5787-family parts (also used for 5784/5785):
 * record flash vendor, buffering and page size.
 */
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
        case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                /* EEPROM: whole chip is one "page". */
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                /* EEPROM path: disable compatibility bypass. */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                break;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
        case FLASH_5755VENDOR_ATMEL_FLASH_1:
        case FLASH_5755VENDOR_ATMEL_FLASH_2:
        case FLASH_5755VENDOR_ATMEL_FLASH_3:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 264;
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                break;
        }
}
13784
/* Decode NVRAM_CFG1 on 5761 parts: record flash vendor, buffering,
 * page size and total size.  When TPM protection is strapped in, the
 * size comes from the hardware lockout register rather than the strap
 * table.
 */
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tg3_flag_set(tp, PROTECTED_NVRAM);
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
        case FLASH_5761VENDOR_ATMEL_ADB021D:
        case FLASH_5761VENDOR_ATMEL_ADB041D:
        case FLASH_5761VENDOR_ATMEL_ADB081D:
        case FLASH_5761VENDOR_ATMEL_ADB161D:
        case FLASH_5761VENDOR_ATMEL_MDB021D:
        case FLASH_5761VENDOR_ATMEL_MDB041D:
        case FLASH_5761VENDOR_ATMEL_MDB081D:
        case FLASH_5761VENDOR_ATMEL_MDB161D:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                /* Atmel parts here use linear addressing. */
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
                tp->nvram_pagesize = 256;
                break;
        case FLASH_5761VENDOR_ST_A_M45PE20:
        case FLASH_5761VENDOR_ST_A_M45PE40:
        case FLASH_5761VENDOR_ST_A_M45PE80:
        case FLASH_5761VENDOR_ST_A_M45PE16:
        case FLASH_5761VENDOR_ST_M_M45PE20:
        case FLASH_5761VENDOR_ST_M_M45PE40:
        case FLASH_5761VENDOR_ST_M_M45PE80:
        case FLASH_5761VENDOR_ST_M_M45PE16:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                break;
        }

        if (protect) {
                /* Protected: usable size is whatever the hardware
                 * lockout register reports.
                 */
                tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
        } else {
                switch (nvcfg1) {
                case FLASH_5761VENDOR_ATMEL_ADB161D:
                case FLASH_5761VENDOR_ATMEL_MDB161D:
                case FLASH_5761VENDOR_ST_A_M45PE16:
                case FLASH_5761VENDOR_ST_M_M45PE16:
                        tp->nvram_size = TG3_NVRAM_SIZE_2MB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB081D:
                case FLASH_5761VENDOR_ATMEL_MDB081D:
                case FLASH_5761VENDOR_ST_A_M45PE80:
                case FLASH_5761VENDOR_ST_M_M45PE80:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB041D:
                case FLASH_5761VENDOR_ATMEL_MDB041D:
                case FLASH_5761VENDOR_ST_A_M45PE40:
                case FLASH_5761VENDOR_ST_M_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB021D:
                case FLASH_5761VENDOR_ATMEL_MDB021D:
                case FLASH_5761VENDOR_ST_A_M45PE20:
                case FLASH_5761VENDOR_ST_M_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                }
        }
}
13859
13860 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13861 {
13862         tp->nvram_jedecnum = JEDEC_ATMEL;
13863         tg3_flag_set(tp, NVRAM_BUFFERED);
13864         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13865 }
13866
/* Decode NVRAM_CFG1 on 57780-family (and 57765-class) parts: record
 * flash vendor, buffering, page size and total size.  Unknown straps
 * mark the device as having no NVRAM.
 */
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                /* EEPROM path: disable compatibility bypass and skip
                 * the flash page-size probing below.
                 */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                return;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
        case FLASH_57780VENDOR_ATMEL_AT45DB011D:
        case FLASH_57780VENDOR_ATMEL_AT45DB011B:
        case FLASH_57780VENDOR_ATMEL_AT45DB021D:
        case FLASH_57780VENDOR_ATMEL_AT45DB021B:
        case FLASH_57780VENDOR_ATMEL_AT45DB041D:
        case FLASH_57780VENDOR_ATMEL_AT45DB041B:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                /* Inner switch picks the part size. */
                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                case FLASH_57780VENDOR_ATMEL_AT45DB011D:
                case FLASH_57780VENDOR_ATMEL_AT45DB011B:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                case FLASH_57780VENDOR_ATMEL_AT45DB021D:
                case FLASH_57780VENDOR_ATMEL_AT45DB021B:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_57780VENDOR_ATMEL_AT45DB041D:
                case FLASH_57780VENDOR_ATMEL_AT45DB041B:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                }
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ST_M45PE10:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                case FLASH_5752VENDOR_ST_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        /* 264/528-byte pages keep hardware address translation;
         * power-of-two pages address linearly.
         */
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13938
13939
/* Decode NVRAM_CFG1 on 5717/5719 parts: record flash vendor,
 * buffering, page size and total size.  Unknown straps mark the device
 * as having no NVRAM.
 */
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5717VENDOR_ATMEL_EEPROM:
        case FLASH_5717VENDOR_MICRO_EEPROM:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                /* EEPROM path: disable compatibility bypass and skip
                 * the flash page-size probing below.
                 */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                return;
        case FLASH_5717VENDOR_ATMEL_MDB011D:
        case FLASH_5717VENDOR_ATMEL_ADB011B:
        case FLASH_5717VENDOR_ATMEL_ADB011D:
        case FLASH_5717VENDOR_ATMEL_MDB021D:
        case FLASH_5717VENDOR_ATMEL_ADB021B:
        case FLASH_5717VENDOR_ATMEL_ADB021D:
        case FLASH_5717VENDOR_ATMEL_45USPT:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5717VENDOR_ATMEL_MDB021D:
                        /* Detect size with tg3_nvram_get_size() */
                        break;
                case FLASH_5717VENDOR_ATMEL_ADB021B:
                case FLASH_5717VENDOR_ATMEL_ADB021D:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        case FLASH_5717VENDOR_ST_M_M25PE10:
        case FLASH_5717VENDOR_ST_A_M25PE10:
        case FLASH_5717VENDOR_ST_M_M45PE10:
        case FLASH_5717VENDOR_ST_A_M45PE10:
        case FLASH_5717VENDOR_ST_M_M25PE20:
        case FLASH_5717VENDOR_ST_A_M25PE20:
        case FLASH_5717VENDOR_ST_M_M45PE20:
        case FLASH_5717VENDOR_ST_A_M45PE20:
        case FLASH_5717VENDOR_ST_25USPT:
        case FLASH_5717VENDOR_ST_45USPT:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5717VENDOR_ST_M_M25PE20:
                case FLASH_5717VENDOR_ST_M_M45PE20:
                        /* Detect size with tg3_nvram_get_size() */
                        break;
                case FLASH_5717VENDOR_ST_A_M25PE20:
                case FLASH_5717VENDOR_ST_A_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        /* 264/528-byte pages keep hardware address translation;
         * power-of-two pages address linearly.
         */
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14017
/* Decode NVRAM_CFG1 on 5720/5762 parts: record flash vendor,
 * buffering, page size and total size.  5762 pinstraps are first
 * remapped onto 5720 equivalents; on 5762 the NVRAM content's magic
 * is also validated at the end.
 */
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, nvmpinstrp;

        nvcfg1 = tr32(NVRAM_CFG1);
        nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

        if (tg3_asic_rev(tp) == ASIC_REV_5762) {
                if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
                        tg3_flag_set(tp, NO_NVRAM);
                        return;
                }

                /* Remap 5762-specific pinstraps onto 5720 values so
                 * the shared decode below can be reused.
                 */
                switch (nvmpinstrp) {
                case FLASH_5762_EEPROM_HD:
                        nvmpinstrp = FLASH_5720_EEPROM_HD;
                        break;
                case FLASH_5762_EEPROM_LD:
                        nvmpinstrp = FLASH_5720_EEPROM_LD;
                        break;
                case FLASH_5720VENDOR_M_ST_M45PE20:
                        /* This pinstrap supports multiple sizes, so force it
                         * to read the actual size from location 0xf0.
                         */
                        nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
                        break;
                }
        }

        switch (nvmpinstrp) {
        case FLASH_5720_EEPROM_HD:
        case FLASH_5720_EEPROM_LD:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);

                /* EEPROM path: disable compatibility bypass. */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                if (nvmpinstrp == FLASH_5720_EEPROM_HD)
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                else
                        tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
                return;
        case FLASH_5720VENDOR_M_ATMEL_DB011D:
        case FLASH_5720VENDOR_A_ATMEL_DB011B:
        case FLASH_5720VENDOR_A_ATMEL_DB011D:
        case FLASH_5720VENDOR_M_ATMEL_DB021D:
        case FLASH_5720VENDOR_A_ATMEL_DB021B:
        case FLASH_5720VENDOR_A_ATMEL_DB021D:
        case FLASH_5720VENDOR_M_ATMEL_DB041D:
        case FLASH_5720VENDOR_A_ATMEL_DB041B:
        case FLASH_5720VENDOR_A_ATMEL_DB041D:
        case FLASH_5720VENDOR_M_ATMEL_DB081D:
        case FLASH_5720VENDOR_A_ATMEL_DB081D:
        case FLASH_5720VENDOR_ATMEL_45USPT:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvmpinstrp) {
                case FLASH_5720VENDOR_M_ATMEL_DB021D:
                case FLASH_5720VENDOR_A_ATMEL_DB021B:
                case FLASH_5720VENDOR_A_ATMEL_DB021D:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5720VENDOR_M_ATMEL_DB041D:
                case FLASH_5720VENDOR_A_ATMEL_DB041B:
                case FLASH_5720VENDOR_A_ATMEL_DB041D:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5720VENDOR_M_ATMEL_DB081D:
                case FLASH_5720VENDOR_A_ATMEL_DB081D:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                default:
                        /* On 5762 leave size unset so it is read from
                         * the NVRAM image itself later.
                         */
                        if (tg3_asic_rev(tp) != ASIC_REV_5762)
                                tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        case FLASH_5720VENDOR_M_ST_M25PE10:
        case FLASH_5720VENDOR_M_ST_M45PE10:
        case FLASH_5720VENDOR_A_ST_M25PE10:
        case FLASH_5720VENDOR_A_ST_M45PE10:
        case FLASH_5720VENDOR_M_ST_M25PE20:
        case FLASH_5720VENDOR_M_ST_M45PE20:
        case FLASH_5720VENDOR_A_ST_M25PE20:
        case FLASH_5720VENDOR_A_ST_M45PE20:
        case FLASH_5720VENDOR_M_ST_M25PE40:
        case FLASH_5720VENDOR_M_ST_M45PE40:
        case FLASH_5720VENDOR_A_ST_M25PE40:
        case FLASH_5720VENDOR_A_ST_M45PE40:
        case FLASH_5720VENDOR_M_ST_M25PE80:
        case FLASH_5720VENDOR_M_ST_M45PE80:
        case FLASH_5720VENDOR_A_ST_M25PE80:
        case FLASH_5720VENDOR_A_ST_M45PE80:
        case FLASH_5720VENDOR_ST_25USPT:
        case FLASH_5720VENDOR_ST_45USPT:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvmpinstrp) {
                case FLASH_5720VENDOR_M_ST_M25PE20:
                case FLASH_5720VENDOR_M_ST_M45PE20:
                case FLASH_5720VENDOR_A_ST_M25PE20:
                case FLASH_5720VENDOR_A_ST_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5720VENDOR_M_ST_M25PE40:
                case FLASH_5720VENDOR_M_ST_M45PE40:
                case FLASH_5720VENDOR_A_ST_M25PE40:
                case FLASH_5720VENDOR_A_ST_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5720VENDOR_M_ST_M25PE80:
                case FLASH_5720VENDOR_M_ST_M45PE80:
                case FLASH_5720VENDOR_A_ST_M25PE80:
                case FLASH_5720VENDOR_A_ST_M45PE80:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                default:
                        if (tg3_asic_rev(tp) != ASIC_REV_5762)
                                tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        /* 264/528-byte pages keep hardware address translation;
         * power-of-two pages address linearly.
         */
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

        if (tg3_asic_rev(tp) == ASIC_REV_5762) {
                u32 val;

                if (tg3_nvram_read(tp, 0, &val))
                        return;

                /* 5762: no recognized magic in word 0 means the flash
                 * holds no usable image.
                 */
                if (val != TG3_EEPROM_MAGIC &&
                    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
                        tg3_flag_set(tp, NO_NVRAM);
        }
}
14164
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Initialize NVRAM access: reset the EEPROM FSM, enable seeprom
 * accesses, then dispatch to the per-ASIC strap decoder and finally
 * size the device.  5700/5701 (and SSB cores, which have no NVRAM at
 * all) take the fallback paths.
 */
static void tg3_nvram_init(struct tg3 *tp)
{
        if (tg3_flag(tp, IS_SSB_CORE)) {
                /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
                tg3_flag_clear(tp, NVRAM);
                tg3_flag_clear(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        /* Reset the EEPROM state machine and program its clock. */
        tw32_f(GRC_EEPROM_ADDR,
             (EEPROM_ADDR_FSM_RESET |
              (EEPROM_DEFAULT_CLOCK_PERIOD <<
               EEPROM_ADDR_CLKPERD_SHIFT)));

        msleep(1);

        /* Enable seeprom accesses. */
        tw32_f(GRC_LOCAL_CTRL,
             tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
        udelay(100);

        if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
            tg3_asic_rev(tp) != ASIC_REV_5701) {
                tg3_flag_set(tp, NVRAM);

                /* Strap decoding requires exclusive NVRAM access. */
                if (tg3_nvram_lock(tp)) {
                        netdev_warn(tp->dev,
                                    "Cannot get nvram lock, %s failed\n",
                                    __func__);
                        return;
                }
                tg3_enable_nvram_access(tp);

                /* Decoders set nvram_size only when the strap fixes it;
                 * 0 means "probe the size afterwards".
                 */
                tp->nvram_size = 0;

                if (tg3_asic_rev(tp) == ASIC_REV_5752)
                        tg3_get_5752_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_5755)
                        tg3_get_5755_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
                         tg3_asic_rev(tp) == ASIC_REV_5784 ||
                         tg3_asic_rev(tp) == ASIC_REV_5785)
                        tg3_get_5787_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        tg3_get_5761_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_5906)
                        tg3_get_5906_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
                         tg3_flag(tp, 57765_CLASS))
                        tg3_get_57780_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
                         tg3_asic_rev(tp) == ASIC_REV_5719)
                        tg3_get_5717_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
                         tg3_asic_rev(tp) == ASIC_REV_5762)
                        tg3_get_5720_nvram_info(tp);
                else
                        tg3_get_nvram_info(tp);

                if (tp->nvram_size == 0)
                        tg3_get_nvram_size(tp);

                tg3_disable_nvram_access(tp);
                tg3_nvram_unlock(tp);

        } else {
                /* 5700/5701: plain EEPROM only, size it by probing. */
                tg3_flag_clear(tp, NVRAM);
                tg3_flag_clear(tp, NVRAM_BUFFERED);

                tg3_get_eeprom_size(tp);
        }
}
14239
/* Maps a PCI subsystem (vendor, device) ID pair to the PHY ID known to
 * be present on that board.  Used as a fallback when the PHY ID cannot
 * be read from the chip or the EEPROM (see tg3_lookup_by_subsys()).
 */
struct subsys_tbl_ent {
        u16 subsys_vendor, subsys_devid;
        u32 phy_id;
};
14244
/* Hardcoded board -> PHY ID table consulted by tg3_lookup_by_subsys()
 * when neither the chip nor the EEPROM yields a usable PHY ID.  A
 * phy_id of 0 marks boards whose PHY is treated as a serdes interface
 * by tg3_phy_probe().
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
        /* Broadcom boards. */
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

        /* 3com boards. */
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

        /* DELL boards. */
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

        /* Compaq boards. */
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

        /* IBM boards. */
        { TG3PCI_SUBVENDOR_ID_IBM,
          TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
14308
14309 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14310 {
14311         int i;
14312
14313         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14314                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14315                      tp->pdev->subsystem_vendor) &&
14316                     (subsys_id_to_phy_id[i].subsys_devid ==
14317                      tp->pdev->subsystem_device))
14318                         return &subsys_id_to_phy_id[i];
14319         }
14320         return NULL;
14321 }
14322
/* Pull the board configuration out of NVRAM/SRAM shadow registers and
 * translate it into driver flags: PHY ID, LED mode, write protection,
 * ASF/APE firmware presence, WOL capability/enable, and assorted PHY
 * workaround flags.  Finally syncs the WOL state to the PM core.
 */
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
        u32 val;

        tp->phy_id = TG3_PHY_ID_INVALID;
        tp->led_ctrl = LED_CTRL_MODE_PHY_1;

        /* Assume an onboard device and WOL capable by default.  */
        tg3_flag_set(tp, EEPROM_WRITE_PROT);
        tg3_flag_set(tp, WOL_CAP);

        /* 5906 exposes its config through the VCPU shadow register
         * instead of the NIC SRAM block parsed below.
         */
        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
                        tg3_flag_clear(tp, EEPROM_WRITE_PROT);
                        tg3_flag_set(tp, IS_NIC);
                }
                val = tr32(VCPU_CFGSHDW);
                if (val & VCPU_CFGSHDW_ASPM_DBNC)
                        tg3_flag_set(tp, ASPM_WORKAROUND);
                if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
                    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
                        tg3_flag_set(tp, WOL_ENABLE);
                        device_set_wakeup_enable(&tp->pdev->dev, true);
                }
                goto done;
        }

        /* Only trust the SRAM config area if its signature is valid;
         * otherwise leave the defaults set above.
         */
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg, led_cfg;
                u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
                int eeprom_phy_serdes = 0;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                tp->nic_sram_data_cfg = nic_cfg;

                tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
                ver >>= NIC_SRAM_DATA_VER_SHIFT;
                /* CFG_2 only exists for new-enough bootcode on chips
                 * later than 5700/5701/5703.
                 */
                if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
                    tg3_asic_rev(tp) != ASIC_REV_5703 &&
                    (ver > 0) && (ver < 0x100))
                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

                if (tg3_asic_rev(tp) == ASIC_REV_5785)
                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

                if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
                    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
                        eeprom_phy_serdes = 1;

                /* Repack the SRAM PHY ID words into the driver's
                 * internal PHY ID format (same packing as the MII
                 * PHYSID read in tg3_phy_probe()).
                 */
                tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
                if (nic_phy_id != 0) {
                        u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
                        u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

                        eeprom_phy_id  = (id1 >> 16) << 10;
                        eeprom_phy_id |= (id2 & 0xfc00) << 16;
                        eeprom_phy_id |= (id2 & 0x03ff) <<  0;
                } else
                        eeprom_phy_id = 0;

                tp->phy_id = eeprom_phy_id;
                if (eeprom_phy_serdes) {
                        if (!tg3_flag(tp, 5705_PLUS))
                                tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
                        else
                                tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
                }

                if (tg3_flag(tp, 5750_PLUS))
                        led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
                                    SHASTA_EXT_LED_MODE_MASK);
                else
                        led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

                switch (led_cfg) {
                default:
                case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
                        tp->led_ctrl = LED_CTRL_MODE_PHY_1;
                        break;

                case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
                        tp->led_ctrl = LED_CTRL_MODE_PHY_2;
                        break;

                case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
                        tp->led_ctrl = LED_CTRL_MODE_MAC;

                        /* Default to PHY_1_MODE if 0 (MAC_MODE) is
                         * read on some older 5700/5701 bootcode.
                         */
                        if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
                            tg3_asic_rev(tp) == ASIC_REV_5701)
                                tp->led_ctrl = LED_CTRL_MODE_PHY_1;

                        break;

                case SHASTA_EXT_LED_SHARED:
                        tp->led_ctrl = LED_CTRL_MODE_SHARED;
                        if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
                            tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
                                tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
                                                 LED_CTRL_MODE_PHY_2);
                        break;

                case SHASTA_EXT_LED_MAC:
                        tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
                        break;

                case SHASTA_EXT_LED_COMBO:
                        tp->led_ctrl = LED_CTRL_MODE_COMBO;
                        if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
                                tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
                                                 LED_CTRL_MODE_PHY_2);
                        break;

                }

                /* Board-specific LED overrides take precedence over the
                 * mode decoded from NVRAM above.
                 */
                if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
                     tg3_asic_rev(tp) == ASIC_REV_5701) &&
                    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
                        tp->led_ctrl = LED_CTRL_MODE_PHY_2;

                if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
                        tp->led_ctrl = LED_CTRL_MODE_PHY_1;

                if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
                        tg3_flag_set(tp, EEPROM_WRITE_PROT);
                        /* Specific Arima boards set the WP bit but must
                         * still allow EEPROM writes.
                         */
                        if ((tp->pdev->subsystem_vendor ==
                             PCI_VENDOR_ID_ARIMA) &&
                            (tp->pdev->subsystem_device == 0x205a ||
                             tp->pdev->subsystem_device == 0x2063))
                                tg3_flag_clear(tp, EEPROM_WRITE_PROT);
                } else {
                        tg3_flag_clear(tp, EEPROM_WRITE_PROT);
                        tg3_flag_set(tp, IS_NIC);
                }

                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tg3_flag_set(tp, ENABLE_ASF);
                        if (tg3_flag(tp, 5750_PLUS))
                                tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
                }

                if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
                    tg3_flag(tp, 5750_PLUS))
                        tg3_flag_set(tp, ENABLE_APE);

                /* Serdes boards only keep WOL if the config says the
                 * fiber interface supports it.
                 */
                if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
                    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
                        tg3_flag_clear(tp, WOL_CAP);

                if (tg3_flag(tp, WOL_CAP) &&
                    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
                        tg3_flag_set(tp, WOL_ENABLE);
                        device_set_wakeup_enable(&tp->pdev->dev, true);
                }

                if (cfg2 & (1 << 17))
                        tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

                /* serdes signal pre-emphasis in register 0x590 set by */
                /* bootcode if bit 18 is set */
                if (cfg2 & (1 << 18))
                        tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

                if ((tg3_flag(tp, 57765_PLUS) ||
                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
                      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
                    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
                        tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

                if (tg3_flag(tp, PCI_EXPRESS) &&
                    tg3_asic_rev(tp) != ASIC_REV_5785 &&
                    !tg3_flag(tp, 57765_PLUS)) {
                        u32 cfg3;

                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
                        if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
                                tg3_flag_set(tp, ASPM_WORKAROUND);
                }

                if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
                        tg3_flag_set(tp, RGMII_INBAND_DISABLE);
                if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
                        tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
                if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
                        tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
        }
done:
        /* Report the final WOL capability/enable state to the PM core. */
        if (tg3_flag(tp, WOL_CAP))
                device_set_wakeup_enable(&tp->pdev->dev,
                                         tg3_flag(tp, WOL_ENABLE));
        else
                device_set_wakeup_capable(&tp->pdev->dev, false);
}
14520
/* Read one 32-bit word from the APE OTP area at @offset.
 * Takes the NVRAM lock around the whole operation.  Returns 0 on
 * success with the word stored in *val, a negative errno if the lock
 * could not be taken, or -EBUSY if the OTP command never completed.
 */
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
        int i, err;
        u32 val2, off = offset * 8;

        err = tg3_nvram_lock(tp);
        if (err)
                return err;

        /* Issue a read command to the APE OTP controller. */
        tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
        tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
                        APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
        tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
        udelay(10);

        /* Poll for completion, up to ~1 ms. */
        for (i = 0; i < 100; i++) {
                val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
                if (val2 & APE_OTP_STATUS_CMD_DONE) {
                        *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
                        break;
                }
                udelay(10);
        }

        tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

        tg3_nvram_unlock(tp);
        if (val2 & APE_OTP_STATUS_CMD_DONE)
                return 0;

        return -EBUSY;
}
14553
/* Start OTP controller command @cmd and poll for completion.
 * Returns 0 once OTP_STATUS reports the command done, or -EBUSY if it
 * has not completed within ~1 ms.
 */
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
        int i;
        u32 val;

        tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
        tw32(OTP_CTRL, cmd);

        /* Wait for up to 1 ms for command to execute. */
        for (i = 0; i < 100; i++) {
                val = tr32(OTP_STATUS);
                if (val & OTP_STATUS_CMD_DONE)
                        break;
                udelay(10);
        }

        return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
14572
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 * Returns 0 if any OTP command fails.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
        u32 bhalf_otp, thalf_otp;

        /* Route OTP access through the GRC register interface. */
        tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

        if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
                return 0;

        /* First read: word containing the top half of the config. */
        tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

        if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
                return 0;

        thalf_otp = tr32(OTP_READ_DATA);

        /* Second read: word containing the bottom half. */
        tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

        if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
                return 0;

        bhalf_otp = tr32(OTP_READ_DATA);

        /* Merge: low 16 bits of the first word become the high half,
         * high 16 bits of the second word become the low half.
         */
        return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
14602
14603 static void tg3_phy_init_link_config(struct tg3 *tp)
14604 {
14605         u32 adv = ADVERTISED_Autoneg;
14606
14607         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14608                 adv |= ADVERTISED_1000baseT_Half |
14609                        ADVERTISED_1000baseT_Full;
14610
14611         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14612                 adv |= ADVERTISED_100baseT_Half |
14613                        ADVERTISED_100baseT_Full |
14614                        ADVERTISED_10baseT_Half |
14615                        ADVERTISED_10baseT_Full |
14616                        ADVERTISED_TP;
14617         else
14618                 adv |= ADVERTISED_FIBRE;
14619
14620         tp->link_config.advertising = adv;
14621         tp->link_config.speed = SPEED_UNKNOWN;
14622         tp->link_config.duplex = DUPLEX_UNKNOWN;
14623         tp->link_config.autoneg = AUTONEG_ENABLE;
14624         tp->link_config.active_speed = SPEED_UNKNOWN;
14625         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14626
14627         tp->old_link = -1;
14628 }
14629
/* Determine the PHY attached to the chip and initialize link defaults.
 * The PHY ID is taken, in order of preference, from: the MII PHYSID
 * registers (unless ASF/APE firmware owns the PHY), the value already
 * set from the EEPROM by tg3_get_eeprom_hw_cfg(), or the hardcoded
 * subsystem-ID table.  Returns 0 on success or a negative errno.
 */
static int tg3_phy_probe(struct tg3 *tp)
{
        u32 hw_phy_id_1, hw_phy_id_2;
        u32 hw_phy_id, hw_phy_id_masked;
        int err;

        /* flow control autonegotiation is default behavior */
        tg3_flag_set(tp, PAUSE_AUTONEG);
        tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

        /* Each PCI function has its own APE PHY lock. */
        if (tg3_flag(tp, ENABLE_APE)) {
                switch (tp->pci_fn) {
                case 0:
                        tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
                        break;
                case 1:
                        tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
                        break;
                case 2:
                        tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
                        break;
                case 3:
                        tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
                        break;
                }
        }

        if (tg3_flag(tp, USE_PHYLIB))
                return tg3_phy_init(tp);

        /* Reading the PHY ID register can conflict with ASF
         * firmware access to the PHY hardware.
         */
        err = 0;
        if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
                hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
        } else {
                /* Now read the physical PHY_ID from the chip and verify
                 * that it is sane.  If it doesn't look good, we fall back
                 * to either the hard-coded table based PHY_ID and failing
                 * that the value found in the eeprom area.
                 */
                err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
                err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

                /* Pack PHYSID1/PHYSID2 into the driver's PHY ID format
                 * (same packing as tg3_get_eeprom_hw_cfg()).
                 */
                hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
                hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
                hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

                hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
        }

        if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
                tp->phy_id = hw_phy_id;
                if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
                        tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
                else
                        tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
        } else {
                if (tp->phy_id != TG3_PHY_ID_INVALID) {
                        /* Do nothing, phy ID already set up in
                         * tg3_get_eeprom_hw_cfg().
                         */
                } else {
                        struct subsys_tbl_ent *p;

                        /* No eeprom signature?  Try the hardcoded
                         * subsys device table.
                         */
                        p = tg3_lookup_by_subsys(tp);
                        if (p) {
                                tp->phy_id = p->phy_id;
                        } else if (!tg3_flag(tp, IS_SSB_CORE)) {
                                /* For now we saw the IDs 0xbc050cd0,
                                 * 0xbc050f80 and 0xbc050c30 on devices
                                 * connected to an BCM4785 and there are
                                 * probably more. Just assume that the phy is
                                 * supported when it is connected to a SSB core
                                 * for now.
                                 */
                                return -ENODEV;
                        }

                        /* phy_id 0 in the subsys table marks a serdes
                         * board.
                         */
                        if (!tp->phy_id ||
                            tp->phy_id == TG3_PHY_ID_BCM8002)
                                tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
                }
        }

        /* Mark EEE-capable copper PHY chip revisions. */
        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
            (tg3_asic_rev(tp) == ASIC_REV_5719 ||
             tg3_asic_rev(tp) == ASIC_REV_5720 ||
             tg3_asic_rev(tp) == ASIC_REV_57766 ||
             tg3_asic_rev(tp) == ASIC_REV_5762 ||
             (tg3_asic_rev(tp) == ASIC_REV_5717 &&
              tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
             (tg3_asic_rev(tp) == ASIC_REV_57765 &&
              tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
                tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

        tg3_phy_init_link_config(tp);

        /* If no firmware owns the PHY, reset it and restart autoneg
         * unless link is already up with a matching advertisement.
         */
        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
            !tg3_flag(tp, ENABLE_APE) &&
            !tg3_flag(tp, ENABLE_ASF)) {
                u32 bmsr, dummy;

                /* Double-read BMSR: link status is latched low. */
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    (bmsr & BMSR_LSTATUS))
                        goto skip_phy_reset;

                err = tg3_phy_reset(tp);
                if (err)
                        return err;

                tg3_phy_set_wirespeed(tp);

                if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
                        tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
                                            tp->link_config.flowctrl);

                        tg3_writephy(tp, MII_BMCR,
                                     BMCR_ANENABLE | BMCR_ANRESTART);
                }
        }

skip_phy_reset:
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
                err = tg3_init_5401phy_dsp(tp);
                if (err)
                        return err;

                /* NOTE(review): called a second time deliberately, it
                 * appears, with only the second result propagated --
                 * confirm against BCM5401 DSP init requirements.
                 */
                err = tg3_init_5401phy_dsp(tp);
        }

        return err;
}
14768
/* Populate tp->board_part_number (and, for Dell boards, prefix
 * tp->fw_ver) from the PCI VPD read-only section.  If no usable VPD is
 * found, fall back to a name derived from the PCI device ID, or "none".
 */
static void tg3_read_vpd(struct tg3 *tp)
{
        u8 *vpd_data;
        unsigned int block_end, rosize, len;
        u32 vpdlen;
        int j, i = 0;

        vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
        if (!vpd_data)
                goto out_no_vpd;

        /* Locate the read-only VPD resource and bounds-check it. */
        i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
        if (i < 0)
                goto out_not_found;

        rosize = pci_vpd_lrdt_size(&vpd_data[i]);
        block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
        i += PCI_VPD_LRDT_TAG_SIZE;

        if (block_end > vpdlen)
                goto out_not_found;

        /* Dell boards (MFR_ID "1028") carry a vendor-specific keyword
         * whose value is used as the firmware version prefix.
         */
        j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
                                      PCI_VPD_RO_KEYWORD_MFR_ID);
        if (j > 0) {
                len = pci_vpd_info_field_size(&vpd_data[j]);

                j += PCI_VPD_INFO_FLD_HDR_SIZE;
                if (j + len > block_end || len != 4 ||
                    memcmp(&vpd_data[j], "1028", 4))
                        goto partno;

                j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
                                              PCI_VPD_RO_KEYWORD_VENDOR0);
                if (j < 0)
                        goto partno;

                len = pci_vpd_info_field_size(&vpd_data[j]);

                j += PCI_VPD_INFO_FLD_HDR_SIZE;
                if (j + len > block_end)
                        goto partno;

                if (len >= sizeof(tp->fw_ver))
                        len = sizeof(tp->fw_ver) - 1;
                memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
                snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
                         &vpd_data[j]);
        }

partno:
        /* Extract the board part number keyword. */
        i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
                                      PCI_VPD_RO_KEYWORD_PARTNO);
        if (i < 0)
                goto out_not_found;

        len = pci_vpd_info_field_size(&vpd_data[i]);

        i += PCI_VPD_INFO_FLD_HDR_SIZE;
        if (len > TG3_BPN_SIZE ||
            (len + i) > vpdlen)
                goto out_not_found;

        memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
        kfree(vpd_data);
        if (tp->board_part_number[0])
                return;

out_no_vpd:
        /* No VPD part number: derive a name from the PCI device ID. */
        if (tg3_asic_rev(tp) == ASIC_REV_5717) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
                        strcpy(tp->board_part_number, "BCM5717");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
                        strcpy(tp->board_part_number, "BCM5718");
                else
                        goto nomatch;
        } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
                        strcpy(tp->board_part_number, "BCM57780");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
                        strcpy(tp->board_part_number, "BCM57760");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
                        strcpy(tp->board_part_number, "BCM57790");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
                        strcpy(tp->board_part_number, "BCM57788");
                else
                        goto nomatch;
        } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
                        strcpy(tp->board_part_number, "BCM57761");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
                        strcpy(tp->board_part_number, "BCM57765");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
                        strcpy(tp->board_part_number, "BCM57781");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
                        strcpy(tp->board_part_number, "BCM57785");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
                        strcpy(tp->board_part_number, "BCM57791");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
                        strcpy(tp->board_part_number, "BCM57795");
                else
                        goto nomatch;
        } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
                        strcpy(tp->board_part_number, "BCM57762");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
                        strcpy(tp->board_part_number, "BCM57766");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
                        strcpy(tp->board_part_number, "BCM57782");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
                        strcpy(tp->board_part_number, "BCM57786");
                else
                        goto nomatch;
        } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                strcpy(tp->board_part_number, "BCM95906");
        } else {
nomatch:
                strcpy(tp->board_part_number, "none");
        }
}
14892
14893 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14894 {
14895         u32 val;
14896
14897         if (tg3_nvram_read(tp, offset, &val) ||
14898             (val & 0xfc000000) != 0x0c000000 ||
14899             tg3_nvram_read(tp, offset + 4, &val) ||
14900             val != 0)
14901                 return 0;
14902
14903         return 1;
14904 }
14905
/* Append the bootcode version to tp->fw_ver.  Newer bootcode images
 * (recognized by the 0x0c000000 signature followed by a zero word, as
 * in tg3_fw_img_is_valid()) store a 16-byte version string; older ones
 * store a packed major/minor word formatted as "vM.mm".
 */
static void tg3_read_bc_ver(struct tg3 *tp)
{
        u32 val, offset, start, ver_offset;
        int i, dst_off;
        bool newver = false;

        /* Word 0xc holds the image pointer, word 0x4 its load base. */
        if (tg3_nvram_read(tp, 0xc, &offset) ||
            tg3_nvram_read(tp, 0x4, &start))
                return;

        offset = tg3_nvram_logical_addr(tp, offset);

        if (tg3_nvram_read(tp, offset, &val))
                return;

        if ((val & 0xfc000000) == 0x0c000000) {
                if (tg3_nvram_read(tp, offset + 4, &val))
                        return;

                if (val == 0)
                        newver = true;
        }

        /* Append after whatever is already in fw_ver (e.g. VPD data). */
        dst_off = strlen(tp->fw_ver);

        if (newver) {
                /* Need room for the 16-byte version string. */
                if (TG3_VER_SIZE - dst_off < 16 ||
                    tg3_nvram_read(tp, offset + 8, &ver_offset))
                        return;

                /* ver_offset is relative to the image load base. */
                offset = offset + ver_offset - start;
                for (i = 0; i < 16; i += 4) {
                        __be32 v;
                        if (tg3_nvram_read_be32(tp, offset + i, &v))
                                return;

                        memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
                }
        } else {
                u32 major, minor;

                if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
                        return;

                major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
                        TG3_NVM_BCVER_MAJSFT;
                minor = ver_offset & TG3_NVM_BCVER_MINMSK;
                snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
                         "v%d.%02d", major, minor);
        }
}
14957
14958 static void tg3_read_hwsb_ver(struct tg3 *tp)
14959 {
14960         u32 val, major, minor;
14961
14962         /* Use native endian representation */
14963         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14964                 return;
14965
14966         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14967                 TG3_NVM_HWSB_CFG1_MAJSFT;
14968         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14969                 TG3_NVM_HWSB_CFG1_MINSFT;
14970
14971         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
14972 }
14973
/* Decode a selfboot (format 1) firmware version from NVRAM and append
 * it to tp->fw_ver as "sb vM.mm[a-z]".  @val is the magic/format word
 * the caller already read from NVRAM offset 0.
 */
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* The edition-history word lives at a revision-dependent offset;
	 * unknown revisions get no version suffix at all.
	 */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity limit: minor prints as two digits and build maps onto a
	 * single letter 'a'..'z' (26 values) below.
	 */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	/* Builds >= 1 are tagged with a trailing letter: 1 -> 'a'. */
	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
15028
15029 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15030 {
15031         u32 val, offset, start;
15032         int i, vlen;
15033
15034         for (offset = TG3_NVM_DIR_START;
15035              offset < TG3_NVM_DIR_END;
15036              offset += TG3_NVM_DIRENT_SIZE) {
15037                 if (tg3_nvram_read(tp, offset, &val))
15038                         return;
15039
15040                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15041                         break;
15042         }
15043
15044         if (offset == TG3_NVM_DIR_END)
15045                 return;
15046
15047         if (!tg3_flag(tp, 5705_PLUS))
15048                 start = 0x08000000;
15049         else if (tg3_nvram_read(tp, offset - 4, &start))
15050                 return;
15051
15052         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15053             !tg3_fw_img_is_valid(tp, offset) ||
15054             tg3_nvram_read(tp, offset + 8, &val))
15055                 return;
15056
15057         offset += val - start;
15058
15059         vlen = strlen(tp->fw_ver);
15060
15061         tp->fw_ver[vlen++] = ',';
15062         tp->fw_ver[vlen++] = ' ';
15063
15064         for (i = 0; i < 4; i++) {
15065                 __be32 v;
15066                 if (tg3_nvram_read_be32(tp, offset, &v))
15067                         return;
15068
15069                 offset += sizeof(v);
15070
15071                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15072                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15073                         break;
15074                 }
15075
15076                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15077                 vlen += sizeof(v);
15078         }
15079 }
15080
15081 static void tg3_probe_ncsi(struct tg3 *tp)
15082 {
15083         u32 apedata;
15084
15085         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15086         if (apedata != APE_SEG_SIG_MAGIC)
15087                 return;
15088
15089         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15090         if (!(apedata & APE_FW_STATUS_READY))
15091                 return;
15092
15093         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15094                 tg3_flag_set(tp, APE_HAS_NCSI);
15095 }
15096
15097 static void tg3_read_dash_ver(struct tg3 *tp)
15098 {
15099         int vlen;
15100         u32 apedata;
15101         char *fwtype;
15102
15103         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15104
15105         if (tg3_flag(tp, APE_HAS_NCSI))
15106                 fwtype = "NCSI";
15107         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15108                 fwtype = "SMASH";
15109         else
15110                 fwtype = "DASH";
15111
15112         vlen = strlen(tp->fw_ver);
15113
15114         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15115                  fwtype,
15116                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15117                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15118                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15119                  (apedata & APE_FW_VERSION_BLDMSK));
15120 }
15121
/* On 5762 parts the firmware version can live in OTP rather than
 * NVRAM.  Read the two magic words and, if valid, append " .NN" to
 * tp->fw_ver.  No-op on other ASICs or on OTP read failure.
 */
static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		/* Scan up to 7 bytes from the low end; @ver ends up
		 * holding the last non-zero byte before a zero byte.
		 * NOTE(review): presumably the OTP words encode the
		 * version as a zero-terminated byte sequence -- confirm
		 * against the OTP layout spec.
		 */
		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}
15146
15147 static void tg3_read_fw_ver(struct tg3 *tp)
15148 {
15149         u32 val;
15150         bool vpd_vers = false;
15151
15152         if (tp->fw_ver[0] != 0)
15153                 vpd_vers = true;
15154
15155         if (tg3_flag(tp, NO_NVRAM)) {
15156                 strcat(tp->fw_ver, "sb");
15157                 tg3_read_otp_ver(tp);
15158                 return;
15159         }
15160
15161         if (tg3_nvram_read(tp, 0, &val))
15162                 return;
15163
15164         if (val == TG3_EEPROM_MAGIC)
15165                 tg3_read_bc_ver(tp);
15166         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15167                 tg3_read_sb_ver(tp, val);
15168         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15169                 tg3_read_hwsb_ver(tp);
15170
15171         if (tg3_flag(tp, ENABLE_ASF)) {
15172                 if (tg3_flag(tp, ENABLE_APE)) {
15173                         tg3_probe_ncsi(tp);
15174                         if (!vpd_vers)
15175                                 tg3_read_dash_ver(tp);
15176                 } else if (!vpd_vers) {
15177                         tg3_read_mgmtfw_ver(tp);
15178                 }
15179         }
15180
15181         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15182 }
15183
15184 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15185 {
15186         if (tg3_flag(tp, LRG_PROD_RING_CAP))
15187                 return TG3_RX_RET_MAX_SIZE_5717;
15188         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15189                 return TG3_RX_RET_MAX_SIZE_5700;
15190         else
15191                 return TG3_RX_RET_MAX_SIZE_5705;
15192 }
15193
15194 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15195         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15196         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15197         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15198         { },
15199 };
15200
15201 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15202 {
15203         struct pci_dev *peer;
15204         unsigned int func, devnr = tp->pdev->devfn & ~7;
15205
15206         for (func = 0; func < 8; func++) {
15207                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15208                 if (peer && peer != tp->pdev)
15209                         break;
15210                 pci_dev_put(peer);
15211         }
15212         /* 5704 can be configured in single-port mode, set peer to
15213          * tp->pdev in that case.
15214          */
15215         if (!peer) {
15216                 peer = tp->pdev;
15217                 return peer;
15218         }
15219
15220         /*
15221          * We don't need to keep the refcount elevated; there's no way
15222          * to remove one half of this device without removing the other
15223          */
15224         pci_dev_put(peer);
15225
15226         return peer;
15227 }
15228
/* Determine the chip revision ID (reading the product-ID register on
 * newer devices that report ASIC_REV_USE_PROD_ID_REG), apply known
 * chip-ID quirks, then derive the family flags (5705_PLUS, 5750_PLUS,
 * 5755_PLUS, 5780_CLASS, 57765_CLASS/PLUS, 5717_PLUS).  The flag
 * cascade is order-dependent: later flags are built on earlier ones.
 */
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		/* Select which product-ID config register holds the real
		 * chip revision for this device ID.
		 */
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* 5717 C0 reports the 5720 A0 chip ID; normalize it. */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	     tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
15313
15314 static bool tg3_10_100_only_device(struct tg3 *tp,
15315                                    const struct pci_device_id *ent)
15316 {
15317         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15318
15319         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15320              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15321             (tp->phy_flags & TG3_PHYFLG_IS_FET))
15322                 return true;
15323
15324         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15325                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15326                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15327                                 return true;
15328                 } else {
15329                         return true;
15330                 }
15331         }
15332
15333         return false;
15334 }
15335
15336 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15337 {
15338         u32 misc_ctrl_reg;
15339         u32 pci_state_reg, grc_misc_cfg;
15340         u32 val;
15341         u16 pci_cmd;
15342         int err;
15343
15344         /* Force memory write invalidate off.  If we leave it on,
15345          * then on 5700_BX chips we have to enable a workaround.
15346          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15347          * to match the cacheline size.  The Broadcom driver have this
15348          * workaround but turns MWI off all the times so never uses
15349          * it.  This seems to suggest that the workaround is insufficient.
15350          */
15351         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15352         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15353         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15354
15355         /* Important! -- Make sure register accesses are byteswapped
15356          * correctly.  Also, for those chips that require it, make
15357          * sure that indirect register accesses are enabled before
15358          * the first operation.
15359          */
15360         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15361                               &misc_ctrl_reg);
15362         tp->misc_host_ctrl |= (misc_ctrl_reg &
15363                                MISC_HOST_CTRL_CHIPREV);
15364         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15365                                tp->misc_host_ctrl);
15366
15367         tg3_detect_asic_rev(tp, misc_ctrl_reg);
15368
15369         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15370          * we need to disable memory and use config. cycles
15371          * only to access all registers. The 5702/03 chips
15372          * can mistakenly decode the special cycles from the
15373          * ICH chipsets as memory write cycles, causing corruption
15374          * of register and memory space. Only certain ICH bridges
15375          * will drive special cycles with non-zero data during the
15376          * address phase which can fall within the 5703's address
15377          * range. This is not an ICH bug as the PCI spec allows
15378          * non-zero address during special cycles. However, only
15379          * these ICH bridges are known to drive non-zero addresses
15380          * during special cycles.
15381          *
15382          * Since special cycles do not cross PCI bridges, we only
15383          * enable this workaround if the 5703 is on the secondary
15384          * bus of these ICH bridges.
15385          */
15386         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15387             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15388                 static struct tg3_dev_id {
15389                         u32     vendor;
15390                         u32     device;
15391                         u32     rev;
15392                 } ich_chipsets[] = {
15393                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15394                           PCI_ANY_ID },
15395                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15396                           PCI_ANY_ID },
15397                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15398                           0xa },
15399                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15400                           PCI_ANY_ID },
15401                         { },
15402                 };
15403                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15404                 struct pci_dev *bridge = NULL;
15405
15406                 while (pci_id->vendor != 0) {
15407                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
15408                                                 bridge);
15409                         if (!bridge) {
15410                                 pci_id++;
15411                                 continue;
15412                         }
15413                         if (pci_id->rev != PCI_ANY_ID) {
15414                                 if (bridge->revision > pci_id->rev)
15415                                         continue;
15416                         }
15417                         if (bridge->subordinate &&
15418                             (bridge->subordinate->number ==
15419                              tp->pdev->bus->number)) {
15420                                 tg3_flag_set(tp, ICH_WORKAROUND);
15421                                 pci_dev_put(bridge);
15422                                 break;
15423                         }
15424                 }
15425         }
15426
15427         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15428                 static struct tg3_dev_id {
15429                         u32     vendor;
15430                         u32     device;
15431                 } bridge_chipsets[] = {
15432                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15433                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15434                         { },
15435                 };
15436                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15437                 struct pci_dev *bridge = NULL;
15438
15439                 while (pci_id->vendor != 0) {
15440                         bridge = pci_get_device(pci_id->vendor,
15441                                                 pci_id->device,
15442                                                 bridge);
15443                         if (!bridge) {
15444                                 pci_id++;
15445                                 continue;
15446                         }
15447                         if (bridge->subordinate &&
15448                             (bridge->subordinate->number <=
15449                              tp->pdev->bus->number) &&
15450                             (bridge->subordinate->busn_res.end >=
15451                              tp->pdev->bus->number)) {
15452                                 tg3_flag_set(tp, 5701_DMA_BUG);
15453                                 pci_dev_put(bridge);
15454                                 break;
15455                         }
15456                 }
15457         }
15458
15459         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15460          * DMA addresses > 40-bit. This bridge may have other additional
15461          * 57xx devices behind it in some 4-port NIC designs for example.
15462          * Any tg3 device found behind the bridge will also need the 40-bit
15463          * DMA workaround.
15464          */
15465         if (tg3_flag(tp, 5780_CLASS)) {
15466                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15467                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15468         } else {
15469                 struct pci_dev *bridge = NULL;
15470
15471                 do {
15472                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15473                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
15474                                                 bridge);
15475                         if (bridge && bridge->subordinate &&
15476                             (bridge->subordinate->number <=
15477                              tp->pdev->bus->number) &&
15478                             (bridge->subordinate->busn_res.end >=
15479                              tp->pdev->bus->number)) {
15480                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15481                                 pci_dev_put(bridge);
15482                                 break;
15483                         }
15484                 } while (bridge);
15485         }
15486
15487         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15488             tg3_asic_rev(tp) == ASIC_REV_5714)
15489                 tp->pdev_peer = tg3_find_peer(tp);
15490
15491         /* Determine TSO capabilities */
15492         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15493                 ; /* Do nothing. HW bug. */
15494         else if (tg3_flag(tp, 57765_PLUS))
15495                 tg3_flag_set(tp, HW_TSO_3);
15496         else if (tg3_flag(tp, 5755_PLUS) ||
15497                  tg3_asic_rev(tp) == ASIC_REV_5906)
15498                 tg3_flag_set(tp, HW_TSO_2);
15499         else if (tg3_flag(tp, 5750_PLUS)) {
15500                 tg3_flag_set(tp, HW_TSO_1);
15501                 tg3_flag_set(tp, TSO_BUG);
15502                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15503                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15504                         tg3_flag_clear(tp, TSO_BUG);
15505         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15506                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15507                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15508                 tg3_flag_set(tp, FW_TSO);
15509                 tg3_flag_set(tp, TSO_BUG);
15510                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15511                         tp->fw_needed = FIRMWARE_TG3TSO5;
15512                 else
15513                         tp->fw_needed = FIRMWARE_TG3TSO;
15514         }
15515
15516         /* Selectively allow TSO based on operating conditions */
15517         if (tg3_flag(tp, HW_TSO_1) ||
15518             tg3_flag(tp, HW_TSO_2) ||
15519             tg3_flag(tp, HW_TSO_3) ||
15520             tg3_flag(tp, FW_TSO)) {
15521                 /* For firmware TSO, assume ASF is disabled.
15522                  * We'll disable TSO later if we discover ASF
15523                  * is enabled in tg3_get_eeprom_hw_cfg().
15524                  */
15525                 tg3_flag_set(tp, TSO_CAPABLE);
15526         } else {
15527                 tg3_flag_clear(tp, TSO_CAPABLE);
15528                 tg3_flag_clear(tp, TSO_BUG);
15529                 tp->fw_needed = NULL;
15530         }
15531
15532         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15533                 tp->fw_needed = FIRMWARE_TG3;
15534
15535         if (tg3_asic_rev(tp) == ASIC_REV_57766)
15536                 tp->fw_needed = FIRMWARE_TG357766;
15537
15538         tp->irq_max = 1;
15539
15540         if (tg3_flag(tp, 5750_PLUS)) {
15541                 tg3_flag_set(tp, SUPPORT_MSI);
15542                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15543                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15544                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15545                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15546                      tp->pdev_peer == tp->pdev))
15547                         tg3_flag_clear(tp, SUPPORT_MSI);
15548
15549                 if (tg3_flag(tp, 5755_PLUS) ||
15550                     tg3_asic_rev(tp) == ASIC_REV_5906) {
15551                         tg3_flag_set(tp, 1SHOT_MSI);
15552                 }
15553
15554                 if (tg3_flag(tp, 57765_PLUS)) {
15555                         tg3_flag_set(tp, SUPPORT_MSIX);
15556                         tp->irq_max = TG3_IRQ_MAX_VECS;
15557                 }
15558         }
15559
15560         tp->txq_max = 1;
15561         tp->rxq_max = 1;
15562         if (tp->irq_max > 1) {
15563                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15564                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15565
15566                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15567                     tg3_asic_rev(tp) == ASIC_REV_5720)
15568                         tp->txq_max = tp->irq_max - 1;
15569         }
15570
15571         if (tg3_flag(tp, 5755_PLUS) ||
15572             tg3_asic_rev(tp) == ASIC_REV_5906)
15573                 tg3_flag_set(tp, SHORT_DMA_BUG);
15574
15575         if (tg3_asic_rev(tp) == ASIC_REV_5719)
15576                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15577
15578         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15579             tg3_asic_rev(tp) == ASIC_REV_5719 ||
15580             tg3_asic_rev(tp) == ASIC_REV_5720 ||
15581             tg3_asic_rev(tp) == ASIC_REV_5762)
15582                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15583
15584         if (tg3_flag(tp, 57765_PLUS) &&
15585             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15586                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15587
15588         if (!tg3_flag(tp, 5705_PLUS) ||
15589             tg3_flag(tp, 5780_CLASS) ||
15590             tg3_flag(tp, USE_JUMBO_BDFLAG))
15591                 tg3_flag_set(tp, JUMBO_CAPABLE);
15592
15593         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15594                               &pci_state_reg);
15595
15596         if (pci_is_pcie(tp->pdev)) {
15597                 u16 lnkctl;
15598
15599                 tg3_flag_set(tp, PCI_EXPRESS);
15600
15601                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15602                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15603                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15604                                 tg3_flag_clear(tp, HW_TSO_2);
15605                                 tg3_flag_clear(tp, TSO_CAPABLE);
15606                         }
15607                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15608                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
15609                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15610                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15611                                 tg3_flag_set(tp, CLKREQ_BUG);
15612                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15613                         tg3_flag_set(tp, L1PLLPD_EN);
15614                 }
15615         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15616                 /* BCM5785 devices are effectively PCIe devices, and should
15617                  * follow PCIe codepaths, but do not have a PCIe capabilities
15618                  * section.
15619                  */
15620                 tg3_flag_set(tp, PCI_EXPRESS);
15621         } else if (!tg3_flag(tp, 5705_PLUS) ||
15622                    tg3_flag(tp, 5780_CLASS)) {
15623                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15624                 if (!tp->pcix_cap) {
15625                         dev_err(&tp->pdev->dev,
15626                                 "Cannot find PCI-X capability, aborting\n");
15627                         return -EIO;
15628                 }
15629
15630                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15631                         tg3_flag_set(tp, PCIX_MODE);
15632         }
15633
15634         /* If we have an AMD 762 or VIA K8T800 chipset, write
15635          * reordering to the mailbox registers done by the host
15636          * controller can cause major troubles.  We read back from
15637          * every mailbox register write to force the writes to be
15638          * posted to the chip in order.
15639          */
15640         if (pci_dev_present(tg3_write_reorder_chipsets) &&
15641             !tg3_flag(tp, PCI_EXPRESS))
15642                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15643
15644         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15645                              &tp->pci_cacheline_sz);
15646         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15647                              &tp->pci_lat_timer);
15648         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15649             tp->pci_lat_timer < 64) {
15650                 tp->pci_lat_timer = 64;
15651                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15652                                       tp->pci_lat_timer);
15653         }
15654
15655         /* Important! -- It is critical that the PCI-X hw workaround
15656          * situation is decided before the first MMIO register access.
15657          */
15658         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15659                 /* 5700 BX chips need to have their TX producer index
15660                  * mailboxes written twice to workaround a bug.
15661                  */
15662                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15663
15664                 /* If we are in PCI-X mode, enable register write workaround.
15665                  *
15666                  * The workaround is to use indirect register accesses
15667                  * for all chip writes not to mailbox registers.
15668                  */
15669                 if (tg3_flag(tp, PCIX_MODE)) {
15670                         u32 pm_reg;
15671
15672                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15673
15674                         /* The chip can have it's power management PCI config
15675                          * space registers clobbered due to this bug.
15676                          * So explicitly force the chip into D0 here.
15677                          */
15678                         pci_read_config_dword(tp->pdev,
15679                                               tp->pm_cap + PCI_PM_CTRL,
15680                                               &pm_reg);
15681                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15682                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15683                         pci_write_config_dword(tp->pdev,
15684                                                tp->pm_cap + PCI_PM_CTRL,
15685                                                pm_reg);
15686
15687                         /* Also, force SERR#/PERR# in PCI command. */
15688                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15689                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15690                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15691                 }
15692         }
15693
15694         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15695                 tg3_flag_set(tp, PCI_HIGH_SPEED);
15696         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15697                 tg3_flag_set(tp, PCI_32BIT);
15698
15699         /* Chip-specific fixup from Broadcom driver */
15700         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15701             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15702                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15703                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15704         }
15705
15706         /* Default fast path register access methods */
15707         tp->read32 = tg3_read32;
15708         tp->write32 = tg3_write32;
15709         tp->read32_mbox = tg3_read32;
15710         tp->write32_mbox = tg3_write32;
15711         tp->write32_tx_mbox = tg3_write32;
15712         tp->write32_rx_mbox = tg3_write32;
15713
15714         /* Various workaround register access methods */
15715         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15716                 tp->write32 = tg3_write_indirect_reg32;
15717         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15718                  (tg3_flag(tp, PCI_EXPRESS) &&
15719                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
15720                 /*
15721                  * Back to back register writes can cause problems on these
15722                  * chips, the workaround is to read back all reg writes
15723                  * except those to mailbox regs.
15724                  *
15725                  * See tg3_write_indirect_reg32().
15726                  */
15727                 tp->write32 = tg3_write_flush_reg32;
15728         }
15729
15730         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15731                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15732                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15733                         tp->write32_rx_mbox = tg3_write_flush_reg32;
15734         }
15735
15736         if (tg3_flag(tp, ICH_WORKAROUND)) {
15737                 tp->read32 = tg3_read_indirect_reg32;
15738                 tp->write32 = tg3_write_indirect_reg32;
15739                 tp->read32_mbox = tg3_read_indirect_mbox;
15740                 tp->write32_mbox = tg3_write_indirect_mbox;
15741                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15742                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15743
15744                 iounmap(tp->regs);
15745                 tp->regs = NULL;
15746
15747                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15748                 pci_cmd &= ~PCI_COMMAND_MEMORY;
15749                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15750         }
15751         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15752                 tp->read32_mbox = tg3_read32_mbox_5906;
15753                 tp->write32_mbox = tg3_write32_mbox_5906;
15754                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15755                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15756         }
15757
15758         if (tp->write32 == tg3_write_indirect_reg32 ||
15759             (tg3_flag(tp, PCIX_MODE) &&
15760              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15761               tg3_asic_rev(tp) == ASIC_REV_5701)))
15762                 tg3_flag_set(tp, SRAM_USE_CONFIG);
15763
15764         /* The memory arbiter has to be enabled in order for SRAM accesses
15765          * to succeed.  Normally on powerup the tg3 chip firmware will make
15766          * sure it is enabled, but other entities such as system netboot
15767          * code might disable it.
15768          */
15769         val = tr32(MEMARB_MODE);
15770         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15771
15772         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15773         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15774             tg3_flag(tp, 5780_CLASS)) {
15775                 if (tg3_flag(tp, PCIX_MODE)) {
15776                         pci_read_config_dword(tp->pdev,
15777                                               tp->pcix_cap + PCI_X_STATUS,
15778                                               &val);
15779                         tp->pci_fn = val & 0x7;
15780                 }
15781         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15782                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15783                    tg3_asic_rev(tp) == ASIC_REV_5720) {
15784                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15785                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
15786                         val = tr32(TG3_CPMU_STATUS);
15787
15788                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
15789                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
15790                 else
15791                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15792                                      TG3_CPMU_STATUS_FSHFT_5719;
15793         }
15794
15795         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
15796                 tp->write32_tx_mbox = tg3_write_flush_reg32;
15797                 tp->write32_rx_mbox = tg3_write_flush_reg32;
15798         }
15799
15800         /* Get eeprom hw config before calling tg3_set_power_state().
15801          * In particular, the TG3_FLAG_IS_NIC flag must be
15802          * determined before calling tg3_set_power_state() so that
15803          * we know whether or not to switch out of Vaux power.
15804          * When the flag is set, it means that GPIO1 is used for eeprom
15805          * write protect and also implies that it is a LOM where GPIOs
15806          * are not used to switch power.
15807          */
15808         tg3_get_eeprom_hw_cfg(tp);
15809
15810         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
15811                 tg3_flag_clear(tp, TSO_CAPABLE);
15812                 tg3_flag_clear(tp, TSO_BUG);
15813                 tp->fw_needed = NULL;
15814         }
15815
15816         if (tg3_flag(tp, ENABLE_APE)) {
15817                 /* Allow reads and writes to the
15818                  * APE register and memory space.
15819                  */
15820                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15821                                  PCISTATE_ALLOW_APE_SHMEM_WR |
15822                                  PCISTATE_ALLOW_APE_PSPACE_WR;
15823                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15824                                        pci_state_reg);
15825
15826                 tg3_ape_lock_init(tp);
15827         }
15828
15829         /* Set up tp->grc_local_ctrl before calling
15830          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
15831          * will bring 5700's external PHY out of reset.
15832          * It is also used as eeprom write protect on LOMs.
15833          */
15834         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15835         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15836             tg3_flag(tp, EEPROM_WRITE_PROT))
15837                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15838                                        GRC_LCLCTRL_GPIO_OUTPUT1);
15839         /* Unused GPIO3 must be driven as output on 5752 because there
15840          * are no pull-up resistors on unused GPIO pins.
15841          */
15842         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
15843                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15844
15845         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15846             tg3_asic_rev(tp) == ASIC_REV_57780 ||
15847             tg3_flag(tp, 57765_CLASS))
15848                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15849
15850         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15851             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15852                 /* Turn off the debug UART. */
15853                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15854                 if (tg3_flag(tp, IS_NIC))
15855                         /* Keep VMain power. */
15856                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15857                                               GRC_LCLCTRL_GPIO_OUTPUT0;
15858         }
15859
15860         if (tg3_asic_rev(tp) == ASIC_REV_5762)
15861                 tp->grc_local_ctrl |=
15862                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
15863
15864         /* Switch out of Vaux if it is a NIC */
15865         tg3_pwrsrc_switch_to_vmain(tp);
15866
15867         /* Derive initial jumbo mode from MTU assigned in
15868          * ether_setup() via the alloc_etherdev() call
15869          */
15870         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15871                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15872
15873         /* Determine WakeOnLan speed to use. */
15874         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15875             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15876             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15877             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
15878                 tg3_flag_clear(tp, WOL_SPEED_100MB);
15879         } else {
15880                 tg3_flag_set(tp, WOL_SPEED_100MB);
15881         }
15882
15883         if (tg3_asic_rev(tp) == ASIC_REV_5906)
15884                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15885
15886         /* A few boards don't want Ethernet@WireSpeed phy feature */
15887         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15888             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15889              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
15890              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
15891             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15892             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15893                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15894
15895         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
15896             tg3_chip_rev(tp) == CHIPREV_5704_AX)
15897                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15898         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
15899                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15900
15901         if (tg3_flag(tp, 5705_PLUS) &&
15902             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15903             tg3_asic_rev(tp) != ASIC_REV_5785 &&
15904             tg3_asic_rev(tp) != ASIC_REV_57780 &&
15905             !tg3_flag(tp, 57765_PLUS)) {
15906                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15907                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
15908                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
15909                     tg3_asic_rev(tp) == ASIC_REV_5761) {
15910                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15911                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15912                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15913                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15914                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15915                 } else
15916                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15917         }
15918
15919         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15920             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
15921                 tp->phy_otp = tg3_read_otp_phycfg(tp);
15922                 if (tp->phy_otp == 0)
15923                         tp->phy_otp = TG3_OTP_DEFAULT;
15924         }
15925
15926         if (tg3_flag(tp, CPMU_PRESENT))
15927                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15928         else
15929                 tp->mi_mode = MAC_MI_MODE_BASE;
15930
15931         tp->coalesce_mode = 0;
15932         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
15933             tg3_chip_rev(tp) != CHIPREV_5700_BX)
15934                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15935
15936         /* Set these bits to enable statistics workaround. */
15937         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15938             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
15939             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
15940                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15941                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15942         }
15943
15944         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
15945             tg3_asic_rev(tp) == ASIC_REV_57780)
15946                 tg3_flag_set(tp, USE_PHYLIB);
15947
15948         err = tg3_mdio_init(tp);
15949         if (err)
15950                 return err;
15951
15952         /* Initialize data/descriptor byte/word swapping. */
15953         val = tr32(GRC_MODE);
15954         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15955             tg3_asic_rev(tp) == ASIC_REV_5762)
15956                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15957                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
15958                         GRC_MODE_B2HRX_ENABLE |
15959                         GRC_MODE_HTX2B_ENABLE |
15960                         GRC_MODE_HOST_STACKUP);
15961         else
15962                 val &= GRC_MODE_HOST_STACKUP;
15963
15964         tw32(GRC_MODE, val | tp->grc_mode);
15965
15966         tg3_switch_clocks(tp);
15967
15968         /* Clear this out for sanity. */
15969         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15970
15971         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15972                               &pci_state_reg);
15973         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15974             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15975                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15976                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15977                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
15978                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
15979                         void __iomem *sram_base;
15980
15981                         /* Write some dummy words into the SRAM status block
15982                          * area, see if it reads back correctly.  If the return
15983                          * value is bad, force enable the PCIX workaround.
15984                          */
15985                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15986
15987                         writel(0x00000000, sram_base);
15988                         writel(0x00000000, sram_base + 4);
15989                         writel(0xffffffff, sram_base + 4);
15990                         if (readl(sram_base) != 0x00000000)
15991                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15992                 }
15993         }
15994
15995         udelay(50);
15996         tg3_nvram_init(tp);
15997
15998         /* If the device has an NVRAM, no need to load patch firmware */
15999         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16000             !tg3_flag(tp, NO_NVRAM))
16001                 tp->fw_needed = NULL;
16002
16003         grc_misc_cfg = tr32(GRC_MISC_CFG);
16004         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16005
16006         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16007             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16008              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16009                 tg3_flag_set(tp, IS_5788);
16010
16011         if (!tg3_flag(tp, IS_5788) &&
16012             tg3_asic_rev(tp) != ASIC_REV_5700)
16013                 tg3_flag_set(tp, TAGGED_STATUS);
16014         if (tg3_flag(tp, TAGGED_STATUS)) {
16015                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16016                                       HOSTCC_MODE_CLRTICK_TXBD);
16017
16018                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16019                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16020                                        tp->misc_host_ctrl);
16021         }
16022
16023         /* Preserve the APE MAC_MODE bits */
16024         if (tg3_flag(tp, ENABLE_APE))
16025                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16026         else
16027                 tp->mac_mode = 0;
16028
16029         if (tg3_10_100_only_device(tp, ent))
16030                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16031
16032         err = tg3_phy_probe(tp);
16033         if (err) {
16034                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16035                 /* ... but do not return immediately ... */
16036                 tg3_mdio_fini(tp);
16037         }
16038
16039         tg3_read_vpd(tp);
16040         tg3_read_fw_ver(tp);
16041
16042         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16043                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16044         } else {
16045                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16046                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16047                 else
16048                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16049         }
16050
16051         /* 5700 {AX,BX} chips have a broken status block link
16052          * change bit implementation, so we must use the
16053          * status register in those cases.
16054          */
16055         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16056                 tg3_flag_set(tp, USE_LINKCHG_REG);
16057         else
16058                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16059
16060         /* The led_ctrl is set during tg3_phy_probe, here we might
16061          * have to force the link status polling mechanism based
16062          * upon subsystem IDs.
16063          */
16064         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16065             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16066             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16067                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16068                 tg3_flag_set(tp, USE_LINKCHG_REG);
16069         }
16070
16071         /* For all SERDES we poll the MAC status register. */
16072         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16073                 tg3_flag_set(tp, POLL_SERDES);
16074         else
16075                 tg3_flag_clear(tp, POLL_SERDES);
16076
16077         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16078         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16079         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16080             tg3_flag(tp, PCIX_MODE)) {
16081                 tp->rx_offset = NET_SKB_PAD;
16082 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16083                 tp->rx_copy_thresh = ~(u16)0;
16084 #endif
16085         }
16086
16087         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16088         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16089         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16090
16091         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16092
16093         /* Increment the rx prod index on the rx std ring by at most
16094          * 8 for these chips to workaround hw errata.
16095          */
16096         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16097             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16098             tg3_asic_rev(tp) == ASIC_REV_5755)
16099                 tp->rx_std_max_post = 8;
16100
16101         if (tg3_flag(tp, ASPM_WORKAROUND))
16102                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16103                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16104
16105         return err;
16106 }
16107
16108 #ifdef CONFIG_SPARC
16109 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16110 {
16111         struct net_device *dev = tp->dev;
16112         struct pci_dev *pdev = tp->pdev;
16113         struct device_node *dp = pci_device_to_OF_node(pdev);
16114         const unsigned char *addr;
16115         int len;
16116
16117         addr = of_get_property(dp, "local-mac-address", &len);
16118         if (addr && len == 6) {
16119                 memcpy(dev->dev_addr, addr, 6);
16120                 return 0;
16121         }
16122         return -ENODEV;
16123 }
16124
16125 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16126 {
16127         struct net_device *dev = tp->dev;
16128
16129         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16130         return 0;
16131 }
16132 #endif
16133
/* Determine the device's permanent MAC address and store it in
 * tp->dev->dev_addr.  Sources are tried in decreasing order of trust:
 * OpenFirmware property (sparc only), SSB core registers, the SRAM
 * MAC-address mailbox, NVRAM, and finally the live MAC_ADDR_0
 * registers.  Returns 0 on success, -EINVAL if no valid (unicast,
 * non-zero) address was found anywhere.
 */
static int tg3_get_device_address(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        u32 hi, lo, mac_offset;
        int addr_ok = 0;
        int err;

#ifdef CONFIG_SPARC
        /* On sparc the firmware "local-mac-address" property wins. */
        if (!tg3_get_macaddr_sparc(tp))
                return 0;
#endif

        if (tg3_flag(tp, IS_SSB_CORE)) {
                err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
                /* Only accept the SSB-provided address if it is valid. */
                if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
                        return 0;
        }

        /* Choose the NVRAM offset of the MAC address for this chip and
         * PCI function.  0x7c is the default; dual-MAC (5704/5780-class)
         * and multi-function (5717+) parts keep secondary addresses at
         * other offsets, and the 5906 uses 0x10.
         */
        mac_offset = 0x7c;
        if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
            tg3_flag(tp, 5780_CLASS)) {
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        mac_offset = 0xcc;
                /* Reset NVRAM if the lock could not be taken; otherwise
                 * release the lock we just acquired.
                 */
                if (tg3_nvram_lock(tp))
                        tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
                else
                        tg3_nvram_unlock(tp);
        } else if (tg3_flag(tp, 5717_PLUS)) {
                if (tp->pci_fn & 1)
                        mac_offset = 0xcc;
                if (tp->pci_fn > 1)
                        mac_offset += 0x18c;
        } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
                mac_offset = 0x10;

        /* First try to get it from MAC address mailbox. */
        tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
        if ((hi >> 16) == 0x484b) {     /* 0x484b == "HK": mailbox signature */
                dev->dev_addr[0] = (hi >>  8) & 0xff;
                dev->dev_addr[1] = (hi >>  0) & 0xff;

                tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
                dev->dev_addr[2] = (lo >> 24) & 0xff;
                dev->dev_addr[3] = (lo >> 16) & 0xff;
                dev->dev_addr[4] = (lo >>  8) & 0xff;
                dev->dev_addr[5] = (lo >>  0) & 0xff;

                /* Some old bootcode may report a 0 MAC address in SRAM */
                addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
        }
        if (!addr_ok) {
                /* Next, try NVRAM. */
                if (!tg3_flag(tp, NO_NVRAM) &&
                    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
                    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
                        /* hi holds bytes 0-1 in its low 16 bits (big-endian
                         * read), lo holds bytes 2-5.
                         */
                        memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
                        memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
                }
                /* Finally just fetch it out of the MAC control regs. */
                else {
                        hi = tr32(MAC_ADDR_0_HIGH);
                        lo = tr32(MAC_ADDR_0_LOW);

                        dev->dev_addr[5] = lo & 0xff;
                        dev->dev_addr[4] = (lo >> 8) & 0xff;
                        dev->dev_addr[3] = (lo >> 16) & 0xff;
                        dev->dev_addr[2] = (lo >> 24) & 0xff;
                        dev->dev_addr[1] = hi & 0xff;
                        dev->dev_addr[0] = (hi >> 8) & 0xff;
                }
        }

        if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
                /* Last resort on sparc: the machine-wide IDPROM address. */
                if (!tg3_get_default_macaddr_sparc(tp))
                        return 0;
#endif
                return -EINVAL;
        }
        return 0;
}
16215
16216 #define BOUNDARY_SINGLE_CACHELINE       1
16217 #define BOUNDARY_MULTI_CACHELINE        2
16218
/* Compute the DMA read/write burst-boundary bits for the DMA_RWCTRL
 * register, based on the host cache-line size and the bus type
 * (conventional PCI, PCI-X, or PCI Express).
 *
 * @tp:  device state
 * @val: current DMA_RWCTRL value; boundary bits are merged into it
 *
 * Returns the updated DMA_RWCTRL value.
 */
static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
        int cacheline_size;
        u8 byte;
        int goal;

        /* PCI_CACHE_LINE_SIZE is in 32-bit words; 0 means the BIOS did
         * not program it, so assume a large 1024-byte line.
         */
        pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
        if (byte == 0)
                cacheline_size = 1024;
        else
                cacheline_size = (int) byte * 4;

        /* On 5703 and later chips, the boundary bits have no
         * effect.
         */
        if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
            tg3_asic_rev(tp) != ASIC_REV_5701 &&
            !tg3_flag(tp, PCI_EXPRESS))
                goto out;

        /* Platform-dependent burst policy: some hosts disconnect on
         * bursts that cross a cache line, so constrain bursts there.
         */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
        goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
        goal = BOUNDARY_SINGLE_CACHELINE;
#else
        goal = 0;
#endif
#endif

        /* 57765+ chips only have a single enable/disable alignment bit. */
        if (tg3_flag(tp, 57765_PLUS)) {
                val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
                goto out;
        }

        if (!goal)
                goto out;

        /* PCI controllers on most RISC systems tend to disconnect
         * when a device tries to burst across a cache-line boundary.
         * Therefore, letting tg3 do so just wastes PCI bandwidth.
         *
         * Unfortunately, for PCI-E there are only limited
         * write-side controls for this, and thus for reads
         * we will still get the disconnects.  We'll also waste
         * these PCI cycles for both read and write for chips
         * other than 5700 and 5701 which do not implement the
         * boundary bits.
         */
        if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
                /* PCI-X: pick the read/write boundary pair closest to
                 * (but not exceeding) the policy for this cache line.
                 */
                switch (cacheline_size) {
                case 16:
                case 32:
                case 64:
                case 128:
                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
                                val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
                                        DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
                        } else {
                                val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
                                        DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
                        }
                        break;

                case 256:
                        val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
                                DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
                        break;

                default:
                        val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
                                DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
                        break;
                }
        } else if (tg3_flag(tp, PCI_EXPRESS)) {
                /* PCIe: only the write boundary is controllable. */
                switch (cacheline_size) {
                case 16:
                case 32:
                case 64:
                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
                                val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
                                val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
                                break;
                        }
                        /* fallthrough */
                case 128:
                default:
                        val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
                        val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
                        break;
                }
        } else {
                /* Conventional PCI: the fallthroughs walk up the sizes
                 * until the cache-line size is matched or exceeded.
                 */
                switch (cacheline_size) {
                case 16:
                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
                                val |= (DMA_RWCTRL_READ_BNDRY_16 |
                                        DMA_RWCTRL_WRITE_BNDRY_16);
                                break;
                        }
                        /* fallthrough */
                case 32:
                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
                                val |= (DMA_RWCTRL_READ_BNDRY_32 |
                                        DMA_RWCTRL_WRITE_BNDRY_32);
                                break;
                        }
                        /* fallthrough */
                case 64:
                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
                                val |= (DMA_RWCTRL_READ_BNDRY_64 |
                                        DMA_RWCTRL_WRITE_BNDRY_64);
                                break;
                        }
                        /* fallthrough */
                case 128:
                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
                                val |= (DMA_RWCTRL_READ_BNDRY_128 |
                                        DMA_RWCTRL_WRITE_BNDRY_128);
                                break;
                        }
                        /* fallthrough */
                case 256:
                        val |= (DMA_RWCTRL_READ_BNDRY_256 |
                                DMA_RWCTRL_WRITE_BNDRY_256);
                        break;
                case 512:
                        val |= (DMA_RWCTRL_READ_BNDRY_512 |
                                DMA_RWCTRL_WRITE_BNDRY_512);
                        break;
                case 1024:
                default:
                        val |= (DMA_RWCTRL_READ_BNDRY_1024 |
                                DMA_RWCTRL_WRITE_BNDRY_1024);
                        break;
                }
        }

out:
        return val;
}
16359
/* tg3_do_test_dma - run a single host<->NIC DMA transaction for the DMA test.
 * @tp: device instance
 * @buf: kernel virtual address of the host test buffer
 * @buf_dma: DMA (bus) address of the same buffer
 * @size: transfer length in bytes
 * @to_device: non-zero = host-to-NIC (read DMA engine),
 *             zero = NIC-to-host (write DMA engine)
 *
 * Builds one internal buffer descriptor, writes it into NIC SRAM through the
 * indirect memory window in PCI config space, enqueues it on the appropriate
 * DMA FTQ, and polls the matching completion FIFO.
 *
 * Returns 0 when the transaction completes, -ENODEV on timeout.
 */
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
                           int size, int to_device)
{
        struct tg3_internal_buffer_desc test_desc;
        u32 sram_dma_descs;
        int i, ret;

        sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

        /* Quiesce the DMA completion FIFOs and status registers first. */
        tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
        tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
        tw32(RDMAC_STATUS, 0);
        tw32(WDMAC_STATUS, 0);

        tw32(BUFMGR_MODE, 0);
        tw32(FTQ_RESET, 0);

        /* Describe the host buffer.  0x2100 is the NIC-side buffer address
         * (presumably in NIC SRAM; the disabled verification code below
         * reads back from 0x2100).
         */
        test_desc.addr_hi = ((u64) buf_dma) >> 32;
        test_desc.addr_lo = buf_dma & 0xffffffff;
        test_desc.nic_mbuf = 0x00002100;
        test_desc.len = size;

        /*
         * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
         * the *second* time the tg3 driver was getting loaded after an
         * initial scan.
         *
         * Broadcom tells me:
         *   ...the DMA engine is connected to the GRC block and a DMA
         *   reset may affect the GRC block in some unpredictable way...
         *   The behavior of resets to individual blocks has not been tested.
         *
         * Broadcom noted the GRC reset will also reset all sub-components.
         */
        if (to_device) {
                test_desc.cqid_sqid = (13 << 8) | 2;

                tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
                udelay(40);
        } else {
                test_desc.cqid_sqid = (16 << 8) | 7;

                tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
                udelay(40);
        }
        test_desc.flags = 0x00000005;

        /* Copy the descriptor into NIC SRAM one 32-bit word at a time,
         * using the indirect memory window registers in PCI config space.
         */
        for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
                u32 val;

                val = *(((u32 *)&test_desc) + i);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
                                       sram_dma_descs + (i * sizeof(u32)));
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
        }
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

        /* Kick the descriptor onto the read or write DMA high-priority FTQ. */
        if (to_device)
                tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
        else
                tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

        /* Poll for completion: up to 40 * 100us = 4ms. */
        ret = -ENODEV;
        for (i = 0; i < 40; i++) {
                u32 val;

                if (to_device)
                        val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
                else
                        val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
                if ((val & 0xffff) == sram_dma_descs) {
                        ret = 0;
                        break;
                }

                udelay(100);
        }

        return ret;
}
16440
/* Size in bytes (8 KiB) of the host buffer used by tg3_test_dma(). */
#define TEST_BUFFER_SIZE        0x2000
16442
16443 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16444         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16445         { },
16446 };
16447
/* tg3_test_dma - derive and validate the chip's DMA read/write control value.
 *
 * Allocates a coherent test buffer, computes an initial TG3PCI_DMA_RW_CTRL
 * value, applies bus-specific (PCIe / plain PCI / PCI-X) watermark and
 * workaround fixups, and, on 5700/5701 parts only, performs a real
 * write-then-read DMA round trip to detect the write-DMA boundary bug,
 * tightening the write boundary to 16 bytes when corruption is observed.
 *
 * Returns 0 on success, -ENOMEM if the buffer cannot be allocated, or
 * -ENODEV if the DMA engine fails/corrupts data even at the safe boundary.
 */
static int tg3_test_dma(struct tg3 *tp)
{
        dma_addr_t buf_dma;
        u32 *buf, saved_dma_rwctrl;
        int ret = 0;

        buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
                                 &buf_dma, GFP_KERNEL);
        if (!buf) {
                ret = -ENOMEM;
                goto out_nofree;
        }

        /* Baseline PCI read/write command codes ... */
        tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
                          (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

        /* ... plus the boundary bits appropriate for the bus/cacheline. */
        tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

        if (tg3_flag(tp, 57765_PLUS))
                goto out;

        if (tg3_flag(tp, PCI_EXPRESS)) {
                /* DMA read watermark not used on PCIE */
                tp->dma_rwctrl |= 0x00180000;
        } else if (!tg3_flag(tp, PCIX_MODE)) {
                if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
                    tg3_asic_rev(tp) == ASIC_REV_5750)
                        tp->dma_rwctrl |= 0x003f0000;
                else
                        tp->dma_rwctrl |= 0x003f000f;
        } else {
                if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
                    tg3_asic_rev(tp) == ASIC_REV_5704) {
                        u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
                        u32 read_water = 0x7;

                        /* If the 5704 is behind the EPB bridge, we can
                         * do the less restrictive ONE_DMA workaround for
                         * better performance.
                         */
                        if (tg3_flag(tp, 40BIT_DMA_BUG) &&
                            tg3_asic_rev(tp) == ASIC_REV_5704)
                                tp->dma_rwctrl |= 0x8000;
                        else if (ccval == 0x6 || ccval == 0x7)
                                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

                        if (tg3_asic_rev(tp) == ASIC_REV_5703)
                                read_water = 4;
                        /* Set bit 23 to enable PCIX hw bug fix */
                        tp->dma_rwctrl |=
                                (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
                                (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
                                (1 << 23);
                } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
                        /* 5780 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00144000;
                } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
                        /* 5714 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00148000;
                } else {
                        tp->dma_rwctrl |= 0x001b000f;
                }
        }
        if (tg3_flag(tp, ONE_DMA_AT_ONCE))
                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

        /* 5703/5704: clear the low nibble (minimum watermark fields). */
        if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
            tg3_asic_rev(tp) == ASIC_REV_5704)
                tp->dma_rwctrl &= 0xfffffff0;

        if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
            tg3_asic_rev(tp) == ASIC_REV_5701) {
                /* Remove this if it causes problems for some boards. */
                tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

                /* On 5700/5701 chips, we need to set this bit.
                 * Otherwise the chip will issue cacheline transactions
                 * to streamable DMA memory with not all the byte
                 * enables turned on.  This is an error on several
                 * RISC PCI controllers, in particular sparc64.
                 *
                 * On 5703/5704 chips, this bit has been reassigned
                 * a different meaning.  In particular, it is used
                 * on those chips to enable a PCI-X workaround.
                 */
                tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
        }

        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
        /* Unneeded, already done by tg3_get_invariants.  */
        tg3_switch_clocks(tp);
#endif

        /* Only 5700/5701 need the actual DMA round-trip test below. */
        if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
            tg3_asic_rev(tp) != ASIC_REV_5701)
                goto out;

        /* It is best to perform DMA test with maximum write burst size
         * to expose the 5700/5701 write DMA bug.
         */
        saved_dma_rwctrl = tp->dma_rwctrl;
        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

        /* Fill the buffer with an incrementing pattern, DMA it to the chip
         * and back, and verify it; on corruption retry once with a 16-byte
         * write boundary before declaring the device broken.
         */
        while (1) {
                u32 *p = buf, i;

                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
                        p[i] = i;

                /* Send the buffer to the chip. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
                if (ret) {
                        dev_err(&tp->pdev->dev,
                                "%s: Buffer write failed. err = %d\n",
                                __func__, ret);
                        break;
                }

#if 0
                /* validate data reached card RAM correctly. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        u32 val;
                        tg3_read_mem(tp, 0x2100 + (i*4), &val);
                        if (le32_to_cpu(val) != p[i]) {
                                dev_err(&tp->pdev->dev,
                                        "%s: Buffer corrupted on device! "
                                        "(%d != %d)\n", __func__, val, i);
                                /* ret = -ENODEV here? */
                        }
                        p[i] = 0;
                }
#endif
                /* Now read it back. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
                if (ret) {
                        dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
                                "err = %d\n", __func__, ret);
                        break;
                }

                /* Verify it. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        if (p[i] == i)
                                continue;

                        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
                            DMA_RWCTRL_WRITE_BNDRY_16) {
                                /* First corruption: tighten the write
                                 * boundary to 16 bytes and rerun the loop.
                                 */
                                tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                                tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
                                break;
                        } else {
                                dev_err(&tp->pdev->dev,
                                        "%s: Buffer corrupted on read back! "
                                        "(%d != %d)\n", __func__, p[i], i);
                                ret = -ENODEV;
                                goto out;
                        }
                }

                if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
                        /* Success. */
                        ret = 0;
                        break;
                }
        }
        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
            DMA_RWCTRL_WRITE_BNDRY_16) {
                /* DMA test passed without adjusting DMA boundary,
                 * now look for chipsets that are known to expose the
                 * DMA bug without failing the test.
                 */
                if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
                        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                        tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                } else {
                        /* Safe to use the calculated DMA boundary. */
                        tp->dma_rwctrl = saved_dma_rwctrl;
                }

                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
        }

out:
        dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
        return ret;
}
16639
16640 static void tg3_init_bufmgr_config(struct tg3 *tp)
16641 {
16642         if (tg3_flag(tp, 57765_PLUS)) {
16643                 tp->bufmgr_config.mbuf_read_dma_low_water =
16644                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16645                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16646                         DEFAULT_MB_MACRX_LOW_WATER_57765;
16647                 tp->bufmgr_config.mbuf_high_water =
16648                         DEFAULT_MB_HIGH_WATER_57765;
16649
16650                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16651                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16652                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16653                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16654                 tp->bufmgr_config.mbuf_high_water_jumbo =
16655                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16656         } else if (tg3_flag(tp, 5705_PLUS)) {
16657                 tp->bufmgr_config.mbuf_read_dma_low_water =
16658                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16659                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16660                         DEFAULT_MB_MACRX_LOW_WATER_5705;
16661                 tp->bufmgr_config.mbuf_high_water =
16662                         DEFAULT_MB_HIGH_WATER_5705;
16663                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16664                         tp->bufmgr_config.mbuf_mac_rx_low_water =
16665                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
16666                         tp->bufmgr_config.mbuf_high_water =
16667                                 DEFAULT_MB_HIGH_WATER_5906;
16668                 }
16669
16670                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16671                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16672                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16673                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16674                 tp->bufmgr_config.mbuf_high_water_jumbo =
16675                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16676         } else {
16677                 tp->bufmgr_config.mbuf_read_dma_low_water =
16678                         DEFAULT_MB_RDMA_LOW_WATER;
16679                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16680                         DEFAULT_MB_MACRX_LOW_WATER;
16681                 tp->bufmgr_config.mbuf_high_water =
16682                         DEFAULT_MB_HIGH_WATER;
16683
16684                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16685                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16686                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16687                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16688                 tp->bufmgr_config.mbuf_high_water_jumbo =
16689                         DEFAULT_MB_HIGH_WATER_JUMBO;
16690         }
16691
16692         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16693         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16694 }
16695
16696 static char *tg3_phy_string(struct tg3 *tp)
16697 {
16698         switch (tp->phy_id & TG3_PHY_ID_MASK) {
16699         case TG3_PHY_ID_BCM5400:        return "5400";
16700         case TG3_PHY_ID_BCM5401:        return "5401";
16701         case TG3_PHY_ID_BCM5411:        return "5411";
16702         case TG3_PHY_ID_BCM5701:        return "5701";
16703         case TG3_PHY_ID_BCM5703:        return "5703";
16704         case TG3_PHY_ID_BCM5704:        return "5704";
16705         case TG3_PHY_ID_BCM5705:        return "5705";
16706         case TG3_PHY_ID_BCM5750:        return "5750";
16707         case TG3_PHY_ID_BCM5752:        return "5752";
16708         case TG3_PHY_ID_BCM5714:        return "5714";
16709         case TG3_PHY_ID_BCM5780:        return "5780";
16710         case TG3_PHY_ID_BCM5755:        return "5755";
16711         case TG3_PHY_ID_BCM5787:        return "5787";
16712         case TG3_PHY_ID_BCM5784:        return "5784";
16713         case TG3_PHY_ID_BCM5756:        return "5722/5756";
16714         case TG3_PHY_ID_BCM5906:        return "5906";
16715         case TG3_PHY_ID_BCM5761:        return "5761";
16716         case TG3_PHY_ID_BCM5718C:       return "5718C";
16717         case TG3_PHY_ID_BCM5718S:       return "5718S";
16718         case TG3_PHY_ID_BCM57765:       return "57765";
16719         case TG3_PHY_ID_BCM5719C:       return "5719C";
16720         case TG3_PHY_ID_BCM5720C:       return "5720C";
16721         case TG3_PHY_ID_BCM5762:        return "5762C";
16722         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
16723         case 0:                 return "serdes";
16724         default:                return "unknown";
16725         }
16726 }
16727
16728 static char *tg3_bus_string(struct tg3 *tp, char *str)
16729 {
16730         if (tg3_flag(tp, PCI_EXPRESS)) {
16731                 strcpy(str, "PCI Express");
16732                 return str;
16733         } else if (tg3_flag(tp, PCIX_MODE)) {
16734                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16735
16736                 strcpy(str, "PCIX:");
16737
16738                 if ((clock_ctrl == 7) ||
16739                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16740                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16741                         strcat(str, "133MHz");
16742                 else if (clock_ctrl == 0)
16743                         strcat(str, "33MHz");
16744                 else if (clock_ctrl == 2)
16745                         strcat(str, "50MHz");
16746                 else if (clock_ctrl == 4)
16747                         strcat(str, "66MHz");
16748                 else if (clock_ctrl == 6)
16749                         strcat(str, "100MHz");
16750         } else {
16751                 strcpy(str, "PCI:");
16752                 if (tg3_flag(tp, PCI_HIGH_SPEED))
16753                         strcat(str, "66MHz");
16754                 else
16755                         strcat(str, "33MHz");
16756         }
16757         if (tg3_flag(tp, PCI_32BIT))
16758                 strcat(str, ":32-bit");
16759         else
16760                 strcat(str, ":64-bit");
16761         return str;
16762 }
16763
16764 static void tg3_init_coal(struct tg3 *tp)
16765 {
16766         struct ethtool_coalesce *ec = &tp->coal;
16767
16768         memset(ec, 0, sizeof(*ec));
16769         ec->cmd = ETHTOOL_GCOALESCE;
16770         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16771         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16772         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16773         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16774         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16775         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16776         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16777         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16778         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16779
16780         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16781                                  HOSTCC_MODE_CLRTICK_TXBD)) {
16782                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16783                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16784                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16785                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16786         }
16787
16788         if (tg3_flag(tp, 5705_PLUS)) {
16789                 ec->rx_coalesce_usecs_irq = 0;
16790                 ec->tx_coalesce_usecs_irq = 0;
16791                 ec->stats_block_coalesce_usecs = 0;
16792         }
16793 }
16794
16795 static int tg3_init_one(struct pci_dev *pdev,
16796                                   const struct pci_device_id *ent)
16797 {
16798         struct net_device *dev;
16799         struct tg3 *tp;
16800         int i, err, pm_cap;
16801         u32 sndmbx, rcvmbx, intmbx;
16802         char str[40];
16803         u64 dma_mask, persist_dma_mask;
16804         netdev_features_t features = 0;
16805
16806         printk_once(KERN_INFO "%s\n", version);
16807
16808         err = pci_enable_device(pdev);
16809         if (err) {
16810                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16811                 return err;
16812         }
16813
16814         err = pci_request_regions(pdev, DRV_MODULE_NAME);
16815         if (err) {
16816                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16817                 goto err_out_disable_pdev;
16818         }
16819
16820         pci_set_master(pdev);
16821
16822         /* Find power-management capability. */
16823         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16824         if (pm_cap == 0) {
16825                 dev_err(&pdev->dev,
16826                         "Cannot find Power Management capability, aborting\n");
16827                 err = -EIO;
16828                 goto err_out_free_res;
16829         }
16830
16831         err = pci_set_power_state(pdev, PCI_D0);
16832         if (err) {
16833                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16834                 goto err_out_free_res;
16835         }
16836
16837         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16838         if (!dev) {
16839                 err = -ENOMEM;
16840                 goto err_out_power_down;
16841         }
16842
16843         SET_NETDEV_DEV(dev, &pdev->dev);
16844
16845         tp = netdev_priv(dev);
16846         tp->pdev = pdev;
16847         tp->dev = dev;
16848         tp->pm_cap = pm_cap;
16849         tp->rx_mode = TG3_DEF_RX_MODE;
16850         tp->tx_mode = TG3_DEF_TX_MODE;
16851         tp->irq_sync = 1;
16852
16853         if (tg3_debug > 0)
16854                 tp->msg_enable = tg3_debug;
16855         else
16856                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16857
16858         if (pdev_is_ssb_gige_core(pdev)) {
16859                 tg3_flag_set(tp, IS_SSB_CORE);
16860                 if (ssb_gige_must_flush_posted_writes(pdev))
16861                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
16862                 if (ssb_gige_one_dma_at_once(pdev))
16863                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
16864                 if (ssb_gige_have_roboswitch(pdev))
16865                         tg3_flag_set(tp, ROBOSWITCH);
16866                 if (ssb_gige_is_rgmii(pdev))
16867                         tg3_flag_set(tp, RGMII_MODE);
16868         }
16869
16870         /* The word/byte swap controls here control register access byte
16871          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
16872          * setting below.
16873          */
16874         tp->misc_host_ctrl =
16875                 MISC_HOST_CTRL_MASK_PCI_INT |
16876                 MISC_HOST_CTRL_WORD_SWAP |
16877                 MISC_HOST_CTRL_INDIR_ACCESS |
16878                 MISC_HOST_CTRL_PCISTATE_RW;
16879
16880         /* The NONFRM (non-frame) byte/word swap controls take effect
16881          * on descriptor entries, anything which isn't packet data.
16882          *
16883          * The StrongARM chips on the board (one for tx, one for rx)
16884          * are running in big-endian mode.
16885          */
16886         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16887                         GRC_MODE_WSWAP_NONFRM_DATA);
16888 #ifdef __BIG_ENDIAN
16889         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16890 #endif
16891         spin_lock_init(&tp->lock);
16892         spin_lock_init(&tp->indirect_lock);
16893         INIT_WORK(&tp->reset_task, tg3_reset_task);
16894
16895         tp->regs = pci_ioremap_bar(pdev, BAR_0);
16896         if (!tp->regs) {
16897                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16898                 err = -ENOMEM;
16899                 goto err_out_free_dev;
16900         }
16901
16902         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16903             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16904             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16905             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16906             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16907             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16908             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16909             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16910             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16911             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16912             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16913             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
16914                 tg3_flag_set(tp, ENABLE_APE);
16915                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16916                 if (!tp->aperegs) {
16917                         dev_err(&pdev->dev,
16918                                 "Cannot map APE registers, aborting\n");
16919                         err = -ENOMEM;
16920                         goto err_out_iounmap;
16921                 }
16922         }
16923
16924         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16925         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16926
16927         dev->ethtool_ops = &tg3_ethtool_ops;
16928         dev->watchdog_timeo = TG3_TX_TIMEOUT;
16929         dev->netdev_ops = &tg3_netdev_ops;
16930         dev->irq = pdev->irq;
16931
16932         err = tg3_get_invariants(tp, ent);
16933         if (err) {
16934                 dev_err(&pdev->dev,
16935                         "Problem fetching invariants of chip, aborting\n");
16936                 goto err_out_apeunmap;
16937         }
16938
16939         /* The EPB bridge inside 5714, 5715, and 5780 and any
16940          * device behind the EPB cannot support DMA addresses > 40-bit.
16941          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16942          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16943          * do DMA address check in tg3_start_xmit().
16944          */
16945         if (tg3_flag(tp, IS_5788))
16946                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16947         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16948                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16949 #ifdef CONFIG_HIGHMEM
16950                 dma_mask = DMA_BIT_MASK(64);
16951 #endif
16952         } else
16953                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16954
16955         /* Configure DMA attributes. */
16956         if (dma_mask > DMA_BIT_MASK(32)) {
16957                 err = pci_set_dma_mask(pdev, dma_mask);
16958                 if (!err) {
16959                         features |= NETIF_F_HIGHDMA;
16960                         err = pci_set_consistent_dma_mask(pdev,
16961                                                           persist_dma_mask);
16962                         if (err < 0) {
16963                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16964                                         "DMA for consistent allocations\n");
16965                                 goto err_out_apeunmap;
16966                         }
16967                 }
16968         }
16969         if (err || dma_mask == DMA_BIT_MASK(32)) {
16970                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16971                 if (err) {
16972                         dev_err(&pdev->dev,
16973                                 "No usable DMA configuration, aborting\n");
16974                         goto err_out_apeunmap;
16975                 }
16976         }
16977
16978         tg3_init_bufmgr_config(tp);
16979
16980         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16981
16982         /* 5700 B0 chips do not support checksumming correctly due
16983          * to hardware bugs.
16984          */
16985         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
16986                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16987
16988                 if (tg3_flag(tp, 5755_PLUS))
16989                         features |= NETIF_F_IPV6_CSUM;
16990         }
16991
16992         /* TSO is on by default on chips that support hardware TSO.
16993          * Firmware TSO on older chips gives lower performance, so it
16994          * is off by default, but can be enabled using ethtool.
16995          */
16996         if ((tg3_flag(tp, HW_TSO_1) ||
16997              tg3_flag(tp, HW_TSO_2) ||
16998              tg3_flag(tp, HW_TSO_3)) &&
16999             (features & NETIF_F_IP_CSUM))
17000                 features |= NETIF_F_TSO;
17001         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17002                 if (features & NETIF_F_IPV6_CSUM)
17003                         features |= NETIF_F_TSO6;
17004                 if (tg3_flag(tp, HW_TSO_3) ||
17005                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17006                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17007                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17008                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17009                     tg3_asic_rev(tp) == ASIC_REV_57780)
17010                         features |= NETIF_F_TSO_ECN;
17011         }
17012
17013         dev->features |= features;
17014         dev->vlan_features |= features;
17015
17016         /*
17017          * Add loopback capability only for a subset of devices that support
17018          * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17019          * loopback for the remaining devices.
17020          */
17021         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17022             !tg3_flag(tp, CPMU_PRESENT))
17023                 /* Add the loopback capability */
17024                 features |= NETIF_F_LOOPBACK;
17025
17026         dev->hw_features |= features;
17027
17028         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17029             !tg3_flag(tp, TSO_CAPABLE) &&
17030             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17031                 tg3_flag_set(tp, MAX_RXPEND_64);
17032                 tp->rx_pending = 63;
17033         }
17034
17035         err = tg3_get_device_address(tp);
17036         if (err) {
17037                 dev_err(&pdev->dev,
17038                         "Could not obtain valid ethernet address, aborting\n");
17039                 goto err_out_apeunmap;
17040         }
17041
17042         /*
17043          * Reset chip in case UNDI or EFI driver did not shutdown
17044          * DMA self test will enable WDMAC and we'll see (spurious)
17045          * pending DMA on the PCI bus at that point.
17046          */
17047         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17048             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17049                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17050                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17051         }
17052
17053         err = tg3_test_dma(tp);
17054         if (err) {
17055                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17056                 goto err_out_apeunmap;
17057         }
17058
17059         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17060         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17061         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17062         for (i = 0; i < tp->irq_max; i++) {
17063                 struct tg3_napi *tnapi = &tp->napi[i];
17064
17065                 tnapi->tp = tp;
17066                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17067
17068                 tnapi->int_mbox = intmbx;
17069                 if (i <= 4)
17070                         intmbx += 0x8;
17071                 else
17072                         intmbx += 0x4;
17073
17074                 tnapi->consmbox = rcvmbx;
17075                 tnapi->prodmbox = sndmbx;
17076
17077                 if (i)
17078                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17079                 else
17080                         tnapi->coal_now = HOSTCC_MODE_NOW;
17081
17082                 if (!tg3_flag(tp, SUPPORT_MSIX))
17083                         break;
17084
17085                 /*
17086                  * If we support MSIX, we'll be using RSS.  If we're using
17087                  * RSS, the first vector only handles link interrupts and the
17088                  * remaining vectors handle rx and tx interrupts.  Reuse the
17089                  * mailbox values for the next iteration.  The values we setup
17090                  * above are still useful for the single vectored mode.
17091                  */
17092                 if (!i)
17093                         continue;
17094
17095                 rcvmbx += 0x8;
17096
17097                 if (sndmbx & 0x4)
17098                         sndmbx -= 0x4;
17099                 else
17100                         sndmbx += 0xc;
17101         }
17102
17103         tg3_init_coal(tp);
17104
17105         pci_set_drvdata(pdev, dev);
17106
17107         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17108             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17109             tg3_asic_rev(tp) == ASIC_REV_5762)
17110                 tg3_flag_set(tp, PTP_CAPABLE);
17111
17112         if (tg3_flag(tp, 5717_PLUS)) {
17113                 /* Resume a low-power mode */
17114                 tg3_frob_aux_power(tp, false);
17115         }
17116
17117         tg3_timer_init(tp);
17118
17119         tg3_carrier_off(tp);
17120
17121         err = register_netdev(dev);
17122         if (err) {
17123                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17124                 goto err_out_apeunmap;
17125         }
17126
17127         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17128                     tp->board_part_number,
17129                     tg3_chip_rev_id(tp),
17130                     tg3_bus_string(tp, str),
17131                     dev->dev_addr);
17132
17133         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17134                 struct phy_device *phydev;
17135                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17136                 netdev_info(dev,
17137                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17138                             phydev->drv->name, dev_name(&phydev->dev));
17139         } else {
17140                 char *ethtype;
17141
17142                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17143                         ethtype = "10/100Base-TX";
17144                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17145                         ethtype = "1000Base-SX";
17146                 else
17147                         ethtype = "10/100/1000Base-T";
17148
17149                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17150                             "(WireSpeed[%d], EEE[%d])\n",
17151                             tg3_phy_string(tp), ethtype,
17152                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17153                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17154         }
17155
17156         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17157                     (dev->features & NETIF_F_RXCSUM) != 0,
17158                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
17159                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17160                     tg3_flag(tp, ENABLE_ASF) != 0,
17161                     tg3_flag(tp, TSO_CAPABLE) != 0);
17162         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17163                     tp->dma_rwctrl,
17164                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17165                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17166
17167         pci_save_state(pdev);
17168
17169         return 0;
17170
17171 err_out_apeunmap:
17172         if (tp->aperegs) {
17173                 iounmap(tp->aperegs);
17174                 tp->aperegs = NULL;
17175         }
17176
17177 err_out_iounmap:
17178         if (tp->regs) {
17179                 iounmap(tp->regs);
17180                 tp->regs = NULL;
17181         }
17182
17183 err_out_free_dev:
17184         free_netdev(dev);
17185
17186 err_out_power_down:
17187         pci_set_power_state(pdev, PCI_D3hot);
17188
17189 err_out_free_res:
17190         pci_release_regions(pdev);
17191
17192 err_out_disable_pdev:
17193         pci_disable_device(pdev);
17194         pci_set_drvdata(pdev, NULL);
17195         return err;
17196 }
17197
17198 static void tg3_remove_one(struct pci_dev *pdev)
17199 {
17200         struct net_device *dev = pci_get_drvdata(pdev);
17201
17202         if (dev) {
17203                 struct tg3 *tp = netdev_priv(dev);
17204
17205                 release_firmware(tp->fw);
17206
17207                 tg3_reset_task_cancel(tp);
17208
17209                 if (tg3_flag(tp, USE_PHYLIB)) {
17210                         tg3_phy_fini(tp);
17211                         tg3_mdio_fini(tp);
17212                 }
17213
17214                 unregister_netdev(dev);
17215                 if (tp->aperegs) {
17216                         iounmap(tp->aperegs);
17217                         tp->aperegs = NULL;
17218                 }
17219                 if (tp->regs) {
17220                         iounmap(tp->regs);
17221                         tp->regs = NULL;
17222                 }
17223                 free_netdev(dev);
17224                 pci_release_regions(pdev);
17225                 pci_disable_device(pdev);
17226                 pci_set_drvdata(pdev, NULL);
17227         }
17228 }
17229
17230 #ifdef CONFIG_PM_SLEEP
/* PM suspend hook: quiesce the interface and prepare the chip for
 * power-down.  Returns 0 on success; if power-down preparation fails,
 * the hardware is restarted so the device is left usable and the
 * error is propagated to the PM core.
 */
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to quiesce if the interface is down */
	if (!netif_running(dev))
		return 0;

	/* Stop deferred reset work, the PHY machinery and the data path */
	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	/* Halt the chip; clearing INIT_COMPLETE tells resume that a full
	 * hardware re-init is required.
	 */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		/* Power-down prep failed: bring the device back up so the
		 * system is not left with a dead interface.
		 */
		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		/* PHY restart must happen outside tp->lock */
		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
17283
/* PM resume hook: re-initialize the hardware and restart the data
 * path for an interface that was running at suspend time.
 * Returns 0 on success or the error from tg3_restart_hw().
 */
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Interface was down at suspend; nothing to restart */
	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	/* Suspend cleared INIT_COMPLETE; set it and do a full re-init */
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* PHY restart must happen outside tp->lock */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
17315
/* Expose the suspend/resume hooks through dev_pm_ops when PM sleep
 * support is configured; otherwise register no PM callbacks.
 */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
17324
17325 /**
17326  * tg3_io_error_detected - called when PCI error is detected
17327  * @pdev: Pointer to PCI device
17328  * @state: The current pci connection state
17329  *
17330  * This function is called after a PCI bus error affecting
17331  * this device has been detected.
17332  */
17333 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17334                                               pci_channel_state_t state)
17335 {
17336         struct net_device *netdev = pci_get_drvdata(pdev);
17337         struct tg3 *tp = netdev_priv(netdev);
17338         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17339
17340         netdev_info(netdev, "PCI I/O error detected\n");
17341
17342         rtnl_lock();
17343
17344         if (!netif_running(netdev))
17345                 goto done;
17346
17347         tg3_phy_stop(tp);
17348
17349         tg3_netif_stop(tp);
17350
17351         tg3_timer_stop(tp);
17352
17353         /* Want to make sure that the reset task doesn't run */
17354         tg3_reset_task_cancel(tp);
17355
17356         netif_device_detach(netdev);
17357
17358         /* Clean up software state, even if MMIO is blocked */
17359         tg3_full_lock(tp, 0);
17360         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17361         tg3_full_unlock(tp);
17362
17363 done:
17364         if (state == pci_channel_io_perm_failure)
17365                 err = PCI_ERS_RESULT_DISCONNECT;
17366         else
17367                 pci_disable_device(pdev);
17368
17369         rtnl_unlock();
17370
17371         return err;
17372 }
17373
17374 /**
17375  * tg3_io_slot_reset - called after the pci bus has been reset.
17376  * @pdev: Pointer to PCI device
17377  *
17378  * Restart the card from scratch, as if from a cold-boot.
17379  * At this point, the card has exprienced a hard reset,
17380  * followed by fixups by BIOS, and has its config space
17381  * set up identically to what it was at cold boot.
17382  */
17383 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17384 {
17385         struct net_device *netdev = pci_get_drvdata(pdev);
17386         struct tg3 *tp = netdev_priv(netdev);
17387         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17388         int err;
17389
17390         rtnl_lock();
17391
17392         if (pci_enable_device(pdev)) {
17393                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17394                 goto done;
17395         }
17396
17397         pci_set_master(pdev);
17398         pci_restore_state(pdev);
17399         pci_save_state(pdev);
17400
17401         if (!netif_running(netdev)) {
17402                 rc = PCI_ERS_RESULT_RECOVERED;
17403                 goto done;
17404         }
17405
17406         err = tg3_power_up(tp);
17407         if (err)
17408                 goto done;
17409
17410         rc = PCI_ERS_RESULT_RECOVERED;
17411
17412 done:
17413         rtnl_unlock();
17414
17415         return rc;
17416 }
17417
17418 /**
17419  * tg3_io_resume - called when traffic can start flowing again.
17420  * @pdev: Pointer to PCI device
17421  *
17422  * This callback is called when the error recovery driver tells
17423  * us that its OK to resume normal operation.
17424  */
17425 static void tg3_io_resume(struct pci_dev *pdev)
17426 {
17427         struct net_device *netdev = pci_get_drvdata(pdev);
17428         struct tg3 *tp = netdev_priv(netdev);
17429         int err;
17430
17431         rtnl_lock();
17432
17433         if (!netif_running(netdev))
17434                 goto done;
17435
17436         tg3_full_lock(tp, 0);
17437         tg3_flag_set(tp, INIT_COMPLETE);
17438         err = tg3_restart_hw(tp, 1);
17439         if (err) {
17440                 tg3_full_unlock(tp);
17441                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17442                 goto done;
17443         }
17444
17445         netif_device_attach(netdev);
17446
17447         tg3_timer_start(tp);
17448
17449         tg3_netif_start(tp);
17450
17451         tg3_full_unlock(tp);
17452
17453         tg3_phy_start(tp);
17454
17455 done:
17456         rtnl_unlock();
17457 }
17458
/* PCI error recovery (AER/EEH) callbacks */
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected = tg3_io_error_detected,
	.slot_reset     = tg3_io_slot_reset,
	.resume         = tg3_io_resume
};
17464
/* PCI driver descriptor registered at module load */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};
17473
/* Module entry point: register the PCI driver with the PCI core */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
17478
/* Module exit point: unregister the PCI driver */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
17483
/* Hook driver registration/unregistration to module load/unload */
module_init(tg3_init);
module_exit(tg3_cleanup);