/* drivers/net/ethernet/broadcom/tg3.c (from ~andy/linux)
 * Commit subject: "tg3: Remove unnecessary phy reset during ethtool commands"
 */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2013 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
50
51 #include <net/checksum.h>
52 #include <net/ip.h>
53
54 #include <linux/io.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
57
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
60
61 #ifdef CONFIG_SPARC
62 #include <asm/idprom.h>
63 #include <asm/prom.h>
64 #endif
65
66 #define BAR_0   0
67 #define BAR_2   2
68
69 #include "tg3.h"
70
71 /* Functions & macros to verify TG3_FLAGS types */
72
/* Test whether @flag is set in the @bits flag bitmap.
 * Taking the flag as enum TG3_FLAGS gives compile-time type checking
 * of the TG3_FLAG_* names used through the macros below.
 */
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

/* Atomically set @flag in the @bits flag bitmap. */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

/* Atomically clear @flag in the @bits flag bitmap. */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

/* Convenience wrappers that paste the TG3_FLAG_ prefix and pass the
 * per-device flag bitmap (tp->tg3_flags).
 */
#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
94
95 #define DRV_MODULE_NAME         "tg3"
96 #define TG3_MAJ_NUM                     3
97 #define TG3_MIN_NUM                     130
98 #define DRV_MODULE_VERSION      \
99         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100 #define DRV_MODULE_RELDATE      "February 14, 2013"
101
102 #define RESET_KIND_SHUTDOWN     0
103 #define RESET_KIND_INIT         1
104 #define RESET_KIND_SUSPEND      2
105
106 #define TG3_DEF_RX_MODE         0
107 #define TG3_DEF_TX_MODE         0
108 #define TG3_DEF_MSG_ENABLE        \
109         (NETIF_MSG_DRV          | \
110          NETIF_MSG_PROBE        | \
111          NETIF_MSG_LINK         | \
112          NETIF_MSG_TIMER        | \
113          NETIF_MSG_IFDOWN       | \
114          NETIF_MSG_IFUP         | \
115          NETIF_MSG_RX_ERR       | \
116          NETIF_MSG_TX_ERR)
117
118 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
119
120 /* length of time before we decide the hardware is borked,
121  * and dev->tx_timeout() should be called to fix the problem
122  */
123
124 #define TG3_TX_TIMEOUT                  (5 * HZ)
125
126 /* hardware minimum and maximum for a single frame's data payload */
127 #define TG3_MIN_MTU                     60
128 #define TG3_MAX_MTU(tp) \
129         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
130
131 /* These numbers seem to be hard coded in the NIC firmware somehow.
132  * You can't change the ring sizes, but you can change where you place
133  * them in the NIC onboard memory.
134  */
135 #define TG3_RX_STD_RING_SIZE(tp) \
136         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138 #define TG3_DEF_RX_RING_PENDING         200
139 #define TG3_RX_JMB_RING_SIZE(tp) \
140         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
143
144 /* Do not place this n-ring entries value into the tp struct itself,
145  * we really want to expose these constants to GCC so that modulo et
146  * al.  operations are done with shifts and masks instead of with
147  * hw multiply/modulo instructions.  Another solution would be to
148  * replace things like '% foo' with '& (foo - 1)'.
149  */
150
151 #define TG3_TX_RING_SIZE                512
152 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
153
154 #define TG3_RX_STD_RING_BYTES(tp) \
155         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156 #define TG3_RX_JMB_RING_BYTES(tp) \
157         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158 #define TG3_RX_RCB_RING_BYTES(tp) \
159         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
161                                  TG3_TX_RING_SIZE)
162 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
163
164 #define TG3_DMA_BYTE_ENAB               64
165
166 #define TG3_RX_STD_DMA_SZ               1536
167 #define TG3_RX_JMB_DMA_SZ               9046
168
169 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
170
171 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
173
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
176
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
179
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181  * that are at least dword aligned when used in PCIX mode.  The driver
182  * works around this bug by double copying the packet.  This workaround
183  * is built into the normal double copy length check for efficiency.
184  *
185  * However, the double copy is only necessary on those architectures
186  * where unaligned memory accesses are inefficient.  For those architectures
187  * where unaligned memory accesses incur little penalty, we can reintegrate
188  * the 5701 in the normal rx path.  Doing so saves a device structure
189  * dereference by hardcoding the double copy threshold in place.
190  */
191 #define TG3_RX_COPY_THRESHOLD           256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
194 #else
195         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
196 #endif
197
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
200 #else
201 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
202 #endif
203
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
206 #define TG3_TX_BD_DMA_MAX_2K            2048
207 #define TG3_TX_BD_DMA_MAX_4K            4096
208
209 #define TG3_RAW_IP_ALIGN 2
210
211 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
212 #define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
213
214 #define FIRMWARE_TG3            "tigon/tg3.bin"
215 #define FIRMWARE_TG357766       "tigon/tg357766.bin"
216 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
217 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
218
219 static char version[] =
220         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
221
222 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
223 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_MODULE_VERSION);
226 MODULE_FIRMWARE(FIRMWARE_TG3);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
229
230 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
231 module_param(tg3_debug, int, 0);
232 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
233
234 #define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
235 #define TG3_DRV_DATA_FLAG_5705_10_100   0x0002
236
237 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258                         TG3_DRV_DATA_FLAG_5705_10_100},
259         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261                         TG3_DRV_DATA_FLAG_5705_10_100},
262         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265                         TG3_DRV_DATA_FLAG_5705_10_100},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287                         PCI_VENDOR_ID_LENOVO,
288                         TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
341         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
342         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
343         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
344         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
345         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
346         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
347         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
348         {}
349 };
350
351 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
352
353 static const struct {
354         const char string[ETH_GSTRING_LEN];
355 } ethtool_stats_keys[] = {
356         { "rx_octets" },
357         { "rx_fragments" },
358         { "rx_ucast_packets" },
359         { "rx_mcast_packets" },
360         { "rx_bcast_packets" },
361         { "rx_fcs_errors" },
362         { "rx_align_errors" },
363         { "rx_xon_pause_rcvd" },
364         { "rx_xoff_pause_rcvd" },
365         { "rx_mac_ctrl_rcvd" },
366         { "rx_xoff_entered" },
367         { "rx_frame_too_long_errors" },
368         { "rx_jabbers" },
369         { "rx_undersize_packets" },
370         { "rx_in_length_errors" },
371         { "rx_out_length_errors" },
372         { "rx_64_or_less_octet_packets" },
373         { "rx_65_to_127_octet_packets" },
374         { "rx_128_to_255_octet_packets" },
375         { "rx_256_to_511_octet_packets" },
376         { "rx_512_to_1023_octet_packets" },
377         { "rx_1024_to_1522_octet_packets" },
378         { "rx_1523_to_2047_octet_packets" },
379         { "rx_2048_to_4095_octet_packets" },
380         { "rx_4096_to_8191_octet_packets" },
381         { "rx_8192_to_9022_octet_packets" },
382
383         { "tx_octets" },
384         { "tx_collisions" },
385
386         { "tx_xon_sent" },
387         { "tx_xoff_sent" },
388         { "tx_flow_control" },
389         { "tx_mac_errors" },
390         { "tx_single_collisions" },
391         { "tx_mult_collisions" },
392         { "tx_deferred" },
393         { "tx_excessive_collisions" },
394         { "tx_late_collisions" },
395         { "tx_collide_2times" },
396         { "tx_collide_3times" },
397         { "tx_collide_4times" },
398         { "tx_collide_5times" },
399         { "tx_collide_6times" },
400         { "tx_collide_7times" },
401         { "tx_collide_8times" },
402         { "tx_collide_9times" },
403         { "tx_collide_10times" },
404         { "tx_collide_11times" },
405         { "tx_collide_12times" },
406         { "tx_collide_13times" },
407         { "tx_collide_14times" },
408         { "tx_collide_15times" },
409         { "tx_ucast_packets" },
410         { "tx_mcast_packets" },
411         { "tx_bcast_packets" },
412         { "tx_carrier_sense_errors" },
413         { "tx_discards" },
414         { "tx_errors" },
415
416         { "dma_writeq_full" },
417         { "dma_write_prioq_full" },
418         { "rxbds_empty" },
419         { "rx_discards" },
420         { "rx_errors" },
421         { "rx_threshold_hit" },
422
423         { "dma_readq_full" },
424         { "dma_read_prioq_full" },
425         { "tx_comp_queue_full" },
426
427         { "ring_set_send_prod_index" },
428         { "ring_status_update" },
429         { "nic_irqs" },
430         { "nic_avoided_irqs" },
431         { "nic_tx_threshold_hit" },
432
433         { "mbuf_lwm_thresh_hit" },
434 };
435
436 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
437 #define TG3_NVRAM_TEST          0
438 #define TG3_LINK_TEST           1
439 #define TG3_REGISTER_TEST       2
440 #define TG3_MEMORY_TEST         3
441 #define TG3_MAC_LOOPB_TEST      4
442 #define TG3_PHY_LOOPB_TEST      5
443 #define TG3_EXT_LOOPB_TEST      6
444 #define TG3_INTERRUPT_TEST      7
445
446
/* Names reported for the ethtool self-test results; indexed by the
 * TG3_*_TEST constants above.  Strings are padded so the
 * (online)/(offline) annotations line up in ethtool output.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};
459
460 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
461
462
/* Write @val to the device register at offset @off in the main MMIO BAR.
 * Posted write: no read-back flush is performed here.
 */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}
467
/* Read the device register at offset @off in the main MMIO BAR. */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}
472
/* Write @val to the APE register block at offset @off (posted write). */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}
477
/* Read the APE register at offset @off. */
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}
482
/* Write a device register through the PCI config-space indirect window
 * (BASE_ADDR selects the register, DATA carries the value).
 * indirect_lock serializes users of the shared window pair; IRQs are
 * disabled so the two config writes are not interleaved.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
492
/* Write @val to register @off and immediately read it back to flush
 * the posted MMIO write to the device.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}
498
/* Read a device register through the PCI config-space indirect window.
 * Counterpart of tg3_write_indirect_reg32(); same locking rules.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
510
/* Write a mailbox register in indirect (PCI config-space) mode.
 *
 * The receive-return consumer index and the std-ring producer index
 * mailboxes have their own dedicated config-space registers and bypass
 * the indirect window entirely.  All other mailboxes are reached via
 * the indirect window at off + 0x5600 (NOTE(review): 0x5600 is
 * presumably the GRC mailbox base within the register space -- confirm
 * against tg3.h).
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
540
/* Read a mailbox register in indirect mode via the config-space window
 * (same 0x5600 mailbox offset as tg3_write_indirect_mbox()).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
552
553 /* usec_wait specifies the wait time in usec when writing to certain registers
554  * where it is unsafe to read back the register without some delay.
555  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
556  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
557  */
558 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
559 {
560         if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
561                 /* Non-posted methods */
562                 tp->write32(tp, off, val);
563         else {
564                 /* Posted method */
565                 tg3_write32(tp, off, val);
566                 if (usec_wait)
567                         udelay(usec_wait);
568                 tp->read32(tp, off);
569         }
570         /* Wait again after the read for the posted method to guarantee that
571          * the wait time is met.
572          */
573         if (usec_wait)
574                 udelay(usec_wait);
575 }
576
/* Write a mailbox register and flush the posted write by reading the
 * mailbox back when either posted writes must always be flushed
 * (FLUSH_POSTED_WRITES), or neither the mailbox write-reorder nor the
 * ICH workaround is active.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}
585
/* Write a TX mailbox register.  Chips with the TXD mailbox hardware bug
 * need the value written twice; chips that may reorder mailbox writes
 * (or that always flush posted writes) need a read-back to flush.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}
596
/* 5906: mailbox registers live at GRCMBOX_BASE within the MMIO BAR. */
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}
601
/* 5906: write a mailbox register at GRCMBOX_BASE (posted write). */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}
606
607 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
608 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
609 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
610 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
611 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
612
613 #define tw32(reg, val)                  tp->write32(tp, reg, val)
614 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
615 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
616 #define tr32(reg)                       tp->read32(tp, reg)
617
/* Write @val into NIC SRAM at offset @off via the memory window.
 *
 * On the 5906, writes into [NIC_SRAM_STATS_BLK, NIC_SRAM_TX_BUFFER_DESC)
 * are silently dropped (NOTE(review): presumably that region is not
 * writable on this part -- confirm).
 *
 * SRAM_USE_CONFIG selects PCI config-space window registers over MMIO;
 * indirect_lock serializes users of the shared window in both modes,
 * and the window base is always restored to zero afterwards.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
642
/* Read NIC SRAM at offset @off into *@val via the memory window.
 *
 * On the 5906, reads from [NIC_SRAM_STATS_BLK, NIC_SRAM_TX_BUFFER_DESC)
 * return 0 instead of touching the hardware (mirrors tg3_write_mem()).
 * Same window selection and locking rules as tg3_write_mem().
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
669
670 static void tg3_ape_lock_init(struct tg3 *tp)
671 {
672         int i;
673         u32 regbase, bit;
674
675         if (tg3_asic_rev(tp) == ASIC_REV_5761)
676                 regbase = TG3_APE_LOCK_GRANT;
677         else
678                 regbase = TG3_APE_PER_LOCK_GRANT;
679
680         /* Make sure the driver hasn't any stale locks. */
681         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
682                 switch (i) {
683                 case TG3_APE_LOCK_PHY0:
684                 case TG3_APE_LOCK_PHY1:
685                 case TG3_APE_LOCK_PHY2:
686                 case TG3_APE_LOCK_PHY3:
687                         bit = APE_LOCK_GRANT_DRIVER;
688                         break;
689                 default:
690                         if (!tp->pci_fn)
691                                 bit = APE_LOCK_GRANT_DRIVER;
692                         else
693                                 bit = 1 << tp->pci_fn;
694                 }
695                 tg3_ape_write32(tp, regbase + 4 * i, bit);
696         }
697
698 }
699
/* Acquire APE hardware semaphore @locknum for the driver.
 *
 * Returns 0 on success (or immediately when the APE is not enabled),
 * -EBUSY if the grant is not observed within ~1 ms, and -EINVAL for an
 * unsupported lock number.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                /* The 5761 does not arbitrate the GPIO lock. */
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                /* Function 0 requests with the DRIVER bit; other PCI
                 * functions request with their own function bit.
                 */
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        /* 5761 uses the legacy REQ/GRANT bank; later parts have
         * per-lock register banks.
         */
        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}
758
/* Release the APE firmware mutex @locknum previously taken with
 * tg3_ape_lock().  Unknown lock numbers are silently ignored.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* The 5761 has no GPIO lock; nothing to release. */
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through - must mirror tg3_ape_lock()'s bit choice */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	/* Writing our grant bit back releases the lock. */
	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
794
795 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
796 {
797         u32 apedata;
798
799         while (timeout_us) {
800                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
801                         return -EBUSY;
802
803                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
804                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
805                         break;
806
807                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
808
809                 udelay(10);
810                 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
811         }
812
813         return timeout_us ? 0 : -EBUSY;
814 }
815
816 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
817 {
818         u32 i, apedata;
819
820         for (i = 0; i < timeout_us / 10; i++) {
821                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
822
823                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
824                         break;
825
826                 udelay(10);
827         }
828
829         return i == timeout_us / 10;
830 }
831
/* Read @len bytes from APE scratchpad offset @base_off into @data,
 * transferring in chunks bounded by the APE message buffer size.
 *
 * Returns 0 on success (or trivially when the APE lacks NCSI),
 * -ENODEV if the APE segment signature is bad, -EAGAIN if the APE
 * firmware is not ready or fails to service an event in time, or the
 * error from tg3_ape_event_lock().
 */
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Locate the shared message buffer; the payload starts after a
	 * two-word (offset, length) header.
	 */
	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		/* Post a scratchpad-read request for this chunk. */
		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		/* Release the MEM lock taken by tg3_ape_event_lock() and
		 * ring the APE doorbell to process the event.
		 */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		/* Copy the returned chunk out of the message buffer. */
		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
895
/* Post @event to the APE firmware and ring its doorbell.
 *
 * Returns 0 on success, -EAGAIN if the APE segment signature is bad
 * or the firmware is not ready, or the error from
 * tg3_ape_event_lock().
 */
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	/* Drop the MEM lock taken by tg3_ape_event_lock(), then notify
	 * the APE that a new event is posted.
	 */
	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
922
/* Inform the APE firmware of a driver state transition.  @kind is one
 * of RESET_KIND_INIT, RESET_KIND_SHUTDOWN or RESET_KIND_SUSPEND;
 * other values are ignored.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish the host segment and identify this driver. */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		/* Arm wake-on-LAN in the APE if the host enabled it. */
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
979
980 static void tg3_disable_ints(struct tg3 *tp)
981 {
982         int i;
983
984         tw32(TG3PCI_MISC_HOST_CTRL,
985              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
986         for (i = 0; i < tp->irq_max; i++)
987                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
988 }
989
/* Re-enable chip interrupts after tg3_disable_ints(), updating each
 * vector's mailbox with its last processed tag and kicking the chip
 * if work may have arrived while interrupts were masked.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();	/* order irq_sync clear before unmasking interrupts */

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* 1-shot MSI mode requires the mailbox write twice. */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
1020
1021 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1022 {
1023         struct tg3 *tp = tnapi->tp;
1024         struct tg3_hw_status *sblk = tnapi->hw_status;
1025         unsigned int work_exists = 0;
1026
1027         /* check for phy events */
1028         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1029                 if (sblk->status & SD_STATUS_LINK_CHG)
1030                         work_exists = 1;
1031         }
1032
1033         /* check for TX work to do */
1034         if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1035                 work_exists = 1;
1036
1037         /* check for RX work to do */
1038         if (tnapi->rx_rcb_prod_idx &&
1039             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1040                 work_exists = 1;
1041
1042         return work_exists;
1043 }
1044
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();	/* keep the mailbox write ordered before any later MMIO */

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
1065
/* Switch the core clock control back to its base setting, stepping
 * through the ALTCLK intermediate value where the hardware needs it.
 * No-op on CPMU-equipped and 5780-class devices.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	/* Keep only the CLKRUN bits and the low divider field. */
	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Leave the 44MHz core clock via the ALTCLK source in
		 * two steps before settling on the final value.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
1098
1099 #define PHY_BUSY_LOOPS  5000
1100
/* Read PHY register @reg of PHY @phy_addr through the MAC's MI
 * interface, storing the result in *val.  MI auto-polling is
 * temporarily disabled so it cannot collide with the manual access,
 * and the PHY APE lock is held across the transaction.
 *
 * Returns 0 on success or -EBUSY if the MI engine stays busy.
 */
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	/* Build the MI command frame: PHY address, register, read op. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for completion, 10 usec per iteration; re-read once more
	 * after BUSY drops to pick up the final data.
	 */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1154
/* Read register @reg of the default PHY (tp->phy_addr). */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
1159
/* Write @val to PHY register @reg of PHY @phy_addr through the MAC's
 * MI interface.  Writes to MII_CTRL1000 and MII_TG3_AUX_CTRL are
 * skipped (reported as success) on FET PHYs.  MI auto-polling is
 * temporarily disabled around the manual access and the PHY APE lock
 * is held across the transaction.
 *
 * Returns 0 on success or -EBUSY if the MI engine stays busy.
 */
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	/* Build the MI command frame: address, register, data, write op. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for completion, 10 usec per iteration. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1213
/* Write @val to register @reg of the default PHY (tp->phy_addr). */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
1218
1219 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1220 {
1221         int err;
1222
1223         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1224         if (err)
1225                 goto done;
1226
1227         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1228         if (err)
1229                 goto done;
1230
1231         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1232                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1233         if (err)
1234                 goto done;
1235
1236         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1237
1238 done:
1239         return err;
1240 }
1241
1242 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1243 {
1244         int err;
1245
1246         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1247         if (err)
1248                 goto done;
1249
1250         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1251         if (err)
1252                 goto done;
1253
1254         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1255                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1256         if (err)
1257                 goto done;
1258
1259         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1260
1261 done:
1262         return err;
1263 }
1264
1265 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1266 {
1267         int err;
1268
1269         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1270         if (!err)
1271                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1272
1273         return err;
1274 }
1275
1276 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1277 {
1278         int err;
1279
1280         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1281         if (!err)
1282                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1283
1284         return err;
1285 }
1286
1287 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1288 {
1289         int err;
1290
1291         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1292                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1293                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1294         if (!err)
1295                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1296
1297         return err;
1298 }
1299
1300 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1301 {
1302         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1303                 set |= MII_TG3_AUXCTL_MISC_WREN;
1304
1305         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1306 }
1307
1308 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1309 {
1310         u32 val;
1311         int err;
1312
1313         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1314
1315         if (err)
1316                 return err;
1317         if (enable)
1318
1319                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1320         else
1321                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1322
1323         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1324                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1325
1326         return err;
1327 }
1328
/* Reset the PHY by setting BMCR_RESET and polling (up to ~50 ms) for
 * the self-clearing reset bit to drop.
 *
 * Returns 0 on success or -EBUSY on MDIO failure or timeout.
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	/* limit is -1 only if the loop ran to exhaustion (a break leaves
	 * it >= 0), so this distinguishes timeout from success.
	 */
	if (limit < 0)
		return -EBUSY;

	return 0;
}
1359
1360 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1361 {
1362         struct tg3 *tp = bp->priv;
1363         u32 val;
1364
1365         spin_lock_bh(&tp->lock);
1366
1367         if (tg3_readphy(tp, reg, &val))
1368                 val = -EIO;
1369
1370         spin_unlock_bh(&tp->lock);
1371
1372         return val;
1373 }
1374
1375 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1376 {
1377         struct tg3 *tp = bp->priv;
1378         u32 ret = 0;
1379
1380         spin_lock_bh(&tp->lock);
1381
1382         if (tg3_writephy(tp, reg, val))
1383                 ret = -EIO;
1384
1385         spin_unlock_bh(&tp->lock);
1386
1387         return ret;
1388 }
1389
/* phylib mii_bus reset hook.  PHY resets are handled explicitly by
 * the driver (see tg3_bmcr_reset), so this is a no-op.
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1394
/* Program the 5785 MAC's PHY configuration registers (LED modes and
 * MII/RGMII signalling options) to match the attached PHY device.
 * PHYs not in the switch below are left untouched.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	/* Non-RGMII attachments only need LED modes and clock timeouts. */
	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII with in-band status enabled needs the full mask set. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Mirror the in-band configuration in the RGMII mode register. */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1475
/* Turn off MI auto-polling, and on a 5785 whose mdio bus is already
 * registered, refresh the MAC-side PHY interface configuration.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1486
/* Determine the PHY address, and when phylib is in use (USE_PHYLIB)
 * allocate and register an mdio bus, then apply PHY-specific interface
 * modes and Broadcom dev_flags to the discovered PHY.
 *
 * Returns 0 on success or a negative errno from allocation,
 * registration, or PHY probing.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* 5717+ devices address the PHY by PCI function number;
		 * serdes PHYs sit 7 addresses above the copper ones.
		 */
		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Apply per-PHY interface mode and Broadcom-specific dev_flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1591
1592 static void tg3_mdio_fini(struct tg3 *tp)
1593 {
1594         if (tg3_flag(tp, MDIOBUS_INITED)) {
1595                 tg3_flag_clear(tp, MDIOBUS_INITED);
1596                 mdiobus_unregister(tp->mdio_bus);
1597                 mdiobus_free(tp->mdio_bus);
1598         }
1599 }
1600
1601 /* tp->lock is held. */
1602 static inline void tg3_generate_fw_event(struct tg3 *tp)
1603 {
1604         u32 val;
1605
1606         val = tr32(GRC_RX_CPU_EVENT);
1607         val |= GRC_RX_CPU_DRIVER_EVENT;
1608         tw32_f(GRC_RX_CPU_EVENT, val);
1609
1610         tp->last_event_jiffies = jiffies;
1611 }
1612
1613 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1614
/* tp->lock is held. */
/* Wait until the firmware has consumed the previously generated
 * driver event (DRIVER_EVENT bit cleared), bounded by
 * TG3_FW_EVENT_TIMEOUT_USEC measured from when the event was raised.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;	/* poll in 8 usec steps */

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1641
/* tp->lock is held. */
/* Pack key PHY registers into data[0..3] for the firmware link-update
 * mailbox: BMCR/BMSR, ADVERTISE/LPA, CTRL1000/STAT1000 (skipped for
 * MII serdes PHYs), and PHYADDR.  Failed reads contribute zero bits.
 */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
1676
/* tp->lock is held. */
/* Report a link state change to the management (ASF) firmware on
 * 5780-class devices: gather the PHY register snapshot, wait for the
 * previous event to be acknowledged, post the data through the SRAM
 * command mailbox, and raise the firmware event.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	/* Only 5780-class chips with ASF firmware consume these reports. */
	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	/* NOTE(review): length 14 while 16 bytes of data are written --
	 * presumably the value the firmware expects; confirm before
	 * changing.
	 */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
1698
1699 /* tp->lock is held. */
1700 static void tg3_stop_fw(struct tg3 *tp)
1701 {
1702         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1703                 /* Wait for RX cpu to ACK the previous event. */
1704                 tg3_wait_for_event_ack(tp);
1705
1706                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1707
1708                 tg3_generate_fw_event(tp);
1709
1710                 /* Wait for RX cpu to ACK this event. */
1711                 tg3_wait_for_event_ack(tp);
1712         }
1713 }
1714
1715 /* tp->lock is held. */
1716 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1717 {
1718         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1719                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1720
1721         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1722                 switch (kind) {
1723                 case RESET_KIND_INIT:
1724                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1725                                       DRV_STATE_START);
1726                         break;
1727
1728                 case RESET_KIND_SHUTDOWN:
1729                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1730                                       DRV_STATE_UNLOAD);
1731                         break;
1732
1733                 case RESET_KIND_SUSPEND:
1734                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1735                                       DRV_STATE_SUSPEND);
1736                         break;
1737
1738                 default:
1739                         break;
1740                 }
1741         }
1742
1743         if (kind == RESET_KIND_INIT ||
1744             kind == RESET_KIND_SUSPEND)
1745                 tg3_ape_driver_state_change(tp, kind);
1746 }
1747
1748 /* tp->lock is held. */
1749 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1750 {
1751         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1752                 switch (kind) {
1753                 case RESET_KIND_INIT:
1754                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755                                       DRV_STATE_START_DONE);
1756                         break;
1757
1758                 case RESET_KIND_SHUTDOWN:
1759                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1760                                       DRV_STATE_UNLOAD_DONE);
1761                         break;
1762
1763                 default:
1764                         break;
1765                 }
1766         }
1767
1768         if (kind == RESET_KIND_SHUTDOWN)
1769                 tg3_ape_driver_state_change(tp, kind);
1770 }
1771
1772 /* tp->lock is held. */
1773 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1774 {
1775         if (tg3_flag(tp, ENABLE_ASF)) {
1776                 switch (kind) {
1777                 case RESET_KIND_INIT:
1778                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1779                                       DRV_STATE_START);
1780                         break;
1781
1782                 case RESET_KIND_SHUTDOWN:
1783                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1784                                       DRV_STATE_UNLOAD);
1785                         break;
1786
1787                 case RESET_KIND_SUSPEND:
1788                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1789                                       DRV_STATE_SUSPEND);
1790                         break;
1791
1792                 default:
1793                         break;
1794                 }
1795         }
1796 }
1797
/* Poll until the on-chip firmware reports that its boot-time
 * initialization is complete.  Returns 0 on success (or when no
 * firmware handshake applies), -ENODEV only if the 5906 VCPU never
 * signals init-done.  The SRAM mailbox path deliberately does NOT
 * treat a timeout as fatal: some boards ship without firmware.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	/* Up to ~1s: firmware acknowledges by writing back the
	 * bit-inverted magic value into the mailbox.
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1846
1847 static void tg3_link_report(struct tg3 *tp)
1848 {
1849         if (!netif_carrier_ok(tp->dev)) {
1850                 netif_info(tp, link, tp->dev, "Link is down\n");
1851                 tg3_ump_link_report(tp);
1852         } else if (netif_msg_link(tp)) {
1853                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1854                             (tp->link_config.active_speed == SPEED_1000 ?
1855                              1000 :
1856                              (tp->link_config.active_speed == SPEED_100 ?
1857                               100 : 10)),
1858                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1859                              "full" : "half"));
1860
1861                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1862                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1863                             "on" : "off",
1864                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1865                             "on" : "off");
1866
1867                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1868                         netdev_info(tp->dev, "EEE is %s\n",
1869                                     tp->setlpicnt ? "enabled" : "disabled");
1870
1871                 tg3_ump_link_report(tp);
1872         }
1873
1874         tp->link_up = netif_carrier_ok(tp->dev);
1875 }
1876
1877 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1878 {
1879         u16 miireg;
1880
1881         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1882                 miireg = ADVERTISE_1000XPAUSE;
1883         else if (flow_ctrl & FLOW_CTRL_TX)
1884                 miireg = ADVERTISE_1000XPSE_ASYM;
1885         else if (flow_ctrl & FLOW_CTRL_RX)
1886                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1887         else
1888                 miireg = 0;
1889
1890         return miireg;
1891 }
1892
1893 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1894 {
1895         u8 cap = 0;
1896
1897         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1898                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1899         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1900                 if (lcladv & ADVERTISE_1000XPAUSE)
1901                         cap = FLOW_CTRL_RX;
1902                 if (rmtadv & ADVERTISE_1000XPAUSE)
1903                         cap = FLOW_CTRL_TX;
1904         }
1905
1906         return cap;
1907 }
1908
/* Resolve the active flow-control configuration from the local and
 * remote pause advertisements (@lcladv/@rmtadv) and program the MAC
 * RX/TX flow-control enable bits to match.  Registers are only written
 * when the mode actually changes.
 * NOTE(review): touches tp->rx_mode/tx_mode, so presumably called with
 * tp->lock held like the neighboring helpers -- confirm at call sites.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	/* With phylib, the autoneg setting lives in the phy_device. */
	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Serdes links use 1000Base-X pause bits; copper uses
		 * the standard MII resolution helper.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1947
/* phylib link-change callback (registered via phy_connect() in
 * tg3_phy_init()).  Mirrors the phy_device's speed/duplex/pause state
 * into the MAC mode, flow-control and timing registers, then logs the
 * change outside the lock if anything observable moved.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	/* Start from the current MAC mode with the port-mode and
	 * half-duplex bits cleared; they are re-derived below.
	 */
	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: resolve pause from our configured
			 * flow control and the partner's pause bits.
			 */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* 5785 needs the MI status block told about 10Mbps mode. */
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000/half uses a longer slot time than all other modes. */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* Log after dropping the lock; tg3_link_report() may sleep-free
	 * printk but does not need tp->lock.
	 */
	if (linkmesg)
		tg3_link_report(tp);
}
2031
/* Connect the MAC to its PHY through phylib: reset the PHY to a known
 * state, attach with tg3_adjust_link() as the link-change callback,
 * and mask the advertised features down to what the interface mode and
 * any 10/100-only restriction allow.
 * Returns 0 on success (idempotent if already connected), a negative
 * errno on attach failure, or -EINVAL for an unsupported interface.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only GMII/RGMII falls through to the MII mask. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		/* Unsupported interface mode: undo the attach. */
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
2079
2080 static void tg3_phy_start(struct tg3 *tp)
2081 {
2082         struct phy_device *phydev;
2083
2084         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2085                 return;
2086
2087         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2088
2089         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2090                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2091                 phydev->speed = tp->link_config.speed;
2092                 phydev->duplex = tp->link_config.duplex;
2093                 phydev->autoneg = tp->link_config.autoneg;
2094                 phydev->advertising = tp->link_config.advertising;
2095         }
2096
2097         phy_start(phydev);
2098
2099         phy_start_aneg(phydev);
2100 }
2101
2102 static void tg3_phy_stop(struct tg3 *tp)
2103 {
2104         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2105                 return;
2106
2107         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2108 }
2109
2110 static void tg3_phy_fini(struct tg3 *tp)
2111 {
2112         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2113                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2114                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2115         }
2116 }
2117
2118 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2119 {
2120         int err;
2121         u32 val;
2122
2123         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2124                 return 0;
2125
2126         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2127                 /* Cannot do read-modify-write on 5401 */
2128                 err = tg3_phy_auxctl_write(tp,
2129                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2130                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2131                                            0x4c20);
2132                 goto done;
2133         }
2134
2135         err = tg3_phy_auxctl_read(tp,
2136                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2137         if (err)
2138                 return err;
2139
2140         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2141         err = tg3_phy_auxctl_write(tp,
2142                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2143
2144 done:
2145         return err;
2146 }
2147
2148 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2149 {
2150         u32 phytest;
2151
2152         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2153                 u32 phy;
2154
2155                 tg3_writephy(tp, MII_TG3_FET_TEST,
2156                              phytest | MII_TG3_FET_SHADOW_EN);
2157                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2158                         if (enable)
2159                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2160                         else
2161                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2162                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2163                 }
2164                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2165         }
2166 }
2167
/* Enable or disable the PHY's auto power-down (APD) feature via the
 * MISC shadow registers.  Dispatches to the FET-specific helper for
 * FET PHYs; no-op on chips that do not support it.
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	/* Not applicable before 5705, nor on 5717+ MII serdes PHYs. */
	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	/* First shadow write: SCR5 power-saving selects. */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	/* DLLAPD stays set except when enabling APD on a 5784. */
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	/* Second shadow write: APD select with 84ms wake timer, plus the
	 * enable bit when requested.
	 */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
2202
/* Enable or disable automatic MDI/MDI-X crossover in the PHY.
 * FET PHYs use the MISCCTRL shadow register (opened through
 * MII_TG3_FET_TEST); other PHYs use the auxiliary-control MISC shadow.
 * No-op before 5705 or on serdes links, where crossover does not apply.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			/* Open the shadow window, flip the MDIX bit,
			 * then restore the test register to close it.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
2243
2244 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2245 {
2246         int ret;
2247         u32 val;
2248
2249         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2250                 return;
2251
2252         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2253         if (!ret)
2254                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2255                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2256 }
2257
/* Program PHY DSP coefficients from the chip's one-time-programmable
 * (OTP) calibration word (tp->phy_otp).  Each field is extracted with
 * its mask/shift and written to the matching DSP tap register while the
 * auxctl SMDSP window is open.  No-op when no OTP data was read.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Open the SMDSP access window; bail out if that fails. */
	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Close the SMDSP window. */
	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
2294
/* Adjust Energy-Efficient-Ethernet state after a link change.
 * When the link is up via autoneg at 100/1000 full duplex, program the
 * LPI exit timer and, if the link partner resolved EEE, arm setlpicnt
 * (counted down elsewhere before LPI is enabled).  Otherwise clear the
 * DSP TAP26 register and disable LPI in the CPMU.
 * @current_link_up: 1 when the link is up -- NOTE(review): int used as
 * a boolean here; inferred from the comparisons below.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit latency depends on link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Check the clause-45 EEE resolution status from autoneg. */
		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		/* EEE not negotiated: clear TAP26 and turn LPI off. */
		if (current_link_up == 1 &&
		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2337
/* Enable EEE low-power-idle in the CPMU.  On 5717/5719/57765-class
 * chips running at gigabit speed, first program the DSP TAP26
 * workaround bits through the SMDSP window.
 */
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
2356
2357 static int tg3_wait_macro_done(struct tg3 *tp)
2358 {
2359         int limit = 100;
2360
2361         while (limit--) {
2362                 u32 tmp32;
2363
2364                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2365                         if ((tmp32 & 0x1000) == 0)
2366                                 break;
2367                 }
2368         }
2369         if (limit < 0)
2370                 return -EBUSY;
2371
2372         return 0;
2373 }
2374
/* Write a fixed test pattern into each of the four PHY DSP channels and
 * read it back to verify the DSP memory.  On any verification failure
 * or macro timeout, returns -EBUSY; timeouts additionally request a
 * full PHY reset from the caller by setting *resetp = 1.  Returns 0
 * when all four channels verify.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Per-channel pattern: three (low, high) word pairs. */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select the channel's block and start a write macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Commit the write and wait for the macro to finish. */
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and start a read-back macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			/* Only these bits of each pair are significant. */
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: poke the recovery sequence and
				 * report failure without requesting a reset.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2440
2441 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2442 {
2443         int chan;
2444
2445         for (chan = 0; chan < 4; chan++) {
2446                 int i;
2447
2448                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2449                              (chan * 0x2000) | 0x0200);
2450                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2451                 for (i = 0; i < 6; i++)
2452                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2453                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2454                 if (tg3_wait_macro_done(tp))
2455                         return -EBUSY;
2456         }
2457
2458         return 0;
2459 }
2460
2461 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2462 {
2463         u32 reg32, phy9_orig;
2464         int retries, do_phy_reset, err;
2465
2466         retries = 10;
2467         do_phy_reset = 1;
2468         do {
2469                 if (do_phy_reset) {
2470                         err = tg3_bmcr_reset(tp);
2471                         if (err)
2472                                 return err;
2473                         do_phy_reset = 0;
2474                 }
2475
2476                 /* Disable transmitter and interrupt.  */
2477                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2478                         continue;
2479
2480                 reg32 |= 0x3000;
2481                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2482
2483                 /* Set full-duplex, 1000 mbps.  */
2484                 tg3_writephy(tp, MII_BMCR,
2485                              BMCR_FULLDPLX | BMCR_SPEED1000);
2486
2487                 /* Set to master mode.  */
2488                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2489                         continue;
2490
2491                 tg3_writephy(tp, MII_CTRL1000,
2492                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2493
2494                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2495                 if (err)
2496                         return err;
2497
2498                 /* Block the PHY control access.  */
2499                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2500
2501                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2502                 if (!err)
2503                         break;
2504         } while (--retries);
2505
2506         err = tg3_phy_reset_chanpat(tp);
2507         if (err)
2508                 return err;
2509
2510         tg3_phydsp_write(tp, 0x8005, 0x0000);
2511
2512         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2513         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2514
2515         tg3_phy_toggle_auxctl_smdsp(tp, false);
2516
2517         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2518
2519         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2520                 reg32 &= ~0x3000;
2521                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2522         } else if (!err)
2523                 err = -EBUSY;
2524
2525         return err;
2526 }
2527
/* Mark the link as down: clear the net core carrier flag first, then
 * the driver's cached link state.
 */
static void tg3_carrier_off(struct tg3 *tp)
{
        netif_carrier_off(tp->dev);
        tp->link_up = false;
}
2533
/* Reset the tigon3 PHY and re-apply all chip- and PHY-revision-specific
 * workarounds.  Reports link loss if the interface was up with link.
 * Returns 0 on success or a negative errno.
 * (An earlier version took a FORCE argument; this one always resets.)
 */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 val, cpmuctrl;
        int err;

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                /* Take the internal ePHY out of IDDQ power-down first. */
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
        }
        /* BMSR latches link-down events (IEEE 802.3 clause 22), so read
         * it twice to obtain the current status.
         */
        err  = tg3_readphy(tp, MII_BMSR, &val);
        err |= tg3_readphy(tp, MII_BMSR, &val);
        if (err != 0)
                return -EBUSY;

        /* Resetting the PHY drops the link; tell the stack now. */
        if (netif_running(tp->dev) && tp->link_up) {
                netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
            tg3_asic_rev(tp) == ASIC_REV_5704 ||
            tg3_asic_rev(tp) == ASIC_REV_5705) {
                /* These chips need the DSP test-pattern reset sequence. */
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        cpmuctrl = 0;
        if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
            tg3_chip_rev(tp) != CHIPREV_5784_AX) {
                /* Temporarily clear 10Mb RX-only mode around the reset;
                 * restored below.
                 */
                cpmuctrl = tr32(TG3_CPMU_CTRL);
                if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
                        tw32(TG3_CPMU_CTRL,
                             cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
        }

        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

        if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
                val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
                tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

                /* Restore the saved CPMU control word. */
                tw32(TG3_CPMU_CTRL, cpmuctrl);
        }

        if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
            tg3_chip_rev(tp) == CHIPREV_5761_AX) {
                /* Undo the 12.5MHz MAC clock forced by tg3_power_down_phy. */
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
                    CPMU_LSPD_1000MB_MACCLK_12_5) {
                        val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                        udelay(40);
                        tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
                }
        }

        if (tg3_flag(tp, 5717_PLUS) &&
            (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
                return 0;

        tg3_phy_apply_otp(tp);

        if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
                tg3_phy_toggle_apd(tp, true);
        else
                tg3_phy_toggle_apd(tp, false);

out:
        /* DSP fixups for the various PHY erratum flags. */
        if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
            !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                tg3_phydsp_write(tp, 0x201f, 0x2aaa);
                tg3_phydsp_write(tp, 0x000a, 0x0323);
                tg3_phy_toggle_auxctl_smdsp(tp, false);
        }

        if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
                /* NOTE: the write is issued twice in the original code --
                 * presumably intentional for this erratum; do not "clean up".
                 */
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
        }

        if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
                if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                        tg3_phydsp_write(tp, 0x000a, 0x310b);
                        tg3_phydsp_write(tp, 0x201f, 0x9506);
                        tg3_phydsp_write(tp, 0x401f, 0x14e2);
                        tg3_phy_toggle_auxctl_smdsp(tp, false);
                }
        } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
                if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                        if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
                                tg3_writephy(tp, MII_TG3_TEST1,
                                             MII_TG3_TEST1_TRIM_EN | 0x4);
                        } else
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

                        tg3_phy_toggle_auxctl_smdsp(tp, false);
                }
        }

        /* Set Extended packet length bit (bit 14) on all chips that */
        /* support jumbo frames */
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
        } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
                /* Set bit 14 with read-modify-write to preserve other bits */
                err = tg3_phy_auxctl_read(tp,
                                          MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
                if (!err)
                        tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                           val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
        }

        /* Set phy register 0x10 bit 0 to high fifo elasticity to support
         * jumbo frames transmission.
         */
        if (tg3_flag(tp, JUMBO_CAPABLE)) {
                if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                /* adjust output voltage */
                tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
        }

        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
                tg3_phydsp_write(tp, 0xffb, 0x4000);

        tg3_phy_toggle_automdix(tp, 1);
        tg3_phy_set_wirespeed(tp);
        return 0;
}
2677
/* Per-function GPIO power handshake messages.  Each of the (up to four)
 * PCI functions owns a 4-bit nibble in a shared status word (see
 * tg3_set_function_status()): bit 0 = driver present, bit 1 = needs
 * auxiliary power (Vaux).  The ALL_* masks cover the corresponding bit
 * across every function's nibble.
 */
#define TG3_GPIO_MSG_DRVR_PRES           0x00000001
#define TG3_GPIO_MSG_NEED_VAUX           0x00000002
#define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
                                          TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
        ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
         (TG3_GPIO_MSG_DRVR_PRES << 4) | \
         (TG3_GPIO_MSG_DRVR_PRES << 8) | \
         (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
        ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
         (TG3_GPIO_MSG_NEED_VAUX << 4) | \
         (TG3_GPIO_MSG_NEED_VAUX << 8) | \
         (TG3_GPIO_MSG_NEED_VAUX << 12))
2693
2694 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2695 {
2696         u32 status, shift;
2697
2698         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2699             tg3_asic_rev(tp) == ASIC_REV_5719)
2700                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2701         else
2702                 status = tr32(TG3_CPMU_DRV_STATUS);
2703
2704         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2705         status &= ~(TG3_GPIO_MSG_MASK << shift);
2706         status |= (newstat << shift);
2707
2708         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2709             tg3_asic_rev(tp) == ASIC_REV_5719)
2710                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2711         else
2712                 tw32(TG3_CPMU_DRV_STATUS, status);
2713
2714         return status >> TG3_APE_GPIO_MSG_SHIFT;
2715 }
2716
/* Switch the board to main power (Vmain).  On 5717/5719/5720 the GPIO
 * pins are shared across functions, so the change is serialized through
 * the APE GPIO lock and announced via the function-status handshake.
 * No-op unless the IS_NIC flag is set.
 * Returns 0 on success, -EIO if the APE GPIO lock cannot be taken.
 */
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
        if (!tg3_flag(tp, IS_NIC))
                return 0;

        if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
            tg3_asic_rev(tp) == ASIC_REV_5719 ||
            tg3_asic_rev(tp) == ASIC_REV_5720) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
                        return -EIO;

                /* Tell the other functions this driver is present. */
                tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
        } else {
                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);
        }

        return 0;
}
2741
/* Remain on main power: pulse GPIO1 (high, low, high) with its output
 * enable set, waiting TG3_GRC_LCLCTL_PWRSW_DELAY between steps.
 * Skipped for non-NIC boards and for 5700/5701, which use different
 * GPIO wiring (see tg3_pwrsrc_switch_to_vaux()).
 */
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
        u32 grc_local_ctrl;

        if (!tg3_flag(tp, IS_NIC) ||
            tg3_asic_rev(tp) == ASIC_REV_5700 ||
            tg3_asic_rev(tp) == ASIC_REV_5701)
                return;

        grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

        tw32_wait_f(GRC_LOCAL_CTRL,
                    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
                    TG3_GRC_LCLCTL_PWRSW_DELAY);

        tw32_wait_f(GRC_LOCAL_CTRL,
                    grc_local_ctrl,
                    TG3_GRC_LCLCTL_PWRSW_DELAY);

        tw32_wait_f(GRC_LOCAL_CTRL,
                    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
                    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
2765
/* Switch the board to auxiliary power (Vaux) by stepping the GRC local
 * control GPIOs through the chip-specific sequence, with a settle delay
 * after each write.  No-op unless the IS_NIC flag is set.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
        if (!tg3_flag(tp, IS_NIC))
                return;

        if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
            tg3_asic_rev(tp) == ASIC_REV_5701) {
                /* 5700/5701: single write driving GPIO0-2 outputs. */
                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                            (GRC_LCLCTRL_GPIO_OE0 |
                             GRC_LCLCTRL_GPIO_OE1 |
                             GRC_LCLCTRL_GPIO_OE2 |
                             GRC_LCLCTRL_GPIO_OUTPUT0 |
                             GRC_LCLCTRL_GPIO_OUTPUT1),
                            TG3_GRC_LCLCTL_PWRSW_DELAY);
        } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
                /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
                u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
                                     GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OE2 |
                                     GRC_LCLCTRL_GPIO_OUTPUT0 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1 |
                                     tp->grc_local_ctrl;
                tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
                tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
                tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);
        } else {
                u32 no_gpio2;
                u32 grc_local_ctrl = 0;

                /* Workaround to prevent overdrawing Amps. */
                if (tg3_asic_rev(tp) == ASIC_REV_5714) {
                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    grc_local_ctrl,
                                    TG3_GRC_LCLCTL_PWRSW_DELAY);
                }

                /* On 5753 and variants, GPIO2 cannot be used. */
                no_gpio2 = tp->nic_sram_data_cfg &
                           NIC_SRAM_DATA_CFG_NO_GPIO2;

                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                  GRC_LCLCTRL_GPIO_OE1 |
                                  GRC_LCLCTRL_GPIO_OE2 |
                                  GRC_LCLCTRL_GPIO_OUTPUT1 |
                                  GRC_LCLCTRL_GPIO_OUTPUT2;
                if (no_gpio2) {
                        grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
                                            GRC_LCLCTRL_GPIO_OUTPUT2);
                }
                tw32_wait_f(GRC_LOCAL_CTRL,
                            tp->grc_local_ctrl | grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

                tw32_wait_f(GRC_LOCAL_CTRL,
                            tp->grc_local_ctrl | grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                if (!no_gpio2) {
                        /* Drop GPIO2 again as the final step. */
                        grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
                        tw32_wait_f(GRC_LOCAL_CTRL,
                                    tp->grc_local_ctrl | grc_local_ctrl,
                                    TG3_GRC_LCLCTL_PWRSW_DELAY);
                }
        }
}
2842
/* Decide Vaux vs. Vmain on 5717-class chips where the GPIOs are shared
 * between PCI functions.  This function's vote is published through the
 * shared status word; the power source is only actually switched when
 * no other function still reports a driver present.
 */
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
        u32 msg = 0;

        /* Serialize power state transitions */
        if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
                return;

        if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
                msg = TG3_GPIO_MSG_NEED_VAUX;

        msg = tg3_set_function_status(tp, msg);

        /* Another function's driver is still active -- leave power alone. */
        if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
                goto done;

        if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
                tg3_pwrsrc_switch_to_vaux(tp);
        else
                tg3_pwrsrc_die_with_vmain(tp);

done:
        tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
2867
/* Choose and apply the board power source (Vaux or Vmain), taking into
 * account WOL (when include_wol), ASF firmware, and -- on dual-port
 * boards -- the peer device sharing the same power circuitry.
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
        bool need_vaux = false;

        /* The GPIOs do something completely different on 57765. */
        if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
                return;

        if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
            tg3_asic_rev(tp) == ASIC_REV_5719 ||
            tg3_asic_rev(tp) == ASIC_REV_5720) {
                /* Shared-GPIO chips use the APE-arbitrated path. */
                tg3_frob_aux_power_5717(tp, include_wol ?
                                        tg3_flag(tp, WOL_ENABLE) != 0 : 0);
                return;
        }

        if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
                struct net_device *dev_peer;

                dev_peer = pci_get_drvdata(tp->pdev_peer);

                /* remove_one() may have been run on the peer. */
                if (dev_peer) {
                        struct tg3 *tp_peer = netdev_priv(dev_peer);

                        /* If the peer is fully up, it owns the GPIOs. */
                        if (tg3_flag(tp_peer, INIT_COMPLETE))
                                return;

                        if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
                            tg3_flag(tp_peer, ENABLE_ASF))
                                need_vaux = true;
                }
        }

        if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
            tg3_flag(tp, ENABLE_ASF))
                need_vaux = true;

        if (need_vaux)
                tg3_pwrsrc_switch_to_vaux(tp);
        else
                tg3_pwrsrc_die_with_vmain(tp);
}
2911
2912 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2913 {
2914         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2915                 return 1;
2916         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2917                 if (speed != SPEED_10)
2918                         return 1;
2919         } else if (speed == SPEED_10)
2920                 return 1;
2921
2922         return 0;
2923 }
2924
/* Power down the PHY (or serdes) ahead of a low-power transition,
 * applying the chip-specific sequence; several revisions must not be
 * fully powered down because of hardware bugs and only get the partial
 * treatment.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
        u32 val;

        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                if (tg3_asic_rev(tp) == ASIC_REV_5704) {
                        /* Park the 5704 serdes in HW autoneg + soft reset. */
                        u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
                        u32 serdes_cfg = tr32(MAC_SERDES_CFG);

                        sg_dig_ctrl |=
                                SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
                        tw32(SG_DIG_CTRL, sg_dig_ctrl);
                        tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
                }
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                /* 5906: reset the ePHY, then put it into IDDQ power-down. */
                tg3_bmcr_reset(tp);
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
                return;
        } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                u32 phytest;
                if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
                        u32 phy;

                        /* Stop advertising and restart autoneg so the
                         * link partner sees us drop.
                         */
                        tg3_writephy(tp, MII_ADVERTISE, 0);
                        tg3_writephy(tp, MII_BMCR,
                                     BMCR_ANENABLE | BMCR_ANRESTART);

                        /* Set standby power-down via the shadow register. */
                        tg3_writephy(tp, MII_TG3_FET_TEST,
                                     phytest | MII_TG3_FET_SHADOW_EN);
                        if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
                                phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
                                tg3_writephy(tp,
                                             MII_TG3_FET_SHDW_AUXMODE4,
                                             phy);
                        }
                        tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
                }
                return;
        } else if (do_low_power) {
                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_FORCE_LED_OFF);

                val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
                      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
                      MII_TG3_AUXCTL_PCTL_VREG_11V;
                tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
        }

        /* The PHY should not be powered down on some chips because
         * of bugs.
         */
        if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
            tg3_asic_rev(tp) == ASIC_REV_5704 ||
            (tg3_asic_rev(tp) == ASIC_REV_5780 &&
             (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
            (tg3_asic_rev(tp) == ASIC_REV_5717 &&
             !tp->pci_fn))
                return;

        if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
            tg3_chip_rev(tp) == CHIPREV_5761_AX) {
                /* Force the 12.5MHz MAC clock; undone in tg3_phy_reset(). */
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                val |= CPMU_LSPD_1000MB_MACCLK_12_5;
                tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
        }

        tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2999
/* tp->lock is held.
 * Acquire the NVRAM software arbitration semaphore.  The lock is
 * reference-counted so nested acquisitions are cheap; the hardware
 * request is only issued on the first one, polling up to
 * 8000 * 20us = 160ms for the grant.
 * Returns 0 on success, -ENODEV if the grant never arrives.
 */
static int tg3_nvram_lock(struct tg3 *tp)
{
        if (tg3_flag(tp, NVRAM)) {
                int i;

                if (tp->nvram_lock_cnt == 0) {
                        tw32(NVRAM_SWARB, SWARB_REQ_SET1);
                        for (i = 0; i < 8000; i++) {
                                if (tr32(NVRAM_SWARB) & SWARB_GNT1)
                                        break;
                                udelay(20);
                        }
                        if (i == 8000) {
                                /* Timed out: withdraw the request. */
                                tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
                                return -ENODEV;
                        }
                }
                tp->nvram_lock_cnt++;
        }
        return 0;
}
3022
3023 /* tp->lock is held. */
3024 static void tg3_nvram_unlock(struct tg3 *tp)
3025 {
3026         if (tg3_flag(tp, NVRAM)) {
3027                 if (tp->nvram_lock_cnt > 0)
3028                         tp->nvram_lock_cnt--;
3029                 if (tp->nvram_lock_cnt == 0)
3030                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3031         }
3032 }
3033
3034 /* tp->lock is held. */
3035 static void tg3_enable_nvram_access(struct tg3 *tp)
3036 {
3037         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3038                 u32 nvaccess = tr32(NVRAM_ACCESS);
3039
3040                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3041         }
3042 }
3043
3044 /* tp->lock is held. */
3045 static void tg3_disable_nvram_access(struct tg3 *tp)
3046 {
3047         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3048                 u32 nvaccess = tr32(NVRAM_ACCESS);
3049
3050                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3051         }
3052 }
3053
/* Read one 32-bit word from a legacy SEEPROM via the GRC EEPROM
 * interface.  offset must be dword aligned and within the address mask.
 * Polls up to ~1s for completion.
 * Returns 0 on success, -EINVAL on a bad offset, -EBUSY on timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
                                        u32 offset, u32 *val)
{
        u32 tmp;
        int i;

        if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
                return -EINVAL;

        /* Build the address/command word, preserving unrelated bits. */
        tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
                                        EEPROM_ADDR_DEVID_MASK |
                                        EEPROM_ADDR_READ);
        tw32(GRC_EEPROM_ADDR,
             tmp |
             (0 << EEPROM_ADDR_DEVID_SHIFT) |
             ((offset << EEPROM_ADDR_ADDR_SHIFT) &
              EEPROM_ADDR_ADDR_MASK) |
             EEPROM_ADDR_READ | EEPROM_ADDR_START);

        for (i = 0; i < 1000; i++) {
                tmp = tr32(GRC_EEPROM_ADDR);

                if (tmp & EEPROM_ADDR_COMPLETE)
                        break;
                msleep(1);
        }
        if (!(tmp & EEPROM_ADDR_COMPLETE))
                return -EBUSY;

        tmp = tr32(GRC_EEPROM_DATA);

        /*
         * The data will always be opposite the native endian
         * format.  Perform a blind byteswap to compensate.
         */
        *val = swab32(tmp);

        return 0;
}
3093
/* Maximum number of 10us polls (~100ms) to wait for NVRAM_CMD_DONE. */
#define NVRAM_CMD_TIMEOUT 10000

/* Issue a command to the NVRAM controller and busy-wait for it to
 * complete.  Returns 0 on success, -EBUSY on timeout.
 */
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
        int i;

        tw32(NVRAM_CMD, nvram_cmd);
        for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
                udelay(10);
                if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
                        /* Extra settle time after DONE is observed. */
                        udelay(10);
                        break;
                }
        }

        if (i == NVRAM_CMD_TIMEOUT)
                return -EBUSY;

        return 0;
}
3114
3115 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3116 {
3117         if (tg3_flag(tp, NVRAM) &&
3118             tg3_flag(tp, NVRAM_BUFFERED) &&
3119             tg3_flag(tp, FLASH) &&
3120             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3121             (tp->nvram_jedecnum == JEDEC_ATMEL))
3122
3123                 addr = ((addr / tp->nvram_pagesize) <<
3124                         ATMEL_AT45DB0X1B_PAGE_POS) +
3125                        (addr % tp->nvram_pagesize);
3126
3127         return addr;
3128 }
3129
3130 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3131 {
3132         if (tg3_flag(tp, NVRAM) &&
3133             tg3_flag(tp, NVRAM_BUFFERED) &&
3134             tg3_flag(tp, FLASH) &&
3135             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3136             (tp->nvram_jedecnum == JEDEC_ATMEL))
3137
3138                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3139                         tp->nvram_pagesize) +
3140                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3141
3142         return addr;
3143 }
3144
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 *
 * Read one 32-bit word at byte offset 'offset', taking and releasing
 * the NVRAM arbitration lock around the access.
 * Returns 0 on success or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
        int ret;

        /* Legacy SEEPROM parts use the GRC EEPROM interface instead. */
        if (!tg3_flag(tp, NVRAM))
                return tg3_nvram_read_using_eeprom(tp, offset, val);

        offset = tg3_nvram_phys_addr(tp, offset);

        if (offset > NVRAM_ADDR_MSK)
                return -EINVAL;

        ret = tg3_nvram_lock(tp);
        if (ret)
                return ret;

        tg3_enable_nvram_access(tp);

        tw32(NVRAM_ADDR, offset);
        ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
                NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

        if (ret == 0)
                *val = tr32(NVRAM_RDDATA);

        tg3_disable_nvram_access(tp);

        tg3_nvram_unlock(tp);

        return ret;
}
3182
3183 /* Ensures NVRAM data is in bytestream format. */
3184 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3185 {
3186         u32 v;
3187         int res = tg3_nvram_read(tp, offset, &v);
3188         if (!res)
3189                 *val = cpu_to_be32(v);
3190         return res;
3191 }
3192
/* Write 'len' bytes (dword-aligned, in bytestream order) to a legacy
 * SEEPROM one 32-bit word at a time via the GRC EEPROM interface,
 * polling up to ~1s per word.
 * Returns 0 on success, -EBUSY if a word never completes.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
                                    u32 offset, u32 len, u8 *buf)
{
        int i, j, rc = 0;
        u32 val;

        for (i = 0; i < len; i += 4) {
                u32 addr;
                __be32 data;

                addr = offset + i;

                memcpy(&data, buf + i, 4);

                /*
                 * The SEEPROM interface expects the data to always be opposite
                 * the native endian format.  We accomplish this by reversing
                 * all the operations that would have been performed on the
                 * data from a call to tg3_nvram_read_be32().
                 */
                tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

                /* Clear any stale COMPLETE status before starting. */
                val = tr32(GRC_EEPROM_ADDR);
                tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

                val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
                        EEPROM_ADDR_READ);
                tw32(GRC_EEPROM_ADDR, val |
                        (0 << EEPROM_ADDR_DEVID_SHIFT) |
                        (addr & EEPROM_ADDR_ADDR_MASK) |
                        EEPROM_ADDR_START |
                        EEPROM_ADDR_WRITE);

                for (j = 0; j < 1000; j++) {
                        val = tr32(GRC_EEPROM_ADDR);

                        if (val & EEPROM_ADDR_COMPLETE)
                                break;
                        msleep(1);
                }
                if (!(val & EEPROM_ADDR_COMPLETE)) {
                        rc = -EBUSY;
                        break;
                }
        }

        return rc;
}
3241
/* offset and length are dword aligned */
/* Write to unbuffered flash with per-page read-modify-write: each
 * affected page is read into a scratch buffer, patched, erased, and
 * rewritten word by word, followed by a final write-disable command.
 *
 * NOTE(review): 'buf' is never advanced inside the while loop, so a
 * write spanning multiple pages would repeat the same source data; and
 * 'size' is capped at pagesize rather than (pagesize - page_off), which
 * could overrun 'tmp' for a non-page-aligned start that crosses a page.
 * Presumably callers only issue page-contained writes -- verify before
 * relying on multi-page behavior.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
                u8 *buf)
{
        int ret = 0;
        u32 pagesize = tp->nvram_pagesize;
        u32 pagemask = pagesize - 1;
        u32 nvram_cmd;
        u8 *tmp;

        tmp = kmalloc(pagesize, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;

        while (len) {
                int j;
                u32 phy_addr, page_off, size;

                phy_addr = offset & ~pagemask;

                /* Read the whole target page into the scratch buffer. */
                for (j = 0; j < pagesize; j += 4) {
                        ret = tg3_nvram_read_be32(tp, phy_addr + j,
                                                  (__be32 *) (tmp + j));
                        if (ret)
                                break;
                }
                if (ret)
                        break;

                page_off = offset & pagemask;
                size = pagesize;
                if (len < size)
                        size = len;

                len -= size;

                /* Patch the caller's data into the page image. */
                memcpy(tmp + page_off, buf, size);

                offset = offset + (pagesize - page_off);

                tg3_enable_nvram_access(tp);

                /*
                 * Before we can erase the flash page, we need
                 * to issue a special "write enable" command.
                 */
                nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                /* Erase the target page */
                tw32(NVRAM_ADDR, phy_addr);

                nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
                        NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                /* Issue another write enable to start the write. */
                nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                /* Write the page image back one 32-bit word at a time. */
                for (j = 0; j < pagesize; j += 4) {
                        __be32 data;

                        data = *((__be32 *) (tmp + j));

                        tw32(NVRAM_WRDATA, be32_to_cpu(data));

                        tw32(NVRAM_ADDR, phy_addr + j);

                        nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
                                NVRAM_CMD_WR;

                        if (j == 0)
                                nvram_cmd |= NVRAM_CMD_FIRST;
                        else if (j == (pagesize - 4))
                                nvram_cmd |= NVRAM_CMD_LAST;

                        ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
                        if (ret)
                                break;
                }
                if (ret)
                        break;
        }

        /* Always leave the part write-disabled, even on error. */
        nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
        tg3_nvram_exec_cmd(tp, nvram_cmd);

        kfree(tmp);

        return ret;
}
3340
/* offset and length are dword aligned */
/* Write 'len' bytes from 'buf' to NVRAM starting at 'offset', one 32-bit
 * word per NVRAM controller command.  Used for buffered flash parts and
 * EEPROM-style devices where the controller manages page buffering.
 * Returns 0 on success or the first error from tg3_nvram_exec_cmd().
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
        int i, ret = 0;

        for (i = 0; i < len; i += 4, offset += 4) {
                u32 page_off, phy_addr, nvram_cmd;
                __be32 data;

                /* Latch the next big-endian word into the write-data reg. */
                memcpy(&data, buf + i, 4);
                tw32(NVRAM_WRDATA, be32_to_cpu(data));

                page_off = offset % tp->nvram_pagesize;

                phy_addr = tg3_nvram_phys_addr(tp, offset);

                nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

                /* FIRST marks a page (or transfer) start, LAST marks the end
                 * of a page or the final word of the whole transfer.
                 */
                if (page_off == 0 || i == 0)
                        nvram_cmd |= NVRAM_CMD_FIRST;
                if (page_off == (tp->nvram_pagesize - 4))
                        nvram_cmd |= NVRAM_CMD_LAST;

                if (i == (len - 4))
                        nvram_cmd |= NVRAM_CMD_LAST;

                /* Only reload the address register when starting a page, or
                 * on parts that need it for every word.
                 */
                if ((nvram_cmd & NVRAM_CMD_FIRST) ||
                    !tg3_flag(tp, FLASH) ||
                    !tg3_flag(tp, 57765_PLUS))
                        tw32(NVRAM_ADDR, phy_addr);

                /* ST-flash parts on older ASICs require an explicit
                 * write-enable command before each page write.
                 */
                if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
                    !tg3_flag(tp, 5755_PLUS) &&
                    (tp->nvram_jedecnum == JEDEC_ST) &&
                    (nvram_cmd & NVRAM_CMD_FIRST)) {
                        u32 cmd;

                        cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
                        ret = tg3_nvram_exec_cmd(tp, cmd);
                        if (ret)
                                break;
                }
                if (!tg3_flag(tp, FLASH)) {
                        /* We always do complete word writes to eeprom. */
                        nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
                }

                ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
                if (ret)
                        break;
        }
        return ret;
}
3395
3396 /* offset and length are dword aligned */
3397 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3398 {
3399         int ret;
3400
3401         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3402                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3403                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3404                 udelay(40);
3405         }
3406
3407         if (!tg3_flag(tp, NVRAM)) {
3408                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3409         } else {
3410                 u32 grc_mode;
3411
3412                 ret = tg3_nvram_lock(tp);
3413                 if (ret)
3414                         return ret;
3415
3416                 tg3_enable_nvram_access(tp);
3417                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3418                         tw32(NVRAM_WRITE1, 0x406);
3419
3420                 grc_mode = tr32(GRC_MODE);
3421                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3422
3423                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3424                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3425                                 buf);
3426                 } else {
3427                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3428                                 buf);
3429                 }
3430
3431                 grc_mode = tr32(GRC_MODE);
3432                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3433
3434                 tg3_disable_nvram_access(tp);
3435                 tg3_nvram_unlock(tp);
3436         }
3437
3438         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3439                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3440                 udelay(40);
3441         }
3442
3443         return ret;
3444 }
3445
/* On-chip scratch RAM windows used when downloading RX/TX CPU firmware. */
#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000
3450
3451 /* tp->lock is held. */
3452 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3453 {
3454         int i;
3455         const int iters = 10000;
3456
3457         for (i = 0; i < iters; i++) {
3458                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3459                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3460                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3461                         break;
3462         }
3463
3464         return (i == iters) ? -EBUSY : 0;
3465 }
3466
3467 /* tp->lock is held. */
3468 static int tg3_rxcpu_pause(struct tg3 *tp)
3469 {
3470         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3471
3472         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3473         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3474         udelay(10);
3475
3476         return rc;
3477 }
3478
3479 /* tp->lock is held. */
3480 static int tg3_txcpu_pause(struct tg3 *tp)
3481 {
3482         return tg3_pause_cpu(tp, TX_CPU_BASE);
3483 }
3484
3485 /* tp->lock is held. */
3486 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3487 {
3488         tw32(cpu_base + CPU_STATE, 0xffffffff);
3489         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3490 }
3491
3492 /* tp->lock is held. */
3493 static void tg3_rxcpu_resume(struct tg3 *tp)
3494 {
3495         tg3_resume_cpu(tp, RX_CPU_BASE);
3496 }
3497
3498 /* tp->lock is held. */
3499 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3500 {
3501         int rc;
3502
3503         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3504
3505         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3506                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3507
3508                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3509                 return 0;
3510         }
3511         if (cpu_base == RX_CPU_BASE) {
3512                 rc = tg3_rxcpu_pause(tp);
3513         } else {
3514                 /*
3515                  * There is only an Rx CPU for the 5750 derivative in the
3516                  * BCM4785.
3517                  */
3518                 if (tg3_flag(tp, IS_SSB_CORE))
3519                         return 0;
3520
3521                 rc = tg3_txcpu_pause(tp);
3522         }
3523
3524         if (rc) {
3525                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3526                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3527                 return -ENODEV;
3528         }
3529
3530         /* Clear firmware's nvram arbitration. */
3531         if (tg3_flag(tp, NVRAM))
3532                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3533         return 0;
3534 }
3535
3536 static int tg3_fw_data_len(struct tg3 *tp,
3537                            const struct tg3_firmware_hdr *fw_hdr)
3538 {
3539         int fw_len;
3540
3541         /* Non fragmented firmware have one firmware header followed by a
3542          * contiguous chunk of data to be written. The length field in that
3543          * header is not the length of data to be written but the complete
3544          * length of the bss. The data length is determined based on
3545          * tp->fw->size minus headers.
3546          *
3547          * Fragmented firmware have a main header followed by multiple
3548          * fragments. Each fragment is identical to non fragmented firmware
3549          * with a firmware header followed by a contiguous chunk of data. In
3550          * the main header, the length field is unused and set to 0xffffffff.
3551          * In each fragment header the length is the entire size of that
3552          * fragment i.e. fragment data + header length. Data length is
3553          * therefore length field in the header minus TG3_FW_HDR_LEN.
3554          */
3555         if (tp->fw_len == 0xffffffff)
3556                 fw_len = be32_to_cpu(fw_hdr->len);
3557         else
3558                 fw_len = tp->fw->size;
3559
3560         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3561 }
3562
/* tp->lock is held. */
/* Download the firmware image described by fw_hdr into the scratch RAM of
 * the embedded CPU at cpu_base.  On non-57766 parts the CPU is halted and
 * its scratch RAM zeroed first; on the 57766 the (fragmented) image is
 * written fragment by fragment to the addresses given in each fragment
 * header.  Returns 0 on success or a negative errno.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
                                 u32 cpu_scratch_base, int cpu_scratch_size,
                                 const struct tg3_firmware_hdr *fw_hdr)
{
        int err, i;
        void (*write_op)(struct tg3 *, u32, u32);
        int total_len = tp->fw->size;

        /* TX CPU does not exist on 5705 and later parts. */
        if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
                netdev_err(tp->dev,
                           "%s: Trying to load TX cpu firmware which is 5705\n",
                           __func__);
                return -EINVAL;
        }

        /* Pick the memory-write primitive appropriate for this ASIC. */
        if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
                write_op = tg3_write_mem;
        else
                write_op = tg3_write_indirect_reg32;

        if (tg3_asic_rev(tp) != ASIC_REV_57766) {
                /* It is possible that bootcode is still loading at this point.
                 * Get the nvram lock first before halting the cpu.
                 */
                int lock_err = tg3_nvram_lock(tp);
                err = tg3_halt_cpu(tp, cpu_base);
                if (!lock_err)
                        tg3_nvram_unlock(tp);
                if (err)
                        goto out;

                /* Zero the CPU's scratch RAM and keep it halted while the
                 * image is written.
                 */
                for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
                        write_op(tp, cpu_scratch_base + i, 0);
                tw32(cpu_base + CPU_STATE, 0xffffffff);
                tw32(cpu_base + CPU_MODE,
                     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
        } else {
                /* Subtract additional main header for fragmented firmware and
                 * advance to the first fragment
                 */
                total_len -= TG3_FW_HDR_LEN;
                fw_hdr++;
        }

        /* Copy each fragment's data words to the target address encoded in
         * its header.  Non-fragmented images take exactly one pass.
         */
        do {
                u32 *fw_data = (u32 *)(fw_hdr + 1);
                for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
                        write_op(tp, cpu_scratch_base +
                                     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
                                     (i * sizeof(u32)),
                                 be32_to_cpu(fw_data[i]));

                total_len -= be32_to_cpu(fw_hdr->len);

                /* Advance to next fragment */
                fw_hdr = (struct tg3_firmware_hdr *)
                         ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
        } while (total_len > 0);

        err = 0;

out:
        return err;
}
3628
3629 /* tp->lock is held. */
3630 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3631 {
3632         int i;
3633         const int iters = 5;
3634
3635         tw32(cpu_base + CPU_STATE, 0xffffffff);
3636         tw32_f(cpu_base + CPU_PC, pc);
3637
3638         for (i = 0; i < iters; i++) {
3639                 if (tr32(cpu_base + CPU_PC) == pc)
3640                         break;
3641                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3642                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3643                 tw32_f(cpu_base + CPU_PC, pc);
3644                 udelay(1000);
3645         }
3646
3647         return (i == iters) ? -EBUSY : 0;
3648 }
3649
3650 /* tp->lock is held. */
3651 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3652 {
3653         const struct tg3_firmware_hdr *fw_hdr;
3654         int err;
3655
3656         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3657
3658         /* Firmware blob starts with version numbers, followed by
3659            start address and length. We are setting complete length.
3660            length = end_address_of_bss - start_address_of_text.
3661            Remainder is the blob to be loaded contiguously
3662            from start address. */
3663
3664         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3665                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3666                                     fw_hdr);
3667         if (err)
3668                 return err;
3669
3670         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3671                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3672                                     fw_hdr);
3673         if (err)
3674                 return err;
3675
3676         /* Now startup only the RX cpu. */
3677         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3678                                        be32_to_cpu(fw_hdr->base_addr));
3679         if (err) {
3680                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3681                            "should be %08x\n", __func__,
3682                            tr32(RX_CPU_BASE + CPU_PC),
3683                                 be32_to_cpu(fw_hdr->base_addr));
3684                 return -ENODEV;
3685         }
3686
3687         tg3_rxcpu_resume(tp);
3688
3689         return 0;
3690 }
3691
3692 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3693 {
3694         const int iters = 1000;
3695         int i;
3696         u32 val;
3697
3698         /* Wait for boot code to complete initialization and enter service
3699          * loop. It is then safe to download service patches
3700          */
3701         for (i = 0; i < iters; i++) {
3702                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3703                         break;
3704
3705                 udelay(10);
3706         }
3707
3708         if (i == iters) {
3709                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3710                 return -EBUSY;
3711         }
3712
3713         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3714         if (val & 0xff) {
3715                 netdev_warn(tp->dev,
3716                             "Other patches exist. Not downloading EEE patch\n");
3717                 return -EEXIST;
3718         }
3719
3720         return 0;
3721 }
3722
/* tp->lock is held. */
/* Download the 57766 service-patch firmware (if present) while the RX CPU
 * is paused, then resume it.  Best-effort: returns silently on any
 * precondition failure.
 */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
        struct tg3_firmware_hdr *fw_hdr;

        /* Only taken on devices with the NO_NVRAM flag set — presumably
         * parts with NVRAM get this patch from NVRAM instead; confirm.
         */
        if (!tg3_flag(tp, NO_NVRAM))
                return;

        if (tg3_validate_rxcpu_state(tp))
                return;

        if (!tp->fw)
                return;

        /* This firmware blob has a different format than older firmware
         * releases as given below. The main difference is we have fragmented
         * data to be written to non-contiguous locations.
         *
         * In the beginning we have a firmware header identical to other
         * firmware which consists of version, base addr and length. The length
         * here is unused and set to 0xffffffff.
         *
         * This is followed by a series of firmware fragments which are
         * individually identical to previous firmware. i.e. they have the
         * firmware header and followed by data for that fragment. The version
         * field of the individual fragment header is unused.
         */

        fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
        if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
                return;

        if (tg3_rxcpu_pause(tp))
                return;

        /* tg3_load_firmware_cpu() will always succeed for the 57766 */
        tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

        tg3_rxcpu_resume(tp);
}
3763
/* tp->lock is held. */
/* Download the TSO firmware into the appropriate CPU scratch area and
 * start that CPU at the firmware's entry point.  No-op (returns 0) when
 * the FW_TSO flag is not set.  Returns 0 on success or a negative errno.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
        const struct tg3_firmware_hdr *fw_hdr;
        unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
        int err;

        if (!tg3_flag(tp, FW_TSO))
                return 0;

        fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

        /* Firmware blob starts with version numbers, followed by
           start address and length. We are setting complete length.
           length = end_address_of_bss - start_address_of_text.
           Remainder is the blob to be loaded contiguously
           from start address. */

        cpu_scratch_size = tp->fw_len;

        /* 5705 runs TSO firmware on the RX CPU out of the MBUF pool;
         * everything else uses the TX CPU's scratch RAM.
         */
        if (tg3_asic_rev(tp) == ASIC_REV_5705) {
                cpu_base = RX_CPU_BASE;
                cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
        } else {
                cpu_base = TX_CPU_BASE;
                cpu_scratch_base = TX_CPU_SCRATCH_BASE;
                cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
        }

        err = tg3_load_firmware_cpu(tp, cpu_base,
                                    cpu_scratch_base, cpu_scratch_size,
                                    fw_hdr);
        if (err)
                return err;

        /* Now startup the cpu. */
        err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
                                       be32_to_cpu(fw_hdr->base_addr));
        if (err) {
                netdev_err(tp->dev,
                           "%s fails to set CPU PC, is %08x should be %08x\n",
                           __func__, tr32(cpu_base + CPU_PC),
                           be32_to_cpu(fw_hdr->base_addr));
                return -ENODEV;
        }

        tg3_resume_cpu(tp, cpu_base);
        return 0;
}
3813
3814
3815 /* tp->lock is held. */
3816 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3817 {
3818         u32 addr_high, addr_low;
3819         int i;
3820
3821         addr_high = ((tp->dev->dev_addr[0] << 8) |
3822                      tp->dev->dev_addr[1]);
3823         addr_low = ((tp->dev->dev_addr[2] << 24) |
3824                     (tp->dev->dev_addr[3] << 16) |
3825                     (tp->dev->dev_addr[4] <<  8) |
3826                     (tp->dev->dev_addr[5] <<  0));
3827         for (i = 0; i < 4; i++) {
3828                 if (i == 1 && skip_mac_1)
3829                         continue;
3830                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3831                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3832         }
3833
3834         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3835             tg3_asic_rev(tp) == ASIC_REV_5704) {
3836                 for (i = 0; i < 12; i++) {
3837                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3838                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3839                 }
3840         }
3841
3842         addr_high = (tp->dev->dev_addr[0] +
3843                      tp->dev->dev_addr[1] +
3844                      tp->dev->dev_addr[2] +
3845                      tp->dev->dev_addr[3] +
3846                      tp->dev->dev_addr[4] +
3847                      tp->dev->dev_addr[5]) &
3848                 TX_BACKOFF_SEED_MASK;
3849         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3850 }
3851
/* Restore TG3PCI_MISC_HOST_CTRL from the cached value so register
 * accesses (indirect or otherwise) will function correctly after a
 * power-state transition.
 */
static void tg3_enable_register_access(struct tg3 *tp)
{
        /*
         * Make sure register accesses (indirect or otherwise) will function
         * correctly.
         */
        pci_write_config_dword(tp->pdev,
                               TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
3861
3862 static int tg3_power_up(struct tg3 *tp)
3863 {
3864         int err;
3865
3866         tg3_enable_register_access(tp);
3867
3868         err = pci_set_power_state(tp->pdev, PCI_D0);
3869         if (!err) {
3870                 /* Switch out of Vaux if it is a NIC */
3871                 tg3_pwrsrc_switch_to_vmain(tp);
3872         } else {
3873                 netdev_err(tp->dev, "Transition to D0 failed\n");
3874         }
3875
3876         return err;
3877 }
3878
/* Forward declaration: defined later in this file. */
static int tg3_setup_phy(struct tg3 *, int);
3880
/* Prepare the chip for a low-power transition: mask PCI interrupts, save
 * link settings and restrict PHY advertisement for wake-on-LAN, program
 * the MAC for magic-packet wake, gate clocks per ASIC generation, and
 * post the shutdown signature.  Always returns 0.  Called from
 * tg3_power_down() before the device is placed in D3hot.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
        u32 misc_host_ctrl;
        bool device_should_wake, do_low_power;

        tg3_enable_register_access(tp);

        /* Restore the CLKREQ setting. */
        if (tg3_flag(tp, CLKREQ_BUG))
                pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
                                         PCI_EXP_LNKCTL_CLKREQ_EN);

        /* Mask PCI interrupts while powering down. */
        misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
        tw32(TG3PCI_MISC_HOST_CTRL,
             misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

        device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
                             tg3_flag(tp, WOL_ENABLE);

        if (tg3_flag(tp, USE_PHYLIB)) {
                do_low_power = false;
                if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
                    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
                        struct phy_device *phydev;
                        u32 phyid, advertising;

                        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

                        tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

                        /* Save the current phylib link settings — presumably
                         * restored on power-up; confirm against resume path.
                         */
                        tp->link_config.speed = phydev->speed;
                        tp->link_config.duplex = phydev->duplex;
                        tp->link_config.autoneg = phydev->autoneg;
                        tp->link_config.advertising = phydev->advertising;

                        /* Restrict advertisement to the low-speed modes
                         * needed for wake, then restart autoneg.
                         */
                        advertising = ADVERTISED_TP |
                                      ADVERTISED_Pause |
                                      ADVERTISED_Autoneg |
                                      ADVERTISED_10baseT_Half;

                        if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
                                if (tg3_flag(tp, WOL_SPEED_100MB))
                                        advertising |=
                                                ADVERTISED_100baseT_Half |
                                                ADVERTISED_100baseT_Full |
                                                ADVERTISED_10baseT_Full;
                                else
                                        advertising |= ADVERTISED_10baseT_Full;
                        }

                        phydev->advertising = advertising;

                        phy_start_aneg(phydev);

                        /* Certain Broadcom PHY families need the full
                         * low-power programming below.
                         */
                        phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
                        if (phyid != PHY_ID_BCMAC131) {
                                phyid &= PHY_BCM_OUI_MASK;
                                if (phyid == PHY_BCM_OUI_1 ||
                                    phyid == PHY_BCM_OUI_2 ||
                                    phyid == PHY_BCM_OUI_3)
                                        do_low_power = true;
                        }
                }
        } else {
                do_low_power = true;

                if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
                        tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

                if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
                        tg3_setup_phy(tp, 0);
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                u32 val;

                val = tr32(GRC_VCPU_EXT_CTRL);
                tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
        } else if (!tg3_flag(tp, ENABLE_ASF)) {
                int i;
                u32 val;

                /* Poll (up to ~200ms) for the firmware mailbox magic —
                 * NOTE(review): appears to wait for bootcode completion
                 * before shutdown; confirm.
                 */
                for (i = 0; i < 200; i++) {
                        tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
                        if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                                break;
                        msleep(1);
                }
        }
        if (tg3_flag(tp, WOL_CAP))
                tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
                                                     WOL_DRV_STATE_SHUTDOWN |
                                                     WOL_DRV_WOL |
                                                     WOL_SET_MAGIC_PKT);

        if (device_should_wake) {
                u32 mac_mode;

                if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
                        if (do_low_power &&
                            !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
                                tg3_phy_auxctl_write(tp,
                                               MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
                                               MII_TG3_AUXCTL_PCTL_WOL_EN |
                                               MII_TG3_AUXCTL_PCTL_100TX_LPWR |
                                               MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
                                udelay(40);
                        }

                        if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
                                mac_mode = MAC_MODE_PORT_MODE_GMII;
                        else
                                mac_mode = MAC_MODE_PORT_MODE_MII;

                        mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
                        if (tg3_asic_rev(tp) == ASIC_REV_5700) {
                                u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
                                             SPEED_100 : SPEED_10;
                                if (tg3_5700_link_polarity(tp, speed))
                                        mac_mode |= MAC_MODE_LINK_POLARITY;
                                else
                                        mac_mode &= ~MAC_MODE_LINK_POLARITY;
                        }
                } else {
                        mac_mode = MAC_MODE_PORT_MODE_TBI;
                }

                if (!tg3_flag(tp, 5750_PLUS))
                        tw32(MAC_LED_CTRL, tp->led_ctrl);

                /* Arm magic-packet detection; keep frames around for
                 * ASF/APE management firmware where applicable.
                 */
                mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
                if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
                    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
                        mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

                if (tg3_flag(tp, ENABLE_APE))
                        mac_mode |= MAC_MODE_APE_TX_EN |
                                    MAC_MODE_APE_RX_EN |
                                    MAC_MODE_TDE_ENABLE;

                tw32_f(MAC_MODE, mac_mode);
                udelay(100);

                tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
                udelay(10);
        }

        /* Gate/slow clocks as appropriate for the ASIC generation. */
        if (!tg3_flag(tp, WOL_SPEED_100MB) &&
            (tg3_asic_rev(tp) == ASIC_REV_5700 ||
             tg3_asic_rev(tp) == ASIC_REV_5701)) {
                u32 base_val;

                base_val = tp->pci_clock_ctrl;
                base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
                             CLOCK_CTRL_TXCLK_DISABLE);

                tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
                            CLOCK_CTRL_PWRDOWN_PLL133, 40);
        } else if (tg3_flag(tp, 5780_CLASS) ||
                   tg3_flag(tp, CPMU_PRESENT) ||
                   tg3_asic_rev(tp) == ASIC_REV_5906) {
                /* do nothing */
        } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
                u32 newbits1, newbits2;

                if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
                    tg3_asic_rev(tp) == ASIC_REV_5701) {
                        newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
                                    CLOCK_CTRL_TXCLK_DISABLE |
                                    CLOCK_CTRL_ALTCLK);
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                } else if (tg3_flag(tp, 5705_PLUS)) {
                        newbits1 = CLOCK_CTRL_625_CORE;
                        newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
                } else {
                        newbits1 = CLOCK_CTRL_ALTCLK;
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                }

                /* Apply the two clock-control steps in sequence. */
                tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
                            40);

                tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
                            40);

                if (!tg3_flag(tp, 5705_PLUS)) {
                        u32 newbits3;

                        if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
                            tg3_asic_rev(tp) == ASIC_REV_5701) {
                                newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
                                            CLOCK_CTRL_TXCLK_DISABLE |
                                            CLOCK_CTRL_44MHZ_CORE);
                        } else {
                                newbits3 = CLOCK_CTRL_44MHZ_CORE;
                        }

                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    tp->pci_clock_ctrl | newbits3, 40);
                }
        }

        /* Fully power down the PHY only when neither WoL nor ASF needs it. */
        if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
                tg3_power_down_phy(tp, do_low_power);

        tg3_frob_aux_power(tp, true);

        /* Workaround for unstable PLL clock */
        if ((!tg3_flag(tp, IS_SSB_CORE)) &&
            ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
             (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
                u32 val = tr32(0x7d00);

                val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
                tw32(0x7d00, val);
                if (!tg3_flag(tp, ENABLE_ASF)) {
                        int err;

                        err = tg3_nvram_lock(tp);
                        tg3_halt_cpu(tp, RX_CPU_BASE);
                        if (!err)
                                tg3_nvram_unlock(tp);
                }
        }

        tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

        return 0;
}
4110
/* Put the device into its low-power D3hot state.
 *
 * tg3_power_down_prepare() performs all chip/PHY shutdown programming
 * (including any wake-on-LAN setup); this helper then arms PCI PME
 * generation and drops the PCI power state.  Must be called last on
 * the power-down path.
 */
static void tg3_power_down(struct tg3 *tp)
{
        tg3_power_down_prepare(tp);

        /* Enable PME only if the user requested WoL via ethtool. */
        pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
        pci_set_power_state(tp->pdev, PCI_D3hot);
}
4118
4119 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4120 {
4121         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4122         case MII_TG3_AUX_STAT_10HALF:
4123                 *speed = SPEED_10;
4124                 *duplex = DUPLEX_HALF;
4125                 break;
4126
4127         case MII_TG3_AUX_STAT_10FULL:
4128                 *speed = SPEED_10;
4129                 *duplex = DUPLEX_FULL;
4130                 break;
4131
4132         case MII_TG3_AUX_STAT_100HALF:
4133                 *speed = SPEED_100;
4134                 *duplex = DUPLEX_HALF;
4135                 break;
4136
4137         case MII_TG3_AUX_STAT_100FULL:
4138                 *speed = SPEED_100;
4139                 *duplex = DUPLEX_FULL;
4140                 break;
4141
4142         case MII_TG3_AUX_STAT_1000HALF:
4143                 *speed = SPEED_1000;
4144                 *duplex = DUPLEX_HALF;
4145                 break;
4146
4147         case MII_TG3_AUX_STAT_1000FULL:
4148                 *speed = SPEED_1000;
4149                 *duplex = DUPLEX_FULL;
4150                 break;
4151
4152         default:
4153                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4154                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4155                                  SPEED_10;
4156                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4157                                   DUPLEX_HALF;
4158                         break;
4159                 }
4160                 *speed = SPEED_UNKNOWN;
4161                 *duplex = DUPLEX_UNKNOWN;
4162                 break;
4163         }
4164 }
4165
4166 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4167 {
4168         int err = 0;
4169         u32 val, new_adv;
4170
4171         new_adv = ADVERTISE_CSMA;
4172         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4173         new_adv |= mii_advertise_flowctrl(flowctrl);
4174
4175         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4176         if (err)
4177                 goto done;
4178
4179         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4180                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4181
4182                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4183                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4184                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4185
4186                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4187                 if (err)
4188                         goto done;
4189         }
4190
4191         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4192                 goto done;
4193
4194         tw32(TG3_CPMU_EEE_MODE,
4195              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4196
4197         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4198         if (!err) {
4199                 u32 err2;
4200
4201                 val = 0;
4202                 /* Advertise 100-BaseTX EEE ability */
4203                 if (advertise & ADVERTISED_100baseT_Full)
4204                         val |= MDIO_AN_EEE_ADV_100TX;
4205                 /* Advertise 1000-BaseT EEE ability */
4206                 if (advertise & ADVERTISED_1000baseT_Full)
4207                         val |= MDIO_AN_EEE_ADV_1000T;
4208                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4209                 if (err)
4210                         val = 0;
4211
4212                 switch (tg3_asic_rev(tp)) {
4213                 case ASIC_REV_5717:
4214                 case ASIC_REV_57765:
4215                 case ASIC_REV_57766:
4216                 case ASIC_REV_5719:
4217                         /* If we advertised any eee advertisements above... */
4218                         if (val)
4219                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4220                                       MII_TG3_DSP_TAP26_RMRXSTO |
4221                                       MII_TG3_DSP_TAP26_OPCSINPT;
4222                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4223                         /* Fall through */
4224                 case ASIC_REV_5720:
4225                 case ASIC_REV_5762:
4226                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4227                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4228                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4229                 }
4230
4231                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4232                 if (!err)
4233                         err = err2;
4234         }
4235
4236 done:
4237         return err;
4238 }
4239
/* Kick off link negotiation on a copper PHY.
 *
 * When autoneg is enabled (or the PHY is in low-power mode, where a
 * restricted advertisement is forced) this programs the advertisement
 * registers and restarts autonegotiation.  Otherwise it forces the
 * configured speed/duplex through BMCR, first dropping the link via
 * loopback so the link partner sees the change.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
        if (tp->link_config.autoneg == AUTONEG_ENABLE ||
            (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
                u32 adv, fc;

                if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
                        /* In low power, advertise only what WoL needs:
                         * 10Mb always, 100Mb if WOL_SPEED_100MB is set.
                         */
                        adv = ADVERTISED_10baseT_Half |
                              ADVERTISED_10baseT_Full;
                        if (tg3_flag(tp, WOL_SPEED_100MB))
                                adv |= ADVERTISED_100baseT_Half |
                                       ADVERTISED_100baseT_Full;

                        fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
                } else {
                        adv = tp->link_config.advertising;
                        if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                                adv &= ~(ADVERTISED_1000baseT_Half |
                                         ADVERTISED_1000baseT_Full);

                        fc = tp->link_config.flowctrl;
                }

                tg3_phy_autoneg_cfg(tp, adv, fc);

                tg3_writephy(tp, MII_BMCR,
                             BMCR_ANENABLE | BMCR_ANRESTART);
        } else {
                int i;
                u32 bmcr, orig_bmcr;

                tp->link_config.active_speed = tp->link_config.speed;
                tp->link_config.active_duplex = tp->link_config.duplex;

                if (tg3_asic_rev(tp) == ASIC_REV_5714) {
                        /* With autoneg disabled, 5715 only links up when the
                         * advertisement register has the configured speed
                         * enabled.
                         */
                        tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
                }

                /* Build the forced-mode BMCR value; SPEED_10 is the
                 * all-bits-clear default.
                 */
                bmcr = 0;
                switch (tp->link_config.speed) {
                default:
                case SPEED_10:
                        break;

                case SPEED_100:
                        bmcr |= BMCR_SPEED100;
                        break;

                case SPEED_1000:
                        bmcr |= BMCR_SPEED1000;
                        break;
                }

                if (tp->link_config.duplex == DUPLEX_FULL)
                        bmcr |= BMCR_FULLDPLX;

                if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
                    (bmcr != orig_bmcr)) {
                        /* Force the link down via loopback, then wait
                         * (up to ~15ms) for BMSR to report link loss
                         * before writing the new forced mode.
                         */
                        tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
                        for (i = 0; i < 1500; i++) {
                                u32 tmp;

                                udelay(10);
                                /* BMSR latches link-down; read twice for
                                 * the current state.
                                 */
                                if (tg3_readphy(tp, MII_BMSR, &tmp) ||
                                    tg3_readphy(tp, MII_BMSR, &tmp))
                                        continue;
                                if (!(tmp & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
                                }
                        }
                        tg3_writephy(tp, MII_BMCR, bmcr);
                        udelay(40);
                }
        }
}
4320
4321 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4322 {
4323         int err;
4324
4325         /* Turn off tap power management. */
4326         /* Set Extended packet length bit */
4327         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4328
4329         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4330         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4331         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4332         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4333         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4334
4335         udelay(40);
4336
4337         return err;
4338 }
4339
4340 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4341 {
4342         u32 advmsk, tgtadv, advertising;
4343
4344         advertising = tp->link_config.advertising;
4345         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4346
4347         advmsk = ADVERTISE_ALL;
4348         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4349                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4350                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4351         }
4352
4353         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4354                 return false;
4355
4356         if ((*lcladv & advmsk) != tgtadv)
4357                 return false;
4358
4359         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4360                 u32 tg3_ctrl;
4361
4362                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4363
4364                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4365                         return false;
4366
4367                 if (tgtadv &&
4368                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4369                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4370                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4371                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4372                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4373                 } else {
4374                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4375                 }
4376
4377                 if (tg3_ctrl != tgtadv)
4378                         return false;
4379         }
4380
4381         return true;
4382 }
4383
4384 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4385 {
4386         u32 lpeth = 0;
4387
4388         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4389                 u32 val;
4390
4391                 if (tg3_readphy(tp, MII_STAT1000, &val))
4392                         return false;
4393
4394                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4395         }
4396
4397         if (tg3_readphy(tp, MII_LPA, rmtadv))
4398                 return false;
4399
4400         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4401         tp->link_config.rmt_adv = lpeth;
4402
4403         return true;
4404 }
4405
4406 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4407 {
4408         if (curr_link_up != tp->link_up) {
4409                 if (curr_link_up) {
4410                         netif_carrier_on(tp->dev);
4411                 } else {
4412                         netif_carrier_off(tp->dev);
4413                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4414                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4415                 }
4416
4417                 tg3_link_report(tp);
4418                 return true;
4419         }
4420
4421         return false;
4422 }
4423
/* Bring up / re-evaluate the link on a copper PHY.
 *
 * @force_reset: nonzero to unconditionally reset the PHY first.
 *
 * Applies chip- and PHY-specific workarounds, determines the current
 * link/speed/duplex from the PHY, restarts negotiation if the link is
 * down, and programs MAC_MODE and related MAC registers to match.
 * Always returns 0.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
        int current_link_up;
        u32 bmsr, val;
        u32 lcl_adv, rmt_adv;
        u16 current_speed;
        u8 current_duplex;
        int i, err;

        tw32(MAC_EVENT, 0);

        /* Clear any stale link/config change indications. */
        tw32_f(MAC_STATUS,
             (MAC_STATUS_SYNC_CHANGED |
              MAC_STATUS_CFG_CHANGED |
              MAC_STATUS_MI_COMPLETION |
              MAC_STATUS_LNKSTATE_CHANGED));
        udelay(40);

        /* Auto-polling must be off while we access the PHY directly. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

        /* Some third-party PHYs need to be reset on link going
         * down.
         */
        if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
             tg3_asic_rev(tp) == ASIC_REV_5704 ||
             tg3_asic_rev(tp) == ASIC_REV_5705) &&
            tp->link_up) {
                /* BMSR latches link-down; read twice for current state. */
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    !(bmsr & BMSR_LSTATUS))
                        force_reset = 1;
        }
        if (force_reset)
                tg3_phy_reset(tp);

        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
                    !tg3_flag(tp, INIT_COMPLETE))
                        bmsr = 0;

                if (!(bmsr & BMSR_LSTATUS)) {
                        err = tg3_init_5401phy_dsp(tp);
                        if (err)
                                return err;

                        /* Wait up to ~10ms for the link to return after
                         * the DSP reprogramming.
                         */
                        tg3_readphy(tp, MII_BMSR, &bmsr);
                        for (i = 0; i < 1000; i++) {
                                udelay(10);
                                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                                    (bmsr & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
                                }
                        }

                        /* 5401 B0 rev may fail to link at gigabit; reset
                         * and reinitialize the DSP once more.
                         */
                        if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
                            TG3_PHY_REV_BCM5401_B0 &&
                            !(bmsr & BMSR_LSTATUS) &&
                            tp->link_config.active_speed == SPEED_1000) {
                                err = tg3_phy_reset(tp);
                                if (!err)
                                        err = tg3_init_5401phy_dsp(tp);
                                if (err)
                                        return err;
                        }
                }
        } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
                   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
                /* 5701 {A0,B0} CRC bug workaround */
                tg3_writephy(tp, 0x15, 0x0a75);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
        }

        /* Clear pending interrupts... */
        tg3_readphy(tp, MII_TG3_ISTAT, &val);
        tg3_readphy(tp, MII_TG3_ISTAT, &val);

        if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
                tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
        else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
                tg3_writephy(tp, MII_TG3_IMASK, ~0);

        if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
            tg3_asic_rev(tp) == ASIC_REV_5701) {
                if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
                else
                        tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
        }

        current_link_up = 0;
        current_speed = SPEED_UNKNOWN;
        current_duplex = DUPLEX_UNKNOWN;
        tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
        tp->link_config.rmt_adv = 0;

        if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
                /* Ensure bit 10 of the MISCTEST shadow register is set;
                 * if it wasn't, set it and renegotiate.
                 */
                err = tg3_phy_auxctl_read(tp,
                                          MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
                                          &val);
                if (!err && !(val & (1 << 10))) {
                        tg3_phy_auxctl_write(tp,
                                             MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
                                             val | (1 << 10));
                        goto relink;
                }
        }

        /* Poll (up to ~4ms) for link-up; double BMSR read clears the
         * latched value.
         */
        bmsr = 0;
        for (i = 0; i < 100; i++) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    (bmsr & BMSR_LSTATUS))
                        break;
                udelay(40);
        }

        if (bmsr & BMSR_LSTATUS) {
                u32 aux_stat, bmcr;

                /* Wait for the aux status to become nonzero, then decode
                 * it into speed/duplex.
                 */
                tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
                for (i = 0; i < 2000; i++) {
                        udelay(10);
                        if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
                            aux_stat)
                                break;
                }

                tg3_aux_stat_to_speed_duplex(tp, aux_stat,
                                             &current_speed,
                                             &current_duplex);

                /* Wait for a stable, sane BMCR value (0x7fff means the
                 * read glitched).
                 */
                bmcr = 0;
                for (i = 0; i < 200; i++) {
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        if (tg3_readphy(tp, MII_BMCR, &bmcr))
                                continue;
                        if (bmcr && bmcr != 0x7fff)
                                break;
                        udelay(10);
                }

                lcl_adv = 0;
                rmt_adv = 0;

                tp->link_config.active_speed = current_speed;
                tp->link_config.active_duplex = current_duplex;

                if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                        /* Link counts only if autoneg is on and the
                         * advertisements still match the configuration.
                         */
                        if ((bmcr & BMCR_ANENABLE) &&
                            tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
                            tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
                                current_link_up = 1;
                } else {
                        /* Forced mode: the PHY must agree with the
                         * requested speed/duplex.
                         */
                        if (!(bmcr & BMCR_ANENABLE) &&
                            tp->link_config.speed == current_speed &&
                            tp->link_config.duplex == current_duplex) {
                                current_link_up = 1;
                        }
                }

                if (current_link_up == 1 &&
                    tp->link_config.active_duplex == DUPLEX_FULL) {
                        u32 reg, bit;

                        /* Record MDI-X status; FET PHYs expose it in a
                         * different register.
                         */
                        if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                                reg = MII_TG3_FET_GEN_STAT;
                                bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
                        } else {
                                reg = MII_TG3_EXT_STAT;
                                bit = MII_TG3_EXT_STAT_MDIX;
                        }

                        if (!tg3_readphy(tp, reg, &val) && (val & bit))
                                tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

                        tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
                }
        }

relink:
        if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
                tg3_phy_copper_begin(tp);

                if (tg3_flag(tp, ROBOSWITCH)) {
                        current_link_up = 1;
                        /* FIXME: when BCM5325 switch is used use 100 MBit/s */
                        current_speed = SPEED_1000;
                        current_duplex = DUPLEX_FULL;
                        tp->link_config.active_speed = current_speed;
                        tp->link_config.active_duplex = current_duplex;
                }

                tg3_readphy(tp, MII_BMSR, &bmsr);
                if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
                    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
                        current_link_up = 1;
        }

        /* Select the MAC port mode matching the negotiated speed. */
        tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
        if (current_link_up == 1) {
                if (tp->link_config.active_speed == SPEED_100 ||
                    tp->link_config.active_speed == SPEED_10)
                        tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
        } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
                tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
        else
                tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

        /* In order for the 5750 core in BCM4785 chip to work properly
         * in RGMII mode, the Led Control Register must be set up.
         */
        if (tg3_flag(tp, RGMII_MODE)) {
                u32 led_ctrl = tr32(MAC_LED_CTRL);
                led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);

                if (tp->link_config.active_speed == SPEED_10)
                        led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
                else if (tp->link_config.active_speed == SPEED_100)
                        led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
                                     LED_CTRL_100MBPS_ON);
                else if (tp->link_config.active_speed == SPEED_1000)
                        led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
                                     LED_CTRL_1000MBPS_ON);

                tw32(MAC_LED_CTRL, led_ctrl);
                udelay(40);
        }

        tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
        if (tp->link_config.active_duplex == DUPLEX_HALF)
                tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

        if (tg3_asic_rev(tp) == ASIC_REV_5700) {
                if (current_link_up == 1 &&
                    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
                        tp->mac_mode |= MAC_MODE_LINK_POLARITY;
                else
                        tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
        }

        /* ??? Without this setting Netgear GA302T PHY does not
         * ??? send/receive packets...
         */
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
            tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
                tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tg3_phy_eee_adjust(tp, current_link_up);

        if (tg3_flag(tp, USE_LINKCHG_REG)) {
                /* Polled via timer. */
                tw32_f(MAC_EVENT, 0);
        } else {
                tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        }
        udelay(40);

        /* Notify firmware of a gigabit link on 5700 PCI-X/high-speed
         * PCI buses.
         */
        if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
            current_link_up == 1 &&
            tp->link_config.active_speed == SPEED_1000 &&
            (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
                udelay(120);
                tw32_f(MAC_STATUS,
                     (MAC_STATUS_SYNC_CHANGED |
                      MAC_STATUS_CFG_CHANGED));
                udelay(40);
                tg3_write_mem(tp,
                              NIC_SRAM_FIRMWARE_MBOX,
                              NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
        }

        /* Prevent send BD corruption. */
        if (tg3_flag(tp, CLKREQ_BUG)) {
                if (tp->link_config.active_speed == SPEED_100 ||
                    tp->link_config.active_speed == SPEED_10)
                        pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
                                                   PCI_EXP_LNKCTL_CLKREQ_EN);
                else
                        pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
                                                 PCI_EXP_LNKCTL_CLKREQ_EN);
        }

        tg3_test_and_report_link_chg(tp, current_link_up);

        return 0;
}
4729
/* Software state for the hand-rolled 1000BASE-X autonegotiation state
 * machine used on fiber links (driven by tg3_fiber_aneg_smachine()).
 */
struct tg3_fiber_aneginfo {
        int state;      /* current ANEG_STATE_* of the state machine */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

        u32 flags;      /* MR_* control/status bits, mirroring 802.3 "mr_" variables */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

        /* tick counters (in state-machine invocations, not jiffies) */
        unsigned long link_time, cur_time;

        /* last received config word and how long it has been stable */
        u32 ability_match_cfg;
        int ability_match_count;

        /* boolean match indicators derived from the received config */
        char ability_match, idle_match, ack_match;

        u32 txconfig, rxconfig;         /* raw tx/rx config words */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks a state must remain stable before advancing. */
#define ANEG_STATE_SETTLE_TIME  10000
4793
4794 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4795                                    struct tg3_fiber_aneginfo *ap)
4796 {
4797         u16 flowctrl;
4798         unsigned long delta;
4799         u32 rx_cfg_reg;
4800         int ret;
4801
4802         if (ap->state == ANEG_STATE_UNKNOWN) {
4803                 ap->rxconfig = 0;
4804                 ap->link_time = 0;
4805                 ap->cur_time = 0;
4806                 ap->ability_match_cfg = 0;
4807                 ap->ability_match_count = 0;
4808                 ap->ability_match = 0;
4809                 ap->idle_match = 0;
4810                 ap->ack_match = 0;
4811         }
4812         ap->cur_time++;
4813
4814         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4815                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4816
4817                 if (rx_cfg_reg != ap->ability_match_cfg) {
4818                         ap->ability_match_cfg = rx_cfg_reg;
4819                         ap->ability_match = 0;
4820                         ap->ability_match_count = 0;
4821                 } else {
4822                         if (++ap->ability_match_count > 1) {
4823                                 ap->ability_match = 1;
4824                                 ap->ability_match_cfg = rx_cfg_reg;
4825                         }
4826                 }
4827                 if (rx_cfg_reg & ANEG_CFG_ACK)
4828                         ap->ack_match = 1;
4829                 else
4830                         ap->ack_match = 0;
4831
4832                 ap->idle_match = 0;
4833         } else {
4834                 ap->idle_match = 1;
4835                 ap->ability_match_cfg = 0;
4836                 ap->ability_match_count = 0;
4837                 ap->ability_match = 0;
4838                 ap->ack_match = 0;
4839
4840                 rx_cfg_reg = 0;
4841         }
4842
4843         ap->rxconfig = rx_cfg_reg;
4844         ret = ANEG_OK;
4845
4846         switch (ap->state) {
4847         case ANEG_STATE_UNKNOWN:
4848                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4849                         ap->state = ANEG_STATE_AN_ENABLE;
4850
4851                 /* fallthru */
4852         case ANEG_STATE_AN_ENABLE:
4853                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4854                 if (ap->flags & MR_AN_ENABLE) {
4855                         ap->link_time = 0;
4856                         ap->cur_time = 0;
4857                         ap->ability_match_cfg = 0;
4858                         ap->ability_match_count = 0;
4859                         ap->ability_match = 0;
4860                         ap->idle_match = 0;
4861                         ap->ack_match = 0;
4862
4863                         ap->state = ANEG_STATE_RESTART_INIT;
4864                 } else {
4865                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4866                 }
4867                 break;
4868
4869         case ANEG_STATE_RESTART_INIT:
4870                 ap->link_time = ap->cur_time;
4871                 ap->flags &= ~(MR_NP_LOADED);
4872                 ap->txconfig = 0;
4873                 tw32(MAC_TX_AUTO_NEG, 0);
4874                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4875                 tw32_f(MAC_MODE, tp->mac_mode);
4876                 udelay(40);
4877
4878                 ret = ANEG_TIMER_ENAB;
4879                 ap->state = ANEG_STATE_RESTART;
4880
4881                 /* fallthru */
4882         case ANEG_STATE_RESTART:
4883                 delta = ap->cur_time - ap->link_time;
4884                 if (delta > ANEG_STATE_SETTLE_TIME)
4885                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4886                 else
4887                         ret = ANEG_TIMER_ENAB;
4888                 break;
4889
4890         case ANEG_STATE_DISABLE_LINK_OK:
4891                 ret = ANEG_DONE;
4892                 break;
4893
4894         case ANEG_STATE_ABILITY_DETECT_INIT:
4895                 ap->flags &= ~(MR_TOGGLE_TX);
4896                 ap->txconfig = ANEG_CFG_FD;
4897                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4898                 if (flowctrl & ADVERTISE_1000XPAUSE)
4899                         ap->txconfig |= ANEG_CFG_PS1;
4900                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4901                         ap->txconfig |= ANEG_CFG_PS2;
4902                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4903                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4904                 tw32_f(MAC_MODE, tp->mac_mode);
4905                 udelay(40);
4906
4907                 ap->state = ANEG_STATE_ABILITY_DETECT;
4908                 break;
4909
4910         case ANEG_STATE_ABILITY_DETECT:
4911                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4912                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4913                 break;
4914
4915         case ANEG_STATE_ACK_DETECT_INIT:
4916                 ap->txconfig |= ANEG_CFG_ACK;
4917                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4918                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4919                 tw32_f(MAC_MODE, tp->mac_mode);
4920                 udelay(40);
4921
4922                 ap->state = ANEG_STATE_ACK_DETECT;
4923
4924                 /* fallthru */
4925         case ANEG_STATE_ACK_DETECT:
4926                 if (ap->ack_match != 0) {
4927                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4928                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4929                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4930                         } else {
4931                                 ap->state = ANEG_STATE_AN_ENABLE;
4932                         }
4933                 } else if (ap->ability_match != 0 &&
4934                            ap->rxconfig == 0) {
4935                         ap->state = ANEG_STATE_AN_ENABLE;
4936                 }
4937                 break;
4938
4939         case ANEG_STATE_COMPLETE_ACK_INIT:
4940                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4941                         ret = ANEG_FAILED;
4942                         break;
4943                 }
4944                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4945                                MR_LP_ADV_HALF_DUPLEX |
4946                                MR_LP_ADV_SYM_PAUSE |
4947                                MR_LP_ADV_ASYM_PAUSE |
4948                                MR_LP_ADV_REMOTE_FAULT1 |
4949                                MR_LP_ADV_REMOTE_FAULT2 |
4950                                MR_LP_ADV_NEXT_PAGE |
4951                                MR_TOGGLE_RX |
4952                                MR_NP_RX);
4953                 if (ap->rxconfig & ANEG_CFG_FD)
4954                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4955                 if (ap->rxconfig & ANEG_CFG_HD)
4956                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4957                 if (ap->rxconfig & ANEG_CFG_PS1)
4958                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4959                 if (ap->rxconfig & ANEG_CFG_PS2)
4960                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4961                 if (ap->rxconfig & ANEG_CFG_RF1)
4962                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4963                 if (ap->rxconfig & ANEG_CFG_RF2)
4964                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4965                 if (ap->rxconfig & ANEG_CFG_NP)
4966                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4967
4968                 ap->link_time = ap->cur_time;
4969
4970                 ap->flags ^= (MR_TOGGLE_TX);
4971                 if (ap->rxconfig & 0x0008)
4972                         ap->flags |= MR_TOGGLE_RX;
4973                 if (ap->rxconfig & ANEG_CFG_NP)
4974                         ap->flags |= MR_NP_RX;
4975                 ap->flags |= MR_PAGE_RX;
4976
4977                 ap->state = ANEG_STATE_COMPLETE_ACK;
4978                 ret = ANEG_TIMER_ENAB;
4979                 break;
4980
4981         case ANEG_STATE_COMPLETE_ACK:
4982                 if (ap->ability_match != 0 &&
4983                     ap->rxconfig == 0) {
4984                         ap->state = ANEG_STATE_AN_ENABLE;
4985                         break;
4986                 }
4987                 delta = ap->cur_time - ap->link_time;
4988                 if (delta > ANEG_STATE_SETTLE_TIME) {
4989                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4990                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4991                         } else {
4992                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4993                                     !(ap->flags & MR_NP_RX)) {
4994                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4995                                 } else {
4996                                         ret = ANEG_FAILED;
4997                                 }
4998                         }
4999                 }
5000                 break;
5001
5002         case ANEG_STATE_IDLE_DETECT_INIT:
5003                 ap->link_time = ap->cur_time;
5004                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5005                 tw32_f(MAC_MODE, tp->mac_mode);
5006                 udelay(40);
5007
5008                 ap->state = ANEG_STATE_IDLE_DETECT;
5009                 ret = ANEG_TIMER_ENAB;
5010                 break;
5011
5012         case ANEG_STATE_IDLE_DETECT:
5013                 if (ap->ability_match != 0 &&
5014                     ap->rxconfig == 0) {
5015                         ap->state = ANEG_STATE_AN_ENABLE;
5016                         break;
5017                 }
5018                 delta = ap->cur_time - ap->link_time;
5019                 if (delta > ANEG_STATE_SETTLE_TIME) {
5020                         /* XXX another gem from the Broadcom driver :( */
5021                         ap->state = ANEG_STATE_LINK_OK;
5022                 }
5023                 break;
5024
5025         case ANEG_STATE_LINK_OK:
5026                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5027                 ret = ANEG_DONE;
5028                 break;
5029
5030         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5031                 /* ??? unimplemented */
5032                 break;
5033
5034         case ANEG_STATE_NEXT_PAGE_WAIT:
5035                 /* ??? unimplemented */
5036                 break;
5037
5038         default:
5039                 ret = ANEG_FAILED;
5040                 break;
5041         }
5042
5043         return ret;
5044 }
5045
/* Run the software 1000BASE-X autonegotiation state machine to completion.
 *
 * Forces the MAC into GMII port mode and starts transmitting config code
 * words, then steps tg3_fiber_aneg_smachine() with a 1 us delay per
 * iteration for up to 195000 iterations before giving up.  On return the
 * transmitted config word is passed back through @txflags and the MR_*
 * result flags through @rxflags.
 *
 * Returns 1 if negotiation finished (ANEG_DONE) with a usable result
 * (completed/link-ok/full-duplex partner flag set), 0 otherwise.
 */
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	/* Clear any previously latched TX config word. */
	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Temporarily force GMII port mode while kicking off autoneg. */
	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	/* Step the state machine until it reaches a terminal state. */
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	/* Stop transmitting config code words. */
	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
5090
/* One-time hardware initialization of the BCM8002 SerDes PHY.
 *
 * Skipped when the driver has already completed init and the PCS is not
 * synced (nothing to bring up).  The raw register numbers below are
 * undocumented BCM8002 vendor registers; the write sequence and delays
 * follow Broadcom reference code and must not be reordered.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete (~5 ms busy wait). */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize (~150 ms busy wait). */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
5140
/* Configure fiber link using the SG_DIG hardware autonegotiation block.
 *
 * @mac_status: snapshot of MAC_STATUS taken by the caller.
 *
 * Handles both forced mode (autoneg disabled: tear down HW autoneg and
 * report link if the PCS is synced) and autoneg mode (program the
 * expected SG_DIG_CTRL value, restart negotiation if it differs, and on
 * completion derive flow-control settings from the advertised/received
 * pause bits).  Also implements parallel detection: if the link partner
 * never completes autoneg but we have PCS sync without config code
 * words, the link is declared up anyway.
 *
 * On 5704 chips other than revs A0/A1 a serdes_cfg workaround is applied
 * around autoneg restarts (signal pre-emphasis / voltage regulator bits
 * are preserved, the rest rewritten).
 *
 * Returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: disable HW autoneg if it was active. */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* Keep a parallel-detected link alive while the autoneg
		 * timeout counter runs down, provided we still have PCS
		 * sync and are not receiving config code words.
		 */
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse SOFT_RESET while writing the new control value. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Autoneg finished: recover the pause bits both
			 * sides advertised and program flow control.
			 */
			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: drop back to common
				 * setup and try parallel detection.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync or signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
5285
/* Configure fiber link without the SG_DIG hardware autoneg block.
 *
 * @mac_status: snapshot of MAC_STATUS taken by the caller.
 *
 * Requires PCS sync to do anything at all.  With autoneg enabled the
 * software state machine (fiber_autoneg()) is run and its result used
 * to program flow control; as a fallback, the link is also declared up
 * if we have PCS sync and are not receiving config code words
 * (parallel detection).  With autoneg disabled, 1000FD link is forced.
 *
 * Returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Translate negotiated pause bits into MII-style
			 * advertisement words for flow-control setup.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack sync/config-changed events until they stop arriving
		 * (bounded at 30 tries).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Parallel detection: synced but no config code words. */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		/* Briefly send config code words, then stop. */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
5350
/* Top-level link setup for TBI (fiber) ports.
 *
 * @force_reset: unused for this PHY type (kept for signature parity
 *		 with the other tg3_setup_*_phy() variants).
 *
 * Fast path: if HW autoneg is off, the link is already up and init is
 * complete, and MAC_STATUS shows a clean synced link (no config words,
 * no changes), just ack the change bits and return.  Otherwise the MAC
 * is put into TBI port mode, the BCM8002 PHY is initialized if present,
 * and link state is derived via either tg3_setup_fiber_hw_autoneg() or
 * tg3_setup_fiber_by_hand().  Afterwards the change bits are drained,
 * LEDs are programmed, and a link report is emitted if speed/duplex/
 * flow-control changed without a full link transition.
 *
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember pre-setup state so we can report config-only changes. */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear any stale link-change indication in the status block. */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack sync/config/link change bits until they stay clear
	 * (bounded at 100 tries).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Briefly pulse config code words to provoke the
			 * link partner.
			 */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		/* Fiber links are always 1000 Mb/s full duplex. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* If the link state itself did not change, still report when
	 * flow control, speed or duplex did.
	 */
	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5453
/* Link setup for fiber ports whose SerDes is driven through the MII
 * register interface (GMII port mode).
 *
 * @force_reset: when set, reset the PHY before configuring it.
 *
 * Flow: put the MAC in GMII mode, clear pending MAC events/status,
 * optionally reset the PHY, then either (a) leave a parallel-detected
 * link alone, (b) program the 1000BASE-X advertisement and restart
 * autoneg (returning early to let it complete asynchronously), or
 * (c) force speed/duplex via BMCR, bouncing the link first if it was
 * up.  Finally read back link state, resolve duplex and flow control
 * from the advertisement words, and report any link change.
 *
 * Returns the accumulated tg3_readphy() error status (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack all latched link-related status bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->link_config.rmt_adv = 0;

	/* BMSR latches link-down; read twice for current state. */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
		/* 5714: trust the MAC's TX status for link, not BMSR. */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or autoneg was off):
			 * reprogram it, restart autoneg and return early;
			 * the result is picked up on a later poll.
			 */
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: build the desired BMCR value. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (tp->link_up) {
				u32 adv;

				/* Strip the 1000X ability bits and restart
				 * autoneg so the partner drops the link.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				tg3_carrier_off(tp);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-read latched BMSR for the new forced state. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the common abilities. */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	tg3_test_and_report_link_chg(tp, current_link_up);
	return err;
}
5616
/* Periodic poller implementing SerDes parallel detection.
 *
 * Does nothing while tp->serdes_counter is still counting down (gives
 * autoneg time to finish).  Afterwards, two transitions are handled:
 *
 *  - Link down with autoneg enabled: if the PHY reports signal detect
 *    but no incoming config code words, the partner is not negotiating;
 *    force 1000FD via BMCR and mark the link as parallel-detected.
 *
 *  - Link up via parallel detection: if config code words start
 *    arriving, the partner began negotiating; re-enable autoneg and
 *    clear the parallel-detect flag.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Read twice: first read clears latched bits. */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
5676
/* Configure the PHY (copper, fiber or fiber-MII, per tp->phy_flags) and
 * the MAC state that depends on the resulting link parameters.
 *
 * @force_reset: passed through to the flavor-specific setup routine.
 * Returns the error code from that routine; the MAC-side programming
 * below is performed regardless.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	/* Dispatch on PHY type recorded in phy_flags. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		/* 5784 AX: derive the GRC timer prescaler from the
		 * current MAC clock frequency.
		 */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		/* Preserve fields these chips keep in MAC_TX_LENGTHS. */
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* Half-duplex gigabit needs the larger slot time. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Only collect hardware statistics while the link is up. */
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		/* Relax the PCIe L1 entry threshold while the link is
		 * down; force it to the maximum while the link is up.
		 */
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
5742
5743 /* tp->lock must be held */
5744 static u64 tg3_refclk_read(struct tg3 *tp)
5745 {
5746         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5747         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5748 }
5749
/* tp->lock must be held.
 *
 * Load a new value into the EAV reference clock: stop the counter,
 * write the 64-bit value in two halves, then resume.  The final write
 * uses tw32_f so it is flushed before the caller proceeds.
 */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
}
5758
5759 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5760 static inline void tg3_full_unlock(struct tg3 *tp);
5761 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5762 {
5763         struct tg3 *tp = netdev_priv(dev);
5764
5765         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5766                                 SOF_TIMESTAMPING_RX_SOFTWARE |
5767                                 SOF_TIMESTAMPING_SOFTWARE    |
5768                                 SOF_TIMESTAMPING_TX_HARDWARE |
5769                                 SOF_TIMESTAMPING_RX_HARDWARE |
5770                                 SOF_TIMESTAMPING_RAW_HARDWARE;
5771
5772         if (tp->ptp_clock)
5773                 info->phc_index = ptp_clock_index(tp->ptp_clock);
5774         else
5775                 info->phc_index = -1;
5776
5777         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5778
5779         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5780                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5781                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5782                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5783         return 0;
5784 }
5785
5786 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5787 {
5788         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5789         bool neg_adj = false;
5790         u32 correction = 0;
5791
5792         if (ppb < 0) {
5793                 neg_adj = true;
5794                 ppb = -ppb;
5795         }
5796
5797         /* Frequency adjustment is performed using hardware with a 24 bit
5798          * accumulator and a programmable correction value. On each clk, the
5799          * correction value gets added to the accumulator and when it
5800          * overflows, the time counter is incremented/decremented.
5801          *
5802          * So conversion from ppb to correction value is
5803          *              ppb * (1 << 24) / 1000000000
5804          */
5805         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5806                      TG3_EAV_REF_CLK_CORRECT_MASK;
5807
5808         tg3_full_lock(tp, 0);
5809
5810         if (correction)
5811                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5812                      TG3_EAV_REF_CLK_CORRECT_EN |
5813                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5814         else
5815                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5816
5817         tg3_full_unlock(tp);
5818
5819         return 0;
5820 }
5821
/* PTP adjtime callback: offset the clock by @delta nanoseconds.
 *
 * The hardware counter itself is untouched; the offset is accumulated
 * in tp->ptp_adjust and applied by the read paths.
 */
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}
5832
5833 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5834 {
5835         u64 ns;
5836         u32 remainder;
5837         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5838
5839         tg3_full_lock(tp, 0);
5840         ns = tg3_refclk_read(tp);
5841         ns += tp->ptp_adjust;
5842         tg3_full_unlock(tp);
5843
5844         ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5845         ts->tv_nsec = remainder;
5846
5847         return 0;
5848 }
5849
5850 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5851                            const struct timespec *ts)
5852 {
5853         u64 ns;
5854         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5855
5856         ns = timespec_to_ns(ts);
5857
5858         tg3_full_lock(tp, 0);
5859         tg3_refclk_write(tp, ns);
5860         tp->ptp_adjust = 0;
5861         tg3_full_unlock(tp);
5862
5863         return 0;
5864 }
5865
/* PTP enable callback: ancillary features (alarms, external timestamps,
 * periodic output, PPS) are not supported by this driver.
 */
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}
5871
/* PTP hardware clock capabilities registered with the PTP core.
 * max_adj is in ppb; no ancillary features are advertised, matching
 * tg3_ptp_enable which rejects all requests.
 */
static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime	= tg3_ptp_gettime,
	.settime	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};
5886
5887 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5888                                      struct skb_shared_hwtstamps *timestamp)
5889 {
5890         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5891         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5892                                            tp->ptp_adjust);
5893 }
5894
5895 /* tp->lock must be held */
5896 static void tg3_ptp_init(struct tg3 *tp)
5897 {
5898         if (!tg3_flag(tp, PTP_CAPABLE))
5899                 return;
5900
5901         /* Initialize the hardware clock to the system time. */
5902         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5903         tp->ptp_adjust = 0;
5904         tp->ptp_info = tg3_ptp_caps;
5905 }
5906
5907 /* tp->lock must be held */
5908 static void tg3_ptp_resume(struct tg3 *tp)
5909 {
5910         if (!tg3_flag(tp, PTP_CAPABLE))
5911                 return;
5912
5913         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5914         tp->ptp_adjust = 0;
5915 }
5916
5917 static void tg3_ptp_fini(struct tg3 *tp)
5918 {
5919         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5920                 return;
5921
5922         ptp_clock_unregister(tp->ptp_clock);
5923         tp->ptp_clock = NULL;
5924         tp->ptp_adjust = 0;
5925 }
5926
/* Returns tp->irq_sync.  NOTE(review): nonzero presumably indicates
 * interrupt processing is being synchronized/quiesced (the flag is set
 * elsewhere in the driver); callers use it to back off -- confirm
 * against the tg3_full_lock/irq paths.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
5931
5932 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5933 {
5934         int i;
5935
5936         dst = (u32 *)((u8 *)dst + off);
5937         for (i = 0; i < len; i += sizeof(u32))
5938                 *dst++ = tr32(off + i);
5939 }
5940
/* Snapshot the legacy (non-PCIe) register blocks into @regs for
 * tg3_dump_state.  Each tg3_rd32_loop call copies the given number of
 * bytes of registers beginning at the named offset into the same byte
 * offset of the buffer; blocks that only exist on some chips are
 * guarded by feature flags.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
5990
/* Dump device registers and per-vector status/NAPI state to the log
 * for debugging.  Register rows that are entirely zero are suppressed.
 * Uses GFP_ATOMIC since this may run from a non-sleepable context --
 * allocation failure simply skips the register portion of the dump.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four registers per line, skipping all-zero rows. */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
6046
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround is already active (flag set or
	 * indirect mailbox writes in use) there is nothing left to try,
	 * so a completion anomaly here is a hard bug.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Flag the pending recovery; the reset itself happens later. */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
6068
/* Number of free tx descriptors for this NAPI context: tx_pending
 * minus the in-flight count, where the prod/cons difference is masked
 * to the ring size since the indices are free-running.
 */
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
6076
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 *
 * Reap completed tx descriptors between the software consumer index
 * and the hardware-reported consumer index: unmap DMA, deliver any
 * hardware tx timestamp, free the skb, and wake the queue if it was
 * stopped and enough space has opened up.  Any inconsistency in the
 * ring triggers tg3_tx_recover().
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* With TSS, vector 0 carries no tx ring, so queue numbering
	 * is offset by one relative to the napi index.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means the ring state is
		 * corrupt (likely MMIO reordering) -- attempt recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip over descriptor entries used for a fragmented
		 * mapping of this buffer.
		 */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* Fragment slots should be empty and within the
			 * completed range; otherwise flag the anomaly.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		/* Re-check under the tx lock to close the race with a
		 * concurrent stop in the xmit path.
		 */
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
6180
6181 static void tg3_frag_free(bool is_frag, void *data)
6182 {
6183         if (is_frag)
6184                 put_page(virt_to_head_page(data));
6185         else
6186                 kfree(data);
6187 }
6188
6189 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6190 {
6191         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6192                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6193
6194         if (!ri->data)
6195                 return;
6196
6197         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6198                          map_sz, PCI_DMA_FROMDEVICE);
6199         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6200         ri->data = NULL;
6201 }
6202

/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 *
 * @opaque_key selects the standard or jumbo producer ring;
 * @dest_idx_unmasked is masked with the corresponding ring mask.
 * On success *frag_size is the frag allocation size, or 0 when the
 * buffer came from kmalloc (too large for a page fragment).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	/* Commit point: record the buffer and publish its DMA address
	 * to the chip via the descriptor.
	 */
	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
6279
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 *
 * Moves the buffer at @src_idx of producer ring set 0 into the
 * destination ring set @dpr at @dest_idx_unmasked (masked to the ring
 * size), re-posting it to the chip without reallocating.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Source is always the vector-0 producer ring set. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
6329
6330 /* The RX ring scheme is composed of multiple rings which post fresh
6331  * buffers to the chip, and one special ring the chip uses to report
6332  * status back to the host.
6333  *
6334  * The special ring reports the status of received packets to the
6335  * host.  The chip does not write into the original descriptor the
6336  * RX buffer was obtained from.  The chip simply takes the original
6337  * descriptor as provided by the host, updates the status and length
6338  * field, then writes this into the next status ring entry.
6339  *
6340  * Each ring the host uses to post buffers to the chip is described
6341  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6342  * it is first placed into the on-chip ram.  When the packet's length
6343  * is known, it walks down the TG3_BDINFO entries to select the ring.
6344  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6345  * which is within the range of the new packet's length is chosen.
6346  *
6347  * The "separate ring for rx status" scheme may sound queer, but it makes
6348  * sense from a cache coherency perspective.  If only the host writes
6349  * to the buffer post rings, and only the chip writes to the rx status
6350  * rings, then cache lines never move beyond shared-modified state.
6351  * If both the host and chip were to write into the same ring, cache line
6352  * eviction could occur since both entities want it in an exclusive state.
6353  */
6354 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6355 {
6356         struct tg3 *tp = tnapi->tp;
6357         u32 work_mask, rx_std_posted = 0;
6358         u32 std_prod_idx, jmb_prod_idx;
6359         u32 sw_idx = tnapi->rx_rcb_ptr;
6360         u16 hw_idx;
6361         int received;
6362         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6363
6364         hw_idx = *(tnapi->rx_rcb_prod_idx);
6365         /*
6366          * We need to order the read of hw_idx and the read of
6367          * the opaque cookie.
6368          */
6369         rmb();
6370         work_mask = 0;
6371         received = 0;
6372         std_prod_idx = tpr->rx_std_prod_idx;
6373         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6374         while (sw_idx != hw_idx && budget > 0) {
6375                 struct ring_info *ri;
6376                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6377                 unsigned int len;
6378                 struct sk_buff *skb;
6379                 dma_addr_t dma_addr;
6380                 u32 opaque_key, desc_idx, *post_ptr;
6381                 u8 *data;
6382                 u64 tstamp = 0;
6383
6384                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6385                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6386                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6387                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6388                         dma_addr = dma_unmap_addr(ri, mapping);
6389                         data = ri->data;
6390                         post_ptr = &std_prod_idx;
6391                         rx_std_posted++;
6392                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6393                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6394                         dma_addr = dma_unmap_addr(ri, mapping);
6395                         data = ri->data;
6396                         post_ptr = &jmb_prod_idx;
6397                 } else
6398                         goto next_pkt_nopost;
6399
6400                 work_mask |= opaque_key;
6401
6402                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6403                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6404                 drop_it:
6405                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6406                                        desc_idx, *post_ptr);
6407                 drop_it_no_recycle:
6408                         /* Other statistics kept track of by card. */
6409                         tp->rx_dropped++;
6410                         goto next_pkt;
6411                 }
6412
6413                 prefetch(data + TG3_RX_OFFSET(tp));
6414                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6415                       ETH_FCS_LEN;
6416
6417                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6418                      RXD_FLAG_PTPSTAT_PTPV1 ||
6419                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6420                      RXD_FLAG_PTPSTAT_PTPV2) {
6421                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6422                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6423                 }
6424
6425                 if (len > TG3_RX_COPY_THRESH(tp)) {
6426                         int skb_size;
6427                         unsigned int frag_size;
6428
6429                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6430                                                     *post_ptr, &frag_size);
6431                         if (skb_size < 0)
6432                                 goto drop_it;
6433
6434                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6435                                          PCI_DMA_FROMDEVICE);
6436
6437                         skb = build_skb(data, frag_size);
6438                         if (!skb) {
6439                                 tg3_frag_free(frag_size != 0, data);
6440                                 goto drop_it_no_recycle;
6441                         }
6442                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6443                         /* Ensure that the update to the data happens
6444                          * after the usage of the old DMA mapping.
6445                          */
6446                         smp_wmb();
6447
6448                         ri->data = NULL;
6449
6450                 } else {
6451                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6452                                        desc_idx, *post_ptr);
6453
6454                         skb = netdev_alloc_skb(tp->dev,
6455                                                len + TG3_RAW_IP_ALIGN);
6456                         if (skb == NULL)
6457                                 goto drop_it_no_recycle;
6458
6459                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6460                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6461                         memcpy(skb->data,
6462                                data + TG3_RX_OFFSET(tp),
6463                                len);
6464                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6465                 }
6466
6467                 skb_put(skb, len);
6468                 if (tstamp)
6469                         tg3_hwclock_to_timestamp(tp, tstamp,
6470                                                  skb_hwtstamps(skb));
6471
6472                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6473                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6474                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6475                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6476                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6477                 else
6478                         skb_checksum_none_assert(skb);
6479
6480                 skb->protocol = eth_type_trans(skb, tp->dev);
6481
6482                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6483                     skb->protocol != htons(ETH_P_8021Q)) {
6484                         dev_kfree_skb(skb);
6485                         goto drop_it_no_recycle;
6486                 }
6487
6488                 if (desc->type_flags & RXD_FLAG_VLAN &&
6489                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6490                         __vlan_hwaccel_put_tag(skb,
6491                                                desc->err_vlan & RXD_VLAN_MASK);
6492
6493                 napi_gro_receive(&tnapi->napi, skb);
6494
6495                 received++;
6496                 budget--;
6497
6498 next_pkt:
6499                 (*post_ptr)++;
6500
6501                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6502                         tpr->rx_std_prod_idx = std_prod_idx &
6503                                                tp->rx_std_ring_mask;
6504                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6505                                      tpr->rx_std_prod_idx);
6506                         work_mask &= ~RXD_OPAQUE_RING_STD;
6507                         rx_std_posted = 0;
6508                 }
6509 next_pkt_nopost:
6510                 sw_idx++;
6511                 sw_idx &= tp->rx_ret_ring_mask;
6512
6513                 /* Refresh hw_idx to see if there is new work */
6514                 if (sw_idx == hw_idx) {
6515                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6516                         rmb();
6517                 }
6518         }
6519
6520         /* ACK the status ring. */
6521         tnapi->rx_rcb_ptr = sw_idx;
6522         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6523
6524         /* Refill RX ring(s). */
6525         if (!tg3_flag(tp, ENABLE_RSS)) {
6526                 /* Sync BD data before updating mailbox */
6527                 wmb();
6528
6529                 if (work_mask & RXD_OPAQUE_RING_STD) {
6530                         tpr->rx_std_prod_idx = std_prod_idx &
6531                                                tp->rx_std_ring_mask;
6532                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6533                                      tpr->rx_std_prod_idx);
6534                 }
6535                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6536                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6537                                                tp->rx_jmb_ring_mask;
6538                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6539                                      tpr->rx_jmb_prod_idx);
6540                 }
6541                 mmiowb();
6542         } else if (work_mask) {
6543                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6544                  * updated before the producer indices can be updated.
6545                  */
6546                 smp_wmb();
6547
6548                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6549                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6550
6551                 if (tnapi != &tp->napi[1]) {
6552                         tp->rx_refill = true;
6553                         napi_schedule(&tp->napi[1].napi);
6554                 }
6555         }
6556
6557         return received;
6558 }
6559
/* Service link-change events reported through the status block.
 *
 * Only runs when the status block carries link state, i.e. neither
 * USE_LINKCHG_REG nor POLL_SERDES polling is configured.  On a link
 * change the SD_STATUS_LINK_CHG bit is cleared and, under tp->lock,
 * either the MAC status bits are acked (phylib owns the PHY) or the
 * driver's own PHY setup is run.
 */
static void tg3_poll_link(struct tg3 *tp)
{
        /* handle link change and other phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                struct tg3_hw_status *sblk = tp->napi[0].hw_status;

                if (sblk->status & SD_STATUS_LINK_CHG) {
                        /* Clear the link-change bit but keep UPDATED set. */
                        sblk->status = SD_STATUS_UPDATED |
                                       (sblk->status & ~SD_STATUS_LINK_CHG);
                        spin_lock(&tp->lock);
                        if (tg3_flag(tp, USE_PHYLIB)) {
                                /* phylib handles the PHY; just ack the MAC
                                 * attention bits and let the write post.
                                 */
                                tw32_f(MAC_STATUS,
                                     (MAC_STATUS_SYNC_CHANGED |
                                      MAC_STATUS_CFG_CHANGED |
                                      MAC_STATUS_MI_COMPLETION |
                                      MAC_STATUS_LNKSTATE_CHANGED));
                                udelay(40);
                        } else
                                tg3_setup_phy(tp, 0);
                        spin_unlock(&tp->lock);
                }
        }
}
6583
/* Transfer posted rx buffers from a source producer ring set (spr, one
 * of the per-vector rings) to the destination ring set (dpr, the
 * napi[0] ring — see tg3_poll_work()).
 *
 * The standard ring is drained first, then the jumbo ring.  Each pass
 * moves the longest contiguous run of entries available; if a
 * destination slot is still occupied, only the free leading run is
 * copied and -ENOSPC is returned so the caller can retry later.
 * Returns 0 when everything was transferred.
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
                                struct tg3_rx_prodring_set *dpr,
                                struct tg3_rx_prodring_set *spr)
{
        u32 si, di, cpycnt, src_prod_idx;
        int i, err = 0;

        while (1) {
                src_prod_idx = spr->rx_std_prod_idx;

                /* Make sure updates to the rx_std_buffers[] entries and the
                 * standard producer index are seen in the correct order.
                 */
                smp_rmb();

                if (spr->rx_std_cons_idx == src_prod_idx)
                        break;

                /* Size of the contiguous run up to the ring wrap point. */
                if (spr->rx_std_cons_idx < src_prod_idx)
                        cpycnt = src_prod_idx - spr->rx_std_cons_idx;
                else
                        cpycnt = tp->rx_std_ring_mask + 1 -
                                 spr->rx_std_cons_idx;

                cpycnt = min(cpycnt,
                             tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

                si = spr->rx_std_cons_idx;
                di = dpr->rx_std_prod_idx;

                /* Stop at the first destination slot that is still in use. */
                for (i = di; i < di + cpycnt; i++) {
                        if (dpr->rx_std_buffers[i].data) {
                                cpycnt = i - di;
                                err = -ENOSPC;
                                break;
                        }
                }

                if (!cpycnt)
                        break;

                /* Ensure that updates to the rx_std_buffers ring and the
                 * shadowed hardware producer ring from tg3_recycle_skb() are
                 * ordered correctly WRT the skb check above.
                 */
                smp_rmb();

                /* Move the bookkeeping entries, then mirror the DMA
                 * addresses into the hardware-visible descriptors.
                 */
                memcpy(&dpr->rx_std_buffers[di],
                       &spr->rx_std_buffers[si],
                       cpycnt * sizeof(struct ring_info));

                for (i = 0; i < cpycnt; i++, di++, si++) {
                        struct tg3_rx_buffer_desc *sbd, *dbd;
                        sbd = &spr->rx_std[si];
                        dbd = &dpr->rx_std[di];
                        dbd->addr_hi = sbd->addr_hi;
                        dbd->addr_lo = sbd->addr_lo;
                }

                spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
                                       tp->rx_std_ring_mask;
                dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
                                       tp->rx_std_ring_mask;
        }

        /* Same algorithm for the jumbo ring. */
        while (1) {
                src_prod_idx = spr->rx_jmb_prod_idx;

                /* Make sure updates to the rx_jmb_buffers[] entries and
                 * the jumbo producer index are seen in the correct order.
                 */
                smp_rmb();

                if (spr->rx_jmb_cons_idx == src_prod_idx)
                        break;

                if (spr->rx_jmb_cons_idx < src_prod_idx)
                        cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
                else
                        cpycnt = tp->rx_jmb_ring_mask + 1 -
                                 spr->rx_jmb_cons_idx;

                cpycnt = min(cpycnt,
                             tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

                si = spr->rx_jmb_cons_idx;
                di = dpr->rx_jmb_prod_idx;

                /* Stop at the first destination slot that is still in use. */
                for (i = di; i < di + cpycnt; i++) {
                        if (dpr->rx_jmb_buffers[i].data) {
                                cpycnt = i - di;
                                err = -ENOSPC;
                                break;
                        }
                }

                if (!cpycnt)
                        break;

                /* Ensure that updates to the rx_jmb_buffers ring and the
                 * shadowed hardware producer ring from tg3_recycle_skb() are
                 * ordered correctly WRT the skb check above.
                 */
                smp_rmb();

                memcpy(&dpr->rx_jmb_buffers[di],
                       &spr->rx_jmb_buffers[si],
                       cpycnt * sizeof(struct ring_info));

                for (i = 0; i < cpycnt; i++, di++, si++) {
                        struct tg3_rx_buffer_desc *sbd, *dbd;
                        sbd = &spr->rx_jmb[si].std;
                        dbd = &dpr->rx_jmb[di].std;
                        dbd->addr_hi = sbd->addr_hi;
                        dbd->addr_lo = sbd->addr_lo;
                }

                spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
                                       tp->rx_jmb_ring_mask;
                dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
                                       tp->rx_jmb_ring_mask;
        }

        return err;
}
6709
/* Core NAPI work routine shared by tg3_poll() and tg3_poll_msix().
 *
 * Runs the TX completion path first, then the RX path within the
 * remaining NAPI budget.  Under RSS, vector 1 additionally collects
 * the producer ring entries from all rx queues into napi[0]'s ring
 * and publishes the new producer indices to the chip.  Returns the
 * accumulated work_done count.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
        struct tg3 *tp = tnapi->tp;

        /* run TX completion thread */
        if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
                tg3_tx(tnapi);
                if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
                        return work_done;
        }

        /* No rx return ring on this vector; tx work was all there is. */
        if (!tnapi->rx_rcb_prod_idx)
                return work_done;

        /* run RX thread, within the bounds set by NAPI.
         * All RX "locking" is done by ensuring outside
         * code synchronizes with tg3->napi.poll()
         */
        if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_done += tg3_rx(tnapi, budget - work_done);

        if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
                struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
                int i, err = 0;
                u32 std_prod_idx = dpr->rx_std_prod_idx;
                u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

                tp->rx_refill = false;
                for (i = 1; i <= tp->rxq_cnt; i++)
                        err |= tg3_rx_prodring_xfer(tp, dpr,
                                                    &tp->napi[i].prodring);

                /* Make the descriptor updates visible before telling the
                 * chip about the new producer indices.
                 */
                wmb();

                if (std_prod_idx != dpr->rx_std_prod_idx)
                        tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
                                     dpr->rx_std_prod_idx);

                if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
                        tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
                                     dpr->rx_jmb_prod_idx);

                mmiowb();

                /* A transfer returned -ENOSPC; poke HOSTCC_MODE so this
                 * path runs again soon and retries the leftover entries.
                 */
                if (err)
                        tw32_f(HOSTCC_MODE, tp->coal_now);
        }

        return work_done;
}
6760
6761 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6762 {
6763         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6764                 schedule_work(&tp->reset_task);
6765 }
6766
/* Cancel a pending/running reset task and clear the recovery flags.
 * cancel_work_sync() runs first so the flags are not cleared while the
 * task is still executing.
 */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
        cancel_work_sync(&tp->reset_task);
        tg3_flag_clear(tp, RESET_TASK_PENDING);
        tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
6773
/* NAPI poll handler for the MSI-X vectors other than vector 0 (those
 * are registered in tg3_napi_init(); vector 0 uses tg3_poll(), which
 * also handles link and error events).  Always operates in tagged
 * status mode: the last status tag seen is echoed to the interrupt
 * mailbox on completion to tell the chip how far we got.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
        struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
        struct tg3 *tp = tnapi->tp;
        int work_done = 0;
        struct tg3_hw_status *sblk = tnapi->hw_status;

        while (1) {
                work_done = tg3_poll_work(tnapi, work_done, budget);

                if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
                        goto tx_recovery;

                /* Budget exhausted: stay scheduled, NAPI will repoll. */
                if (unlikely(work_done >= budget))
                        break;

                /* tp->last_tag is used in tg3_int_reenable() below
                 * to tell the hw how much work has been processed,
                 * so we must read it before checking for more work.
                 */
                tnapi->last_tag = sblk->status_tag;
                tnapi->last_irq_tag = tnapi->last_tag;
                rmb();

                /* check for RX/TX work to do */
                if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
                           *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

                        /* This test here is not race free, but will reduce
                         * the number of interrupts by looping again.
                         */
                        if (tnapi == &tp->napi[1] && tp->rx_refill)
                                continue;

                        napi_complete(napi);
                        /* Reenable interrupts. */
                        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

                        /* This test here is synchronized by napi_schedule()
                         * and napi_complete() to close the race condition.
                         */
                        if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
                                tw32(HOSTCC_MODE, tp->coalesce_mode |
                                                  HOSTCC_MODE_ENABLE |
                                                  tnapi->coal_now);
                        }
                        mmiowb();
                        break;
                }
        }

        return work_done;

tx_recovery:
        /* work_done is guaranteed to be less than budget. */
        napi_complete(napi);
        tg3_reset_task_schedule(tp);
        return work_done;
}
6833
/* Inspect chip error registers after SD_STATUS_ERROR was seen in the
 * status block.  If a genuine error is found (flow attention, MSI
 * status or DMA status), dump the chip state and schedule a reset.
 * The ERROR_PROCESSED flag makes this a one-shot until the flag is
 * cleared elsewhere (presumably on reset — confirm against the reset
 * path).
 */
static void tg3_process_error(struct tg3 *tp)
{
        u32 val;
        bool real_error = false;

        if (tg3_flag(tp, ERROR_PROCESSED))
                return;

        /* Check Flow Attention register */
        val = tr32(HOSTCC_FLOW_ATTN);
        if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
                netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
                real_error = true;
        }

        if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
                netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
                real_error = true;
        }

        if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
                netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
                real_error = true;
        }

        if (!real_error)
                return;

        tg3_dump_state(tp);

        tg3_flag_set(tp, ERROR_PROCESSED);
        tg3_reset_task_schedule(tp);
}
6867
/* NAPI poll handler for vector 0 (the only vector when MSI-X is not
 * in use).  Besides rx/tx work it services chip error status and link
 * change events, and supports both tagged and non-tagged status block
 * modes.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
        struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
        struct tg3 *tp = tnapi->tp;
        int work_done = 0;
        struct tg3_hw_status *sblk = tnapi->hw_status;

        while (1) {
                if (sblk->status & SD_STATUS_ERROR)
                        tg3_process_error(tp);

                tg3_poll_link(tp);

                work_done = tg3_poll_work(tnapi, work_done, budget);

                if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
                        goto tx_recovery;

                /* Budget exhausted: stay scheduled, NAPI will repoll. */
                if (unlikely(work_done >= budget))
                        break;

                if (tg3_flag(tp, TAGGED_STATUS)) {
                        /* tp->last_tag is used in tg3_int_reenable() below
                         * to tell the hw how much work has been processed,
                         * so we must read it before checking for more work.
                         */
                        tnapi->last_tag = sblk->status_tag;
                        tnapi->last_irq_tag = tnapi->last_tag;
                        rmb();
                } else
                        sblk->status &= ~SD_STATUS_UPDATED;

                if (likely(!tg3_has_work(tnapi))) {
                        napi_complete(napi);
                        tg3_int_reenable(tnapi);
                        break;
                }
        }

        return work_done;

tx_recovery:
        /* work_done is guaranteed to be less than budget. */
        napi_complete(napi);
        tg3_reset_task_schedule(tp);
        return work_done;
}
6915
6916 static void tg3_napi_disable(struct tg3 *tp)
6917 {
6918         int i;
6919
6920         for (i = tp->irq_cnt - 1; i >= 0; i--)
6921                 napi_disable(&tp->napi[i].napi);
6922 }
6923
6924 static void tg3_napi_enable(struct tg3 *tp)
6925 {
6926         int i;
6927
6928         for (i = 0; i < tp->irq_cnt; i++)
6929                 napi_enable(&tp->napi[i].napi);
6930 }
6931
6932 static void tg3_napi_init(struct tg3 *tp)
6933 {
6934         int i;
6935
6936         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6937         for (i = 1; i < tp->irq_cnt; i++)
6938                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6939 }
6940
6941 static void tg3_napi_fini(struct tg3 *tp)
6942 {
6943         int i;
6944
6945         for (i = 0; i < tp->irq_cnt; i++)
6946                 netif_napi_del(&tp->napi[i].napi);
6947 }
6948
/* Stop all netdev tx activity and NAPI processing, e.g. prior to a
 * reconfiguration.  trans_start is refreshed first so the tx watchdog
 * does not fire while the queues are disabled.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        tg3_napi_disable(tp);
        netif_carrier_off(tp->dev);
        netif_tx_disable(tp->dev);
}
6956
/* Restart tx queues, carrier, NAPI and interrupts after a stop;
 * counterpart of tg3_netif_stop().  tp->lock must be held.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
        tg3_ptp_resume(tp);

        /* NOTE: unconditional netif_tx_wake_all_queues is only
         * appropriate so long as all callers are assured to
         * have free tx slots (such as after tg3_init_hw)
         */
        netif_tx_wake_all_queues(tp->dev);

        if (tp->link_up)
                netif_carrier_on(tp->dev);

        tg3_napi_enable(tp);
        /* Force at least one poll pass before relying on interrupts. */
        tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}
6975
/* Block new irq-driven work and wait for in-flight handlers to finish.
 *
 * Sets tp->irq_sync (observed by the irq handlers via tg3_irq_sync())
 * and then synchronize_irq()s every vector.  The smp_mb() orders the
 * flag update against the handlers' read.  Must not be nested:
 * irq_sync has to be clear on entry (BUG_ON).
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
        int i;

        BUG_ON(tp->irq_sync);

        tp->irq_sync = 1;
        smp_mb();

        for (i = 0; i < tp->irq_cnt; i++)
                synchronize_irq(tp->napi[i].irq_vec);
}
6988
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 *
 * Pairs with tg3_full_unlock().
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
        spin_lock_bh(&tp->lock);
        if (irq_sync)
                tg3_irq_quiesce(tp);
}
7000
/* Release the lock taken by tg3_full_lock().  Note that irq_sync is
 * not cleared here; see tg3_irq_quiesce()/tg3_irq_sync() users.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
        spin_unlock_bh(&tp->lock);
}
7005
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;

        /* Warm up the status block and next rx descriptor. */
        prefetch(tnapi->hw_status);
        if (tnapi->rx_rcb)
                prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

        /* Skip scheduling if the driver is quiescing irq work. */
        if (likely(!tg3_irq_sync(tp)))
                napi_schedule(&tnapi->napi);

        return IRQ_HANDLED;
}
7023
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;

        /* Warm up the status block and next rx descriptor. */
        prefetch(tnapi->hw_status);
        if (tnapi->rx_rcb)
                prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         */
        tw32_mailbox(tnapi->int_mbox, 0x00000001);
        if (likely(!tg3_irq_sync(tp)))
                napi_schedule(&tnapi->napi);

        return IRQ_RETVAL(1);
}
7049
/* INTx interrupt handler for non-tagged status block mode.  The line
 * may be shared, so the handler must detect and report interrupts
 * that are not ours.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
                if (tg3_flag(tp, CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        /* Not our interrupt (shared line) or chip reset
                         * in progress.
                         */
                        handled = 0;
                        goto out;
                }
        }

        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))
                goto out;
        sblk->status &= ~SD_STATUS_UPDATED;
        if (likely(tg3_has_work(tnapi))) {
                prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
                napi_schedule(&tnapi->napi);
        } else {
                /* No work, shared interrupt perhaps?  re-enable
                 * interrupts, and flush that PCI write
                 */
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               0x00000000);
        }
out:
        return IRQ_RETVAL(handled);
}
7098
/* INTx interrupt handler for tagged status block mode.  "Ours vs.
 * shared" detection compares the status tag against the last tag we
 * acknowledged instead of the SD_STATUS_UPDATED bit.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
                if (tg3_flag(tp, CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        /* Not our interrupt (shared line) or chip reset
                         * in progress.
                         */
                        handled = 0;
                        goto out;
                }
        }

        /*
         * writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

        /*
         * In a shared interrupt configuration, sometimes other devices'
         * interrupts will scream.  We record the current status tag here
         * so that the above check can report that the screaming interrupts
         * are unhandled.  Eventually they will be silenced.
         */
        tnapi->last_irq_tag = sblk->status_tag;

        if (tg3_irq_sync(tp))
                goto out;

        prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

        napi_schedule(&tnapi->napi);

out:
        return IRQ_RETVAL(handled);
}
7150
7151 /* ISR for interrupt test */
7152 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7153 {
7154         struct tg3_napi *tnapi = dev_id;
7155         struct tg3 *tp = tnapi->tp;
7156         struct tg3_hw_status *sblk = tnapi->hw_status;
7157
7158         if ((sblk->status & SD_STATUS_UPDATED) ||
7159             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7160                 tg3_disable_ints(tp);
7161                 return IRQ_RETVAL(1);
7162         }
7163         return IRQ_RETVAL(0);
7164 }
7165
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run every vector's interrupt handler directly.
 * Bails out while irq work is quiesced (tg3_irq_quiesce()).
 */
static void tg3_poll_controller(struct net_device *dev)
{
        int i;
        struct tg3 *tp = netdev_priv(dev);

        if (tg3_irq_sync(tp))
                return;

        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tg3_interrupt(tnapi->irq_vec, tnapi);
        }
}
#endif
7179
7180 static void tg3_tx_timeout(struct net_device *dev)
7181 {
7182         struct tg3 *tp = netdev_priv(dev);
7183
7184         if (netif_msg_tx_err(tp)) {
7185                 netdev_err(dev, "transmit timed out, resetting\n");
7186                 tg3_dump_state(tp);
7187         }
7188
7189         tg3_reset_task_schedule(tp);
7190 }
7191
7192 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7193 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7194 {
7195         u32 base = (u32) mapping & 0xffffffff;
7196
7197         return (base > 0xffffdcc0) && (base + len + 8 < base);
7198 }
7199
7200 /* Test for DMA addresses > 40-bit */
7201 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7202                                           int len)
7203 {
7204 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7205         if (tg3_flag(tp, 40BIT_DMA_BUG))
7206                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7207         return 0;
7208 #else
7209         return 0;
7210 #endif
7211 }
7212
7213 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7214                                  dma_addr_t mapping, u32 len, u32 flags,
7215                                  u32 mss, u32 vlan)
7216 {
7217         txbd->addr_hi = ((u64) mapping >> 32);
7218         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7219         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7220         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7221 }
7222
/* Write one tx buffer into the ring, splitting it into multiple
 * descriptors when the chip has a DMA segment-size limit.  Advances
 * *entry and decrements *budget for every descriptor consumed.
 * Returns true if the mapping trips a hardware DMA bug (or descriptors
 * run out), in which case the caller must take the copy workaround.
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	/* Some chips mishandle DMA transfers of 8 bytes or less. */
	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	/* Buffer would cross a 4GB address boundary. */
	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	/* Mapping exceeds the chip's 40-bit DMA reach. */
	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		/* Only the final piece may carry TXD_FLAG_END. */
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			/* Mark the slot as a split continuation so unmap
			 * can walk past it later.
			 */
			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				/* Ran out of descriptors mid-split: clear
				 * the fragmented mark on the last written
				 * slot and report failure.
				 */
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		/* No split limit: one descriptor covers the whole buffer. */
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
7282
/* Unmap one transmitted skb's DMA mappings and release its ring slots.
 * @entry is the slot holding the skb head; @last is the index of the
 * last page fragment to unmap (-1 when only the linear head was mapped).
 * Also clears the 'fragmented' continuation marks that
 * tg3_tx_frag_set() may have left on split descriptors.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	/* Unmap the linear head portion of the skb. */
	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	/* Skip descriptors produced by dma_limit splitting of the head. */
	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		/* Skip split continuations of this fragment as well. */
		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
7320
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Linearize the skb into a freshly allocated copy (with extra headroom
 * for 4-byte alignment on 5701) and requeue it via tg3_tx_frag_set().
 * On success *pskb points at the replacement skb; the original skb is
 * always freed.  Returns 0 on success, -1 on allocation/mapping failure
 * (the packet is dropped in that case).
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* 5701 wants the payload 4-byte aligned. */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			/* Single linear buffer: it is also the last BD. */
			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				/* Even the copy is unusable: unwind and drop. */
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
7375
7376 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7377
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * The skb is segmented in software and each resulting segment is sent
 * through tg3_start_xmit() individually.  The original skb is always
 * consumed.  Returns NETDEV_TX_OK, or NETDEV_TX_BUSY when the ring
 * cannot hold the worst-case number of descriptors.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	/* Worst-case descriptor estimate: ~3 per resulting segment. */
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		/* Room opened up between the two checks; keep going. */
		netif_wake_queue(tp->dev);
	}

	/* Segment with TSO masked off so each piece fits in one frame. */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
7418
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 *
 * Maps the skb head and fragments, writes tx descriptors (falling back
 * to the linearizing copy workaround when a hardware DMA bug would be
 * tripped), and kicks the producer mailbox.  Returns NETDEV_TX_OK even
 * for dropped packets (tx_dropped is bumped); NETDEV_TX_BUSY only when
 * the ring is unexpectedly full.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	/* With TSS the first napi context has no tx ring; shift by one. */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		/* We modify headers below; get a private copy if cloned. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		/* Headers over 80 bytes trip a TSO hardware bug; fall
		 * back to software GSO segmentation.
		 */
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			/* Hardware computes the TCP pseudo-header checksum. */
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode the TSO header length for the chip; the bit
		 * layout differs per HW_TSO generation.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	/* Request a hardware tx timestamp when armed for this skb. */
	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;


	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		/* Firmware TSO only needs the MSS on the first BD. */
		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		/* Undo everything queued so far, then retry with a
		 * linearized copy of the skb.
		 */
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	/* --i: the fragment at i failed to map, so unmap only 0..i-1. */
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
7641
/* Enable or disable internal MAC loopback by rewriting MAC_MODE.
 * Callers hold tp->lock (see tg3_set_loopback()).
 */
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		/* 10/100-only PHYs loop back through the MII port mode. */
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	/* Give the MAC mode change time to settle. */
	udelay(40);
}
7669
/* Put the PHY into loopback at the requested speed: internal
 * (BMCR_LOOPBACK) or, when @extlpbk is set, external through the
 * connector.  Returns 0 on success or -EIO if external loopback
 * setup fails.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		/* FET PHYs are 10/100 only; clamp gigabit requests. */
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			/* Force master role for external gigabit loopback. */
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	/* Program the MAC port mode to match the loopback speed. */
	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		/* 5401 and 5411 PHYs want opposite link polarity here. */
		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
7762
/* Toggle internal MAC loopback in response to the NETIF_F_LOOPBACK
 * feature bit.  No-op when already in the requested state; otherwise
 * reconfigures the MAC under tp->lock.
 */
static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		/* Already enabled. */
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		/* Already disabled. */
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
7788
7789 static netdev_features_t tg3_fix_features(struct net_device *dev,
7790         netdev_features_t features)
7791 {
7792         struct tg3 *tp = netdev_priv(dev);
7793
7794         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7795                 features &= ~NETIF_F_ALL_TSO;
7796
7797         return features;
7798 }
7799
7800 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7801 {
7802         netdev_features_t changed = dev->features ^ features;
7803
7804         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7805                 tg3_set_loopback(dev, features);
7806
7807         return 0;
7808 }
7809
/* Free the rx data buffers of one producer ring set.  A per-vector
 * (non-default) set only holds live buffers in its consumer..producer
 * window, walked with the ring-size mask; the default (napi[0]) set
 * owns the full standard and jumbo rings and is swept completely.
 */
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		/* Only the cons..prod window can hold buffers here. */
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	/* Default ring set: free every slot. */
	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					TG3_RX_JMB_MAP_SZ);
	}
}
7843
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success or -ENOMEM when no rx buffer at all could be
 * allocated; a partially filled ring shrinks rx_pending instead.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	/* Non-default ring sets only need their shadow arrays cleared;
	 * the descriptor rings belong to napi[0]'s set.
	 */
	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			/* Run with however many buffers we got. */
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
		       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
7952
7953 static void tg3_rx_prodring_fini(struct tg3 *tp,
7954                                  struct tg3_rx_prodring_set *tpr)
7955 {
7956         kfree(tpr->rx_std_buffers);
7957         tpr->rx_std_buffers = NULL;
7958         kfree(tpr->rx_jmb_buffers);
7959         tpr->rx_jmb_buffers = NULL;
7960         if (tpr->rx_std) {
7961                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7962                                   tpr->rx_std, tpr->rx_std_mapping);
7963                 tpr->rx_std = NULL;
7964         }
7965         if (tpr->rx_jmb) {
7966                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7967                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7968                 tpr->rx_jmb = NULL;
7969         }
7970 }
7971
7972 static int tg3_rx_prodring_init(struct tg3 *tp,
7973                                 struct tg3_rx_prodring_set *tpr)
7974 {
7975         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7976                                       GFP_KERNEL);
7977         if (!tpr->rx_std_buffers)
7978                 return -ENOMEM;
7979
7980         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7981                                          TG3_RX_STD_RING_BYTES(tp),
7982                                          &tpr->rx_std_mapping,
7983                                          GFP_KERNEL);
7984         if (!tpr->rx_std)
7985                 goto err_out;
7986
7987         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7988                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7989                                               GFP_KERNEL);
7990                 if (!tpr->rx_jmb_buffers)
7991                         goto err_out;
7992
7993                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7994                                                  TG3_RX_JMB_RING_BYTES(tp),
7995                                                  &tpr->rx_jmb_mapping,
7996                                                  GFP_KERNEL);
7997                 if (!tpr->rx_jmb)
7998                         goto err_out;
7999         }
8000
8001         return 0;
8002
8003 err_out:
8004         tg3_rx_prodring_fini(tp, tpr);
8005         return -ENOMEM;
8006 }
8007
8008 /* Free up pending packets in all rx/tx rings.
8009  *
8010  * The chip has been shut down and the driver detached from
8011  * the networking, so no interrupts or new tx packets will
8012  * end up in the driver.  tp->{tx,}lock is not held and we are not
8013  * in an interrupt context and thus may sleep.
8014  */
8015 static void tg3_free_rings(struct tg3 *tp)
8016 {
8017         int i, j;
8018
8019         for (j = 0; j < tp->irq_cnt; j++) {
8020                 struct tg3_napi *tnapi = &tp->napi[j];
8021
8022                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8023
8024                 if (!tnapi->tx_buffers)
8025                         continue;
8026
8027                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8028                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8029
8030                         if (!skb)
8031                                 continue;
8032
8033                         tg3_tx_skb_unmap(tnapi, i,
8034                                          skb_shinfo(skb)->nr_frags - 1);
8035
8036                         dev_kfree_skb_any(skb);
8037                 }
8038                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8039         }
8040 }
8041
8042 /* Initialize tx/rx rings for packet processing.
8043  *
8044  * The chip has been shut down and the driver detached from
8045  * the networking, so no interrupts or new tx packets will
8046  * end up in the driver.  tp->{tx,}lock are held and thus
8047  * we may not sleep.
8048  */
8049 static int tg3_init_rings(struct tg3 *tp)
8050 {
8051         int i;
8052
8053         /* Free up all the SKBs. */
8054         tg3_free_rings(tp);
8055
8056         for (i = 0; i < tp->irq_cnt; i++) {
8057                 struct tg3_napi *tnapi = &tp->napi[i];
8058
8059                 tnapi->last_tag = 0;
8060                 tnapi->last_irq_tag = 0;
8061                 tnapi->hw_status->status = 0;
8062                 tnapi->hw_status->status_tag = 0;
8063                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8064
8065                 tnapi->tx_prod = 0;
8066                 tnapi->tx_cons = 0;
8067                 if (tnapi->tx_ring)
8068                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8069
8070                 tnapi->rx_rcb_ptr = 0;
8071                 if (tnapi->rx_rcb)
8072                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8073
8074                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8075                         tg3_free_rings(tp);
8076                         return -ENOMEM;
8077                 }
8078         }
8079
8080         return 0;
8081 }
8082
8083 static void tg3_mem_tx_release(struct tg3 *tp)
8084 {
8085         int i;
8086
8087         for (i = 0; i < tp->irq_max; i++) {
8088                 struct tg3_napi *tnapi = &tp->napi[i];
8089
8090                 if (tnapi->tx_ring) {
8091                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8092                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8093                         tnapi->tx_ring = NULL;
8094                 }
8095
8096                 kfree(tnapi->tx_buffers);
8097                 tnapi->tx_buffers = NULL;
8098         }
8099 }
8100
8101 static int tg3_mem_tx_acquire(struct tg3 *tp)
8102 {
8103         int i;
8104         struct tg3_napi *tnapi = &tp->napi[0];
8105
8106         /* If multivector TSS is enabled, vector 0 does not handle
8107          * tx interrupts.  Don't allocate any resources for it.
8108          */
8109         if (tg3_flag(tp, ENABLE_TSS))
8110                 tnapi++;
8111
8112         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8113                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8114                                             TG3_TX_RING_SIZE, GFP_KERNEL);
8115                 if (!tnapi->tx_buffers)
8116                         goto err_out;
8117
8118                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8119                                                     TG3_TX_RING_BYTES,
8120                                                     &tnapi->tx_desc_mapping,
8121                                                     GFP_KERNEL);
8122                 if (!tnapi->tx_ring)
8123                         goto err_out;
8124         }
8125
8126         return 0;
8127
8128 err_out:
8129         tg3_mem_tx_release(tp);
8130         return -ENOMEM;
8131 }
8132
8133 static void tg3_mem_rx_release(struct tg3 *tp)
8134 {
8135         int i;
8136
8137         for (i = 0; i < tp->irq_max; i++) {
8138                 struct tg3_napi *tnapi = &tp->napi[i];
8139
8140                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8141
8142                 if (!tnapi->rx_rcb)
8143                         continue;
8144
8145                 dma_free_coherent(&tp->pdev->dev,
8146                                   TG3_RX_RCB_RING_BYTES(tp),
8147                                   tnapi->rx_rcb,
8148                                   tnapi->rx_rcb_mapping);
8149                 tnapi->rx_rcb = NULL;
8150         }
8151 }
8152
8153 static int tg3_mem_rx_acquire(struct tg3 *tp)
8154 {
8155         unsigned int i, limit;
8156
8157         limit = tp->rxq_cnt;
8158
8159         /* If RSS is enabled, we need a (dummy) producer ring
8160          * set on vector zero.  This is the true hw prodring.
8161          */
8162         if (tg3_flag(tp, ENABLE_RSS))
8163                 limit++;
8164
8165         for (i = 0; i < limit; i++) {
8166                 struct tg3_napi *tnapi = &tp->napi[i];
8167
8168                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8169                         goto err_out;
8170
8171                 /* If multivector RSS is enabled, vector 0
8172                  * does not handle rx or tx interrupts.
8173                  * Don't allocate any resources for it.
8174                  */
8175                 if (!i && tg3_flag(tp, ENABLE_RSS))
8176                         continue;
8177
8178                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8179                                                    TG3_RX_RCB_RING_BYTES(tp),
8180                                                    &tnapi->rx_rcb_mapping,
8181                                                    GFP_KERNEL | __GFP_ZERO);
8182                 if (!tnapi->rx_rcb)
8183                         goto err_out;
8184         }
8185
8186         return 0;
8187
8188 err_out:
8189         tg3_mem_rx_release(tp);
8190         return -ENOMEM;
8191 }
8192
8193 /*
8194  * Must not be invoked with interrupt sources disabled and
8195  * the hardware shutdown down.
8196  */
8197 static void tg3_free_consistent(struct tg3 *tp)
8198 {
8199         int i;
8200
8201         for (i = 0; i < tp->irq_cnt; i++) {
8202                 struct tg3_napi *tnapi = &tp->napi[i];
8203
8204                 if (tnapi->hw_status) {
8205                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8206                                           tnapi->hw_status,
8207                                           tnapi->status_mapping);
8208                         tnapi->hw_status = NULL;
8209                 }
8210         }
8211
8212         tg3_mem_rx_release(tp);
8213         tg3_mem_tx_release(tp);
8214
8215         if (tp->hw_stats) {
8216                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8217                                   tp->hw_stats, tp->stats_mapping);
8218                 tp->hw_stats = NULL;
8219         }
8220 }
8221
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates the hardware statistics block, a status block for every
 * interrupt vector, and the tx/rx ring memory.  On any failure all
 * consistent memory acquired so far is released via
 * tg3_free_consistent() and -ENOMEM is returned.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	/* Hardware statistics block, zeroed so the first snapshot
	 * starts from a clean slate.
	 */
	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL | __GFP_ZERO);
	if (!tp->hw_stats)
		goto err_out;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		/* One status block per interrupt vector. */
		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL | __GFP_ZERO);
		if (!tnapi->hw_status)
			goto err_out;

		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			/* Vector 0 and vectors beyond 4 fall through the
			 * switch and leave prodptr NULL.
			 */
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
8288
8289 #define MAX_WAIT_CNT 1000
8290
8291 /* To stop a block, clear the enable bit and poll till it
8292  * clears.  tp->lock is held.
8293  */
8294 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8295 {
8296         unsigned int i;
8297         u32 val;
8298
8299         if (tg3_flag(tp, 5705_PLUS)) {
8300                 switch (ofs) {
8301                 case RCVLSC_MODE:
8302                 case DMAC_MODE:
8303                 case MBFREE_MODE:
8304                 case BUFMGR_MODE:
8305                 case MEMARB_MODE:
8306                         /* We can't enable/disable these bits of the
8307                          * 5705/5750, just say success.
8308                          */
8309                         return 0;
8310
8311                 default:
8312                         break;
8313                 }
8314         }
8315
8316         val = tr32(ofs);
8317         val &= ~enable_bit;
8318         tw32_f(ofs, val);
8319
8320         for (i = 0; i < MAX_WAIT_CNT; i++) {
8321                 udelay(100);
8322                 val = tr32(ofs);
8323                 if ((val & enable_bit) == 0)
8324                         break;
8325         }
8326
8327         if (i == MAX_WAIT_CNT && !silent) {
8328                 dev_err(&tp->pdev->dev,
8329                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8330                         ofs, enable_bit);
8331                 return -ENODEV;
8332         }
8333
8334         return 0;
8335 }
8336
/* tp->lock is held.
 *
 * Quiesce the hardware in order: disable interrupts, stop the rx MAC,
 * stop the rx-side then tx-side DMA/list blocks, stop the tx MAC,
 * stop host coalescing and write DMA, reset the FTQs, stop the buffer
 * manager and memory arbiter, and finally clear all status blocks.
 * Returns 0 on success or a negative value if any block failed to
 * stop; failures are only logged when @silent is zero.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop the receive MAC first so no new frames come in. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Stop the rx-side blocks. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Stop the tx-side blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the transmit MAC and poll until it halts. */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Reset the flow-through queues. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear every vector's status block. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
8400
/* Save PCI command register before chip reset.  The GRC core clock
 * reset can clear the memory enable bit in the command register (see
 * tg3_chip_reset()), so tg3_restore_pci_state() writes it back
 * afterwards.
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
8406
/* Restore PCI state after chip reset.
 *
 * Re-enables indirect register access, reprograms TG3PCI_PCISTATE,
 * writes back the saved PCI command register, and restores the
 * cache-line size / latency timer (conventional PCI), the PCI-X
 * relaxed ordering setting, and the MSI enable bit on 5780-class
 * devices.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Write back the command register saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
8467
/* tp->lock is held.
 *
 * Perform a full GRC core-clock chip reset and bring the device back
 * to a usable post-reset state: save/restore PCI config state, keep
 * the irq handler away from the registers during the reset window,
 * apply numerous chip-revision-specific workarounds, wait for the
 * firmware to come up, and re-probe the ASF enable state from NVRAM
 * shadow memory.  Returns 0 on success or the tg3_poll_fw() error.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	/* Wait out any handler already running on another CPU. */
	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	/* Re-enable the memory arbiter, preserving the other mode
	 * bits on 5780-class devices.
	 */
	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Select the MAC port mode matching the PHY type. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
8714
8715 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8716 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8717
8718 /* tp->lock is held. */
8719 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8720 {
8721         int err;
8722
8723         tg3_stop_fw(tp);
8724
8725         tg3_write_sig_pre_reset(tp, kind);
8726
8727         tg3_abort_hw(tp, silent);
8728         err = tg3_chip_reset(tp);
8729
8730         __tg3_set_mac_addr(tp, 0);
8731
8732         tg3_write_sig_legacy(tp, kind);
8733         tg3_write_sig_post_reset(tp, kind);
8734
8735         if (tp->hw_stats) {
8736                 /* Save the stats across chip resets... */
8737                 tg3_get_nstats(tp, &tp->net_stats_prev);
8738                 tg3_get_estats(tp, &tp->estats_prev);
8739
8740                 /* And make sure the next sample is new data */
8741                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8742         }
8743
8744         if (err)
8745                 return err;
8746
8747         return 0;
8748 }
8749
8750 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8751 {
8752         struct tg3 *tp = netdev_priv(dev);
8753         struct sockaddr *addr = p;
8754         int err = 0, skip_mac_1 = 0;
8755
8756         if (!is_valid_ether_addr(addr->sa_data))
8757                 return -EADDRNOTAVAIL;
8758
8759         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8760
8761         if (!netif_running(dev))
8762                 return 0;
8763
8764         if (tg3_flag(tp, ENABLE_ASF)) {
8765                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8766
8767                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8768                 addr0_low = tr32(MAC_ADDR_0_LOW);
8769                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8770                 addr1_low = tr32(MAC_ADDR_1_LOW);
8771
8772                 /* Skip MAC addr 1 if ASF is using it. */
8773                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8774                     !(addr1_high == 0 && addr1_low == 0))
8775                         skip_mac_1 = 1;
8776         }
8777         spin_lock_bh(&tp->lock);
8778         __tg3_set_mac_addr(tp, skip_mac_1);
8779         spin_unlock_bh(&tp->lock);
8780
8781         return err;
8782 }
8783
8784 /* tp->lock is held. */
8785 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8786                            dma_addr_t mapping, u32 maxlen_flags,
8787                            u32 nic_addr)
8788 {
8789         tg3_write_mem(tp,
8790                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8791                       ((u64) mapping >> 32));
8792         tg3_write_mem(tp,
8793                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8794                       ((u64) mapping & 0xffffffff));
8795         tg3_write_mem(tp,
8796                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8797                        maxlen_flags);
8798
8799         if (!tg3_flag(tp, 5705_PLUS))
8800                 tg3_write_mem(tp,
8801                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8802                               nic_addr);
8803 }
8804
8805
8806 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8807 {
8808         int i = 0;
8809
8810         if (!tg3_flag(tp, ENABLE_TSS)) {
8811                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8812                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8813                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8814         } else {
8815                 tw32(HOSTCC_TXCOL_TICKS, 0);
8816                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8817                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8818
8819                 for (; i < tp->txq_cnt; i++) {
8820                         u32 reg;
8821
8822                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8823                         tw32(reg, ec->tx_coalesce_usecs);
8824                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8825                         tw32(reg, ec->tx_max_coalesced_frames);
8826                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8827                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8828                 }
8829         }
8830
8831         for (; i < tp->irq_max - 1; i++) {
8832                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8833                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8834                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8835         }
8836 }
8837
8838 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8839 {
8840         int i = 0;
8841         u32 limit = tp->rxq_cnt;
8842
8843         if (!tg3_flag(tp, ENABLE_RSS)) {
8844                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8845                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8846                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8847                 limit--;
8848         } else {
8849                 tw32(HOSTCC_RXCOL_TICKS, 0);
8850                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8851                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8852         }
8853
8854         for (; i < limit; i++) {
8855                 u32 reg;
8856
8857                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8858                 tw32(reg, ec->rx_coalesce_usecs);
8859                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8860                 tw32(reg, ec->rx_max_coalesced_frames);
8861                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8862                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8863         }
8864
8865         for (; i < tp->irq_max - 1; i++) {
8866                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8867                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8868                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8869         }
8870 }
8871
8872 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8873 {
8874         tg3_coal_tx_init(tp, ec);
8875         tg3_coal_rx_init(tp, ec);
8876
8877         if (!tg3_flag(tp, 5705_PLUS)) {
8878                 u32 val = ec->stats_block_coalesce_usecs;
8879
8880                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8881                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8882
8883                 if (!tp->link_up)
8884                         val = 0;
8885
8886                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8887         }
8888 }
8889
/* tp->lock is held.
 *
 * Quiesce and re-initialize all send/receive-return ring control blocks
 * and per-vector mailboxes, then reprogram the status block and ring
 * DMA addresses for every active interrupt vector.  Called from the
 * chip bring-up path after the rings themselves have been allocated.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first.  The number of send
	 * ring control blocks present in NIC SRAM varies per chip family.
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	/* Mark every send RCB after the first as disabled. */
	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);


	/* Disable all receive return rings but the first.  Again, the
	 * RCB count in SRAM depends on the chip family.
	 */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	/* Mark every receive-return RCB after the first as disabled. */
	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts (writing 1 to the interrupt mailbox masks
	 * the vector) and clear the MSI-recovery bookkeeping for vector 0.
	 */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			/* prodmbox only carries tx work when TSS is on */
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		/* Without TSS, vector 0 owns the tx producer mailbox. */
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	/* Program vector 0's tx ring RCB, if it has a tx ring. */
	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	/* Program vector 0's receive-return ring RCB, if present. */
	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	/* Repeat status block + RCB setup for each additional vector.
	 * Secondary status block registers are 8 bytes apart.
	 */
	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		stblk += 8;
		rxrcb += TG3_BDINFO_SIZE;
	}
}
9019
9020 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9021 {
9022         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9023
9024         if (!tg3_flag(tp, 5750_PLUS) ||
9025             tg3_flag(tp, 5780_CLASS) ||
9026             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9027             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9028             tg3_flag(tp, 57765_PLUS))
9029                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9030         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9031                  tg3_asic_rev(tp) == ASIC_REV_5787)
9032                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9033         else
9034                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9035
9036         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9037         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9038
9039         val = min(nic_rep_thresh, host_rep_thresh);
9040         tw32(RCVBDI_STD_THRESH, val);
9041
9042         if (tg3_flag(tp, 57765_PLUS))
9043                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9044
9045         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9046                 return;
9047
9048         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9049
9050         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9051
9052         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9053         tw32(RCVBDI_JUMBO_THRESH, val);
9054
9055         if (tg3_flag(tp, 57765_PLUS))
9056                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9057 }
9058
9059 static inline u32 calc_crc(unsigned char *buf, int len)
9060 {
9061         u32 reg;
9062         u32 tmp;
9063         int j, k;
9064
9065         reg = 0xffffffff;
9066
9067         for (j = 0; j < len; j++) {
9068                 reg ^= buf[j];
9069
9070                 for (k = 0; k < 8; k++) {
9071                         tmp = reg & 0x01;
9072
9073                         reg >>= 1;
9074
9075                         if (tmp)
9076                                 reg ^= 0xedb88320;
9077                 }
9078         }
9079
9080         return ~reg;
9081 }
9082
9083 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9084 {
9085         /* accept or reject all multicast frames */
9086         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9087         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9088         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9089         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9090 }
9091
9092 static void __tg3_set_rx_mode(struct net_device *dev)
9093 {
9094         struct tg3 *tp = netdev_priv(dev);
9095         u32 rx_mode;
9096
9097         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9098                                   RX_MODE_KEEP_VLAN_TAG);
9099
9100 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9101         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9102          * flag clear.
9103          */
9104         if (!tg3_flag(tp, ENABLE_ASF))
9105                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9106 #endif
9107
9108         if (dev->flags & IFF_PROMISC) {
9109                 /* Promiscuous mode. */
9110                 rx_mode |= RX_MODE_PROMISC;
9111         } else if (dev->flags & IFF_ALLMULTI) {
9112                 /* Accept all multicast. */
9113                 tg3_set_multi(tp, 1);
9114         } else if (netdev_mc_empty(dev)) {
9115                 /* Reject all multicast. */
9116                 tg3_set_multi(tp, 0);
9117         } else {
9118                 /* Accept one or more multicast(s). */
9119                 struct netdev_hw_addr *ha;
9120                 u32 mc_filter[4] = { 0, };
9121                 u32 regidx;
9122                 u32 bit;
9123                 u32 crc;
9124
9125                 netdev_for_each_mc_addr(ha, dev) {
9126                         crc = calc_crc(ha->addr, ETH_ALEN);
9127                         bit = ~crc & 0x7f;
9128                         regidx = (bit & 0x60) >> 5;
9129                         bit &= 0x1f;
9130                         mc_filter[regidx] |= (1 << bit);
9131                 }
9132
9133                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9134                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9135                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9136                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9137         }
9138
9139         if (rx_mode != tp->rx_mode) {
9140                 tp->rx_mode = rx_mode;
9141                 tw32_f(MAC_RX_MODE, rx_mode);
9142                 udelay(10);
9143         }
9144 }
9145
9146 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9147 {
9148         int i;
9149
9150         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9151                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9152 }
9153
9154 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9155 {
9156         int i;
9157
9158         if (!tg3_flag(tp, SUPPORT_MSIX))
9159                 return;
9160
9161         if (tp->rxq_cnt == 1) {
9162                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9163                 return;
9164         }
9165
9166         /* Validate table against current IRQ count */
9167         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9168                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9169                         break;
9170         }
9171
9172         if (i != TG3_RSS_INDIR_TBL_SIZE)
9173                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9174 }
9175
9176 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9177 {
9178         int i = 0;
9179         u32 reg = MAC_RSS_INDIR_TBL_0;
9180
9181         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9182                 u32 val = tp->rss_ind_tbl[i];
9183                 i++;
9184                 for (; i % 8; i++) {
9185                         val <<= 4;
9186                         val |= tp->rss_ind_tbl[i];
9187                 }
9188                 tw32(reg, val);
9189                 reg += 4;
9190         }
9191 }
9192
9193 /* tp->lock is held. */
9194 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9195 {
9196         u32 val, rdmac_mode;
9197         int i, err, limit;
9198         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9199
9200         tg3_disable_ints(tp);
9201
9202         tg3_stop_fw(tp);
9203
9204         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9205
9206         if (tg3_flag(tp, INIT_COMPLETE))
9207                 tg3_abort_hw(tp, 1);
9208
9209         /* Enable MAC control of LPI */
9210         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9211                 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9212                       TG3_CPMU_EEE_LNKIDL_UART_IDL;
9213                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9214                         val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9215
9216                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9217
9218                 tw32_f(TG3_CPMU_EEE_CTRL,
9219                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9220
9221                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9222                       TG3_CPMU_EEEMD_LPI_IN_TX |
9223                       TG3_CPMU_EEEMD_LPI_IN_RX |
9224                       TG3_CPMU_EEEMD_EEE_ENABLE;
9225
9226                 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9227                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9228
9229                 if (tg3_flag(tp, ENABLE_APE))
9230                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9231
9232                 tw32_f(TG3_CPMU_EEE_MODE, val);
9233
9234                 tw32_f(TG3_CPMU_EEE_DBTMR1,
9235                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9236                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9237
9238                 tw32_f(TG3_CPMU_EEE_DBTMR2,
9239                        TG3_CPMU_DBTMR2_APE_TX_2047US |
9240                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9241         }
9242
9243         if (reset_phy)
9244                 tg3_phy_reset(tp);
9245
9246         err = tg3_chip_reset(tp);
9247         if (err)
9248                 return err;
9249
9250         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9251
9252         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9253                 val = tr32(TG3_CPMU_CTRL);
9254                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9255                 tw32(TG3_CPMU_CTRL, val);
9256
9257                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9258                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9259                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9260                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9261
9262                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9263                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9264                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9265                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9266
9267                 val = tr32(TG3_CPMU_HST_ACC);
9268                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9269                 val |= CPMU_HST_ACC_MACCLK_6_25;
9270                 tw32(TG3_CPMU_HST_ACC, val);
9271         }
9272
9273         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9274                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9275                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9276                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9277                 tw32(PCIE_PWR_MGMT_THRESH, val);
9278
9279                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9280                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9281
9282                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9283
9284                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9285                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9286         }
9287
9288         if (tg3_flag(tp, L1PLLPD_EN)) {
9289                 u32 grc_mode = tr32(GRC_MODE);
9290
9291                 /* Access the lower 1K of PL PCIE block registers. */
9292                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9293                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9294
9295                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9296                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9297                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9298
9299                 tw32(GRC_MODE, grc_mode);
9300         }
9301
9302         if (tg3_flag(tp, 57765_CLASS)) {
9303                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9304                         u32 grc_mode = tr32(GRC_MODE);
9305
9306                         /* Access the lower 1K of PL PCIE block registers. */
9307                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9308                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9309
9310                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9311                                    TG3_PCIE_PL_LO_PHYCTL5);
9312                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9313                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9314
9315                         tw32(GRC_MODE, grc_mode);
9316                 }
9317
9318                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9319                         u32 grc_mode;
9320
9321                         /* Fix transmit hangs */
9322                         val = tr32(TG3_CPMU_PADRNG_CTL);
9323                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9324                         tw32(TG3_CPMU_PADRNG_CTL, val);
9325
9326                         grc_mode = tr32(GRC_MODE);
9327
9328                         /* Access the lower 1K of DL PCIE block registers. */
9329                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9330                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9331
9332                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9333                                    TG3_PCIE_DL_LO_FTSMAX);
9334                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9335                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9336                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9337
9338                         tw32(GRC_MODE, grc_mode);
9339                 }
9340
9341                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9342                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9343                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9344                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9345         }
9346
9347         /* This works around an issue with Athlon chipsets on
9348          * B3 tigon3 silicon.  This bit has no effect on any
9349          * other revision.  But do not set this on PCI Express
9350          * chips and don't even touch the clocks if the CPMU is present.
9351          */
9352         if (!tg3_flag(tp, CPMU_PRESENT)) {
9353                 if (!tg3_flag(tp, PCI_EXPRESS))
9354                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9355                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9356         }
9357
9358         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9359             tg3_flag(tp, PCIX_MODE)) {
9360                 val = tr32(TG3PCI_PCISTATE);
9361                 val |= PCISTATE_RETRY_SAME_DMA;
9362                 tw32(TG3PCI_PCISTATE, val);
9363         }
9364
9365         if (tg3_flag(tp, ENABLE_APE)) {
9366                 /* Allow reads and writes to the
9367                  * APE register and memory space.
9368                  */
9369                 val = tr32(TG3PCI_PCISTATE);
9370                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9371                        PCISTATE_ALLOW_APE_SHMEM_WR |
9372                        PCISTATE_ALLOW_APE_PSPACE_WR;
9373                 tw32(TG3PCI_PCISTATE, val);
9374         }
9375
9376         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9377                 /* Enable some hw fixes.  */
9378                 val = tr32(TG3PCI_MSI_DATA);
9379                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9380                 tw32(TG3PCI_MSI_DATA, val);
9381         }
9382
9383         /* Descriptor ring init may make accesses to the
9384          * NIC SRAM area to setup the TX descriptors, so we
9385          * can only do this after the hardware has been
9386          * successfully reset.
9387          */
9388         err = tg3_init_rings(tp);
9389         if (err)
9390                 return err;
9391
9392         if (tg3_flag(tp, 57765_PLUS)) {
9393                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9394                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9395                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9396                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9397                 if (!tg3_flag(tp, 57765_CLASS) &&
9398                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
9399                     tg3_asic_rev(tp) != ASIC_REV_5762)
9400                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9401                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9402         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9403                    tg3_asic_rev(tp) != ASIC_REV_5761) {
9404                 /* This value is determined during the probe time DMA
9405                  * engine test, tg3_test_dma.
9406                  */
9407                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9408         }
9409
9410         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9411                           GRC_MODE_4X_NIC_SEND_RINGS |
9412                           GRC_MODE_NO_TX_PHDR_CSUM |
9413                           GRC_MODE_NO_RX_PHDR_CSUM);
9414         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9415
9416         /* Pseudo-header checksum is done by hardware logic and not
9417          * the offload processers, so make the chip do the pseudo-
9418          * header checksums on receive.  For transmit it is more
9419          * convenient to do the pseudo-header checksum in software
9420          * as Linux does that on transmit for us in all cases.
9421          */
9422         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9423
9424         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9425         if (tp->rxptpctl)
9426                 tw32(TG3_RX_PTP_CTL,
9427                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9428
9429         if (tg3_flag(tp, PTP_CAPABLE))
9430                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9431
9432         tw32(GRC_MODE, tp->grc_mode | val);
9433
9434         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
9435         val = tr32(GRC_MISC_CFG);
9436         val &= ~0xff;
9437         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9438         tw32(GRC_MISC_CFG, val);
9439
9440         /* Initialize MBUF/DESC pool. */
9441         if (tg3_flag(tp, 5750_PLUS)) {
9442                 /* Do nothing.  */
9443         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9444                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9445                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9446                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9447                 else
9448                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9449                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9450                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9451         } else if (tg3_flag(tp, TSO_CAPABLE)) {
9452                 int fw_len;
9453
9454                 fw_len = tp->fw_len;
9455                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9456                 tw32(BUFMGR_MB_POOL_ADDR,
9457                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9458                 tw32(BUFMGR_MB_POOL_SIZE,
9459                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9460         }
9461
9462         if (tp->dev->mtu <= ETH_DATA_LEN) {
9463                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9464                      tp->bufmgr_config.mbuf_read_dma_low_water);
9465                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9466                      tp->bufmgr_config.mbuf_mac_rx_low_water);
9467                 tw32(BUFMGR_MB_HIGH_WATER,
9468                      tp->bufmgr_config.mbuf_high_water);
9469         } else {
9470                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9471                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9472                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9473                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9474                 tw32(BUFMGR_MB_HIGH_WATER,
9475                      tp->bufmgr_config.mbuf_high_water_jumbo);
9476         }
9477         tw32(BUFMGR_DMA_LOW_WATER,
9478              tp->bufmgr_config.dma_low_water);
9479         tw32(BUFMGR_DMA_HIGH_WATER,
9480              tp->bufmgr_config.dma_high_water);
9481
9482         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9483         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9484                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9485         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9486             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9487             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9488                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9489         tw32(BUFMGR_MODE, val);
9490         for (i = 0; i < 2000; i++) {
9491                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9492                         break;
9493                 udelay(10);
9494         }
9495         if (i >= 2000) {
9496                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9497                 return -ENODEV;
9498         }
9499
9500         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9501                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9502
9503         tg3_setup_rxbd_thresholds(tp);
9504
9505         /* Initialize TG3_BDINFO's at:
9506          *  RCVDBDI_STD_BD:     standard eth size rx ring
9507          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9508          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
9509          *
9510          * like so:
9511          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9512          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9513          *                              ring attribute flags
9514          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9515          *
9516          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9517          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9518          *
9519          * The size of each ring is fixed in the firmware, but the location is
9520          * configurable.
9521          */
9522         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9523              ((u64) tpr->rx_std_mapping >> 32));
9524         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9525              ((u64) tpr->rx_std_mapping & 0xffffffff));
9526         if (!tg3_flag(tp, 5717_PLUS))
9527                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9528                      NIC_SRAM_RX_BUFFER_DESC);
9529
9530         /* Disable the mini ring */
9531         if (!tg3_flag(tp, 5705_PLUS))
9532                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9533                      BDINFO_FLAGS_DISABLED);
9534
9535         /* Program the jumbo buffer descriptor ring control
9536          * blocks on those devices that have them.
9537          */
9538         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9539             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9540
9541                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9542                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9543                              ((u64) tpr->rx_jmb_mapping >> 32));
9544                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9545                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9546                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9547                               BDINFO_FLAGS_MAXLEN_SHIFT;
9548                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9549                              val | BDINFO_FLAGS_USE_EXT_RECV);
9550                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9551                             tg3_flag(tp, 57765_CLASS) ||
9552                             tg3_asic_rev(tp) == ASIC_REV_5762)
9553                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9554                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9555                 } else {
9556                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9557                              BDINFO_FLAGS_DISABLED);
9558                 }
9559
9560                 if (tg3_flag(tp, 57765_PLUS)) {
9561                         val = TG3_RX_STD_RING_SIZE(tp);
9562                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9563                         val |= (TG3_RX_STD_DMA_SZ << 2);
9564                 } else
9565                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9566         } else
9567                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9568
9569         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9570
9571         tpr->rx_std_prod_idx = tp->rx_pending;
9572         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9573
9574         tpr->rx_jmb_prod_idx =
9575                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9576         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9577
9578         tg3_rings_reset(tp);
9579
9580         /* Initialize MAC address and backoff seed. */
9581         __tg3_set_mac_addr(tp, 0);
9582
9583         /* MTU + ethernet header + FCS + optional VLAN tag */
9584         tw32(MAC_RX_MTU_SIZE,
9585              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9586
9587         /* The slot time is changed by tg3_setup_phy if we
9588          * run at gigabit with half duplex.
9589          */
9590         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9591               (6 << TX_LENGTHS_IPG_SHIFT) |
9592               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9593
9594         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9595             tg3_asic_rev(tp) == ASIC_REV_5762)
9596                 val |= tr32(MAC_TX_LENGTHS) &
9597                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9598                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9599
9600         tw32(MAC_TX_LENGTHS, val);
9601
9602         /* Receive rules. */
9603         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9604         tw32(RCVLPC_CONFIG, 0x0181);
9605
9606         /* Calculate RDMAC_MODE setting early, we need it to determine
9607          * the RCVLPC_STATE_ENABLE mask.
9608          */
9609         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9610                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9611                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9612                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9613                       RDMAC_MODE_LNGREAD_ENAB);
9614
9615         if (tg3_asic_rev(tp) == ASIC_REV_5717)
9616                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9617
9618         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9619             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9620             tg3_asic_rev(tp) == ASIC_REV_57780)
9621                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9622                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9623                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9624
9625         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9626             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9627                 if (tg3_flag(tp, TSO_CAPABLE) &&
9628                     tg3_asic_rev(tp) == ASIC_REV_5705) {
9629                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9630                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9631                            !tg3_flag(tp, IS_5788)) {
9632                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9633                 }
9634         }
9635
9636         if (tg3_flag(tp, PCI_EXPRESS))
9637                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9638
9639         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9640                 tp->dma_limit = 0;
9641                 if (tp->dev->mtu <= ETH_DATA_LEN) {
9642                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9643                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9644                 }
9645         }
9646
9647         if (tg3_flag(tp, HW_TSO_1) ||
9648             tg3_flag(tp, HW_TSO_2) ||
9649             tg3_flag(tp, HW_TSO_3))
9650                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9651
9652         if (tg3_flag(tp, 57765_PLUS) ||
9653             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9654             tg3_asic_rev(tp) == ASIC_REV_57780)
9655                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9656
9657         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9658             tg3_asic_rev(tp) == ASIC_REV_5762)
9659                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9660
9661         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9662             tg3_asic_rev(tp) == ASIC_REV_5784 ||
9663             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9664             tg3_asic_rev(tp) == ASIC_REV_57780 ||
9665             tg3_flag(tp, 57765_PLUS)) {
9666                 u32 tgtreg;
9667
9668                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9669                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9670                 else
9671                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
9672
9673                 val = tr32(tgtreg);
9674                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9675                     tg3_asic_rev(tp) == ASIC_REV_5762) {
9676                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9677                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9678                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9679                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9680                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9681                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9682                 }
9683                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9684         }
9685
9686         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9687             tg3_asic_rev(tp) == ASIC_REV_5720 ||
9688             tg3_asic_rev(tp) == ASIC_REV_5762) {
9689                 u32 tgtreg;
9690
9691                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9692                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9693                 else
9694                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9695
9696                 val = tr32(tgtreg);
9697                 tw32(tgtreg, val |
9698                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9699                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9700         }
9701
9702         /* Receive/send statistics. */
9703         if (tg3_flag(tp, 5750_PLUS)) {
9704                 val = tr32(RCVLPC_STATS_ENABLE);
9705                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9706                 tw32(RCVLPC_STATS_ENABLE, val);
9707         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9708                    tg3_flag(tp, TSO_CAPABLE)) {
9709                 val = tr32(RCVLPC_STATS_ENABLE);
9710                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9711                 tw32(RCVLPC_STATS_ENABLE, val);
9712         } else {
9713                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9714         }
9715         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9716         tw32(SNDDATAI_STATSENAB, 0xffffff);
9717         tw32(SNDDATAI_STATSCTRL,
9718              (SNDDATAI_SCTRL_ENABLE |
9719               SNDDATAI_SCTRL_FASTUPD));
9720
9721         /* Setup host coalescing engine. */
9722         tw32(HOSTCC_MODE, 0);
9723         for (i = 0; i < 2000; i++) {
9724                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9725                         break;
9726                 udelay(10);
9727         }
9728
9729         __tg3_set_coalesce(tp, &tp->coal);
9730
9731         if (!tg3_flag(tp, 5705_PLUS)) {
9732                 /* Status/statistics block address.  See tg3_timer,
9733                  * the tg3_periodic_fetch_stats call there, and
9734                  * tg3_get_stats to see how this works for 5705/5750 chips.
9735                  */
9736                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9737                      ((u64) tp->stats_mapping >> 32));
9738                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9739                      ((u64) tp->stats_mapping & 0xffffffff));
9740                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9741
9742                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9743
9744                 /* Clear statistics and status block memory areas */
9745                 for (i = NIC_SRAM_STATS_BLK;
9746                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9747                      i += sizeof(u32)) {
9748                         tg3_write_mem(tp, i, 0);
9749                         udelay(40);
9750                 }
9751         }
9752
9753         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9754
9755         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9756         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9757         if (!tg3_flag(tp, 5705_PLUS))
9758                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9759
9760         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9761                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9762                 /* reset to prevent losing 1st rx packet intermittently */
9763                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9764                 udelay(10);
9765         }
9766
9767         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9768                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9769                         MAC_MODE_FHDE_ENABLE;
9770         if (tg3_flag(tp, ENABLE_APE))
9771                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9772         if (!tg3_flag(tp, 5705_PLUS) &&
9773             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9774             tg3_asic_rev(tp) != ASIC_REV_5700)
9775                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9776         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9777         udelay(40);
9778
9779         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9780          * If TG3_FLAG_IS_NIC is zero, we should read the
9781          * register to preserve the GPIO settings for LOMs. The GPIOs,
9782          * whether used as inputs or outputs, are set by boot code after
9783          * reset.
9784          */
9785         if (!tg3_flag(tp, IS_NIC)) {
9786                 u32 gpio_mask;
9787
9788                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9789                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9790                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9791
9792                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
9793                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9794                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9795
9796                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
9797                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9798
9799                 tp->grc_local_ctrl &= ~gpio_mask;
9800                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9801
9802                 /* GPIO1 must be driven high for eeprom write protect */
9803                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9804                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9805                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9806         }
9807         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9808         udelay(100);
9809
9810         if (tg3_flag(tp, USING_MSIX)) {
9811                 val = tr32(MSGINT_MODE);
9812                 val |= MSGINT_MODE_ENABLE;
9813                 if (tp->irq_cnt > 1)
9814                         val |= MSGINT_MODE_MULTIVEC_EN;
9815                 if (!tg3_flag(tp, 1SHOT_MSI))
9816                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9817                 tw32(MSGINT_MODE, val);
9818         }
9819
9820         if (!tg3_flag(tp, 5705_PLUS)) {
9821                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9822                 udelay(40);
9823         }
9824
9825         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9826                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9827                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9828                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9829                WDMAC_MODE_LNGREAD_ENAB);
9830
9831         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9832             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9833                 if (tg3_flag(tp, TSO_CAPABLE) &&
9834                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
9835                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
9836                         /* nothing */
9837                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9838                            !tg3_flag(tp, IS_5788)) {
9839                         val |= WDMAC_MODE_RX_ACCEL;
9840                 }
9841         }
9842
9843         /* Enable host coalescing bug fix */
9844         if (tg3_flag(tp, 5755_PLUS))
9845                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9846
9847         if (tg3_asic_rev(tp) == ASIC_REV_5785)
9848                 val |= WDMAC_MODE_BURST_ALL_DATA;
9849
9850         tw32_f(WDMAC_MODE, val);
9851         udelay(40);
9852
9853         if (tg3_flag(tp, PCIX_MODE)) {
9854                 u16 pcix_cmd;
9855
9856                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9857                                      &pcix_cmd);
9858                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
9859                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9860                         pcix_cmd |= PCI_X_CMD_READ_2K;
9861                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
9862                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9863                         pcix_cmd |= PCI_X_CMD_READ_2K;
9864                 }
9865                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9866                                       pcix_cmd);
9867         }
9868
9869         tw32_f(RDMAC_MODE, rdmac_mode);
9870         udelay(40);
9871
9872         if (tg3_asic_rev(tp) == ASIC_REV_5719) {
9873                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9874                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9875                                 break;
9876                 }
9877                 if (i < TG3_NUM_RDMA_CHANNELS) {
9878                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9879                         val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9880                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9881                         tg3_flag_set(tp, 5719_RDMA_BUG);
9882                 }
9883         }
9884
9885         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9886         if (!tg3_flag(tp, 5705_PLUS))
9887                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9888
9889         if (tg3_asic_rev(tp) == ASIC_REV_5761)
9890                 tw32(SNDDATAC_MODE,
9891                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9892         else
9893                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9894
9895         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9896         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9897         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9898         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9899                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9900         tw32(RCVDBDI_MODE, val);
9901         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9902         if (tg3_flag(tp, HW_TSO_1) ||
9903             tg3_flag(tp, HW_TSO_2) ||
9904             tg3_flag(tp, HW_TSO_3))
9905                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9906         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9907         if (tg3_flag(tp, ENABLE_TSS))
9908                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9909         tw32(SNDBDI_MODE, val);
9910         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9911
9912         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
9913                 err = tg3_load_5701_a0_firmware_fix(tp);
9914                 if (err)
9915                         return err;
9916         }
9917
9918         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9919                 /* Ignore any errors for the firmware download. If download
9920                  * fails, the device will operate with EEE disabled
9921                  */
9922                 tg3_load_57766_firmware(tp);
9923         }
9924
9925         if (tg3_flag(tp, TSO_CAPABLE)) {
9926                 err = tg3_load_tso_firmware(tp);
9927                 if (err)
9928                         return err;
9929         }
9930
9931         tp->tx_mode = TX_MODE_ENABLE;
9932
9933         if (tg3_flag(tp, 5755_PLUS) ||
9934             tg3_asic_rev(tp) == ASIC_REV_5906)
9935                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9936
9937         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9938             tg3_asic_rev(tp) == ASIC_REV_5762) {
9939                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9940                 tp->tx_mode &= ~val;
9941                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9942         }
9943
9944         tw32_f(MAC_TX_MODE, tp->tx_mode);
9945         udelay(100);
9946
9947         if (tg3_flag(tp, ENABLE_RSS)) {
9948                 tg3_rss_write_indir_tbl(tp);
9949
9950                 /* Setup the "secret" hash key. */
9951                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9952                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9953                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9954                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9955                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9956                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9957                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9958                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9959                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9960                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9961         }
9962
9963         tp->rx_mode = RX_MODE_ENABLE;
9964         if (tg3_flag(tp, 5755_PLUS))
9965                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9966
9967         if (tg3_flag(tp, ENABLE_RSS))
9968                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9969                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9970                                RX_MODE_RSS_IPV6_HASH_EN |
9971                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9972                                RX_MODE_RSS_IPV4_HASH_EN |
9973                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9974
9975         tw32_f(MAC_RX_MODE, tp->rx_mode);
9976         udelay(10);
9977
9978         tw32(MAC_LED_CTRL, tp->led_ctrl);
9979
9980         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9981         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9982                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9983                 udelay(10);
9984         }
9985         tw32_f(MAC_RX_MODE, tp->rx_mode);
9986         udelay(10);
9987
9988         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9989                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
9990                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9991                         /* Set drive transmission level to 1.2V  */
9992                         /* only if the signal pre-emphasis bit is not set  */
9993                         val = tr32(MAC_SERDES_CFG);
9994                         val &= 0xfffff000;
9995                         val |= 0x880;
9996                         tw32(MAC_SERDES_CFG, val);
9997                 }
9998                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
9999                         tw32(MAC_SERDES_CFG, 0x616000);
10000         }
10001
10002         /* Prevent chip from dropping frames when flow control
10003          * is enabled.
10004          */
10005         if (tg3_flag(tp, 57765_CLASS))
10006                 val = 1;
10007         else
10008                 val = 2;
10009         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10010
10011         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10012             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10013                 /* Use hardware link auto-negotiation */
10014                 tg3_flag_set(tp, HW_AUTONEG);
10015         }
10016
10017         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10018             tg3_asic_rev(tp) == ASIC_REV_5714) {
10019                 u32 tmp;
10020
10021                 tmp = tr32(SERDES_RX_CTRL);
10022                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10023                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10024                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10025                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10026         }
10027
10028         if (!tg3_flag(tp, USE_PHYLIB)) {
10029                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10030                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10031
10032                 err = tg3_setup_phy(tp, 0);
10033                 if (err)
10034                         return err;
10035
10036                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10037                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10038                         u32 tmp;
10039
10040                         /* Clear CRC stats. */
10041                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10042                                 tg3_writephy(tp, MII_TG3_TEST1,
10043                                              tmp | MII_TG3_TEST1_CRC_EN);
10044                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10045                         }
10046                 }
10047         }
10048
10049         __tg3_set_rx_mode(tp->dev);
10050
10051         /* Initialize receive rules. */
10052         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10053         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10054         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10055         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10056
10057         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10058                 limit = 8;
10059         else
10060                 limit = 16;
10061         if (tg3_flag(tp, ENABLE_ASF))
10062                 limit -= 4;
10063         switch (limit) {
10064         case 16:
10065                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10066         case 15:
10067                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10068         case 14:
10069                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10070         case 13:
10071                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10072         case 12:
10073                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10074         case 11:
10075                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10076         case 10:
10077                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10078         case 9:
10079                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10080         case 8:
10081                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10082         case 7:
10083                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10084         case 6:
10085                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10086         case 5:
10087                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10088         case 4:
10089                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10090         case 3:
10091                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10092         case 2:
10093         case 1:
10094
10095         default:
10096                 break;
10097         }
10098
10099         if (tg3_flag(tp, ENABLE_APE))
10100                 /* Write our heartbeat update interval to APE. */
10101                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10102                                 APE_HOST_HEARTBEAT_INT_DISABLE);
10103
10104         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10105
10106         return 0;
10107 }
10108
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * Switches the chip clocks, clears the PCI memory window base
 * register so subsequent indirect accesses start from offset 0,
 * then performs the full hardware reset/re-init.  Returns 0 on
 * success or the error code propagated from tg3_reset_hw().
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	/* Reset the memory window base before reprogramming the chip. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
10120
10121 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10122 {
10123         int i;
10124
10125         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10126                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10127
10128                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10129                 off += len;
10130
10131                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10132                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10133                         memset(ocir, 0, TG3_OCIR_LEN);
10134         }
10135 }
10136
10137 /* sysfs attributes for hwmon */
10138 static ssize_t tg3_show_temp(struct device *dev,
10139                              struct device_attribute *devattr, char *buf)
10140 {
10141         struct pci_dev *pdev = to_pci_dev(dev);
10142         struct net_device *netdev = pci_get_drvdata(pdev);
10143         struct tg3 *tp = netdev_priv(netdev);
10144         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10145         u32 temperature;
10146
10147         spin_lock_bh(&tp->lock);
10148         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10149                                 sizeof(temperature));
10150         spin_unlock_bh(&tp->lock);
10151         return sprintf(buf, "%u\n", temperature);
10152 }
10153
10154
/* Read-only hwmon temperature attributes.  The SENSOR_DEVICE_ATTR index
 * is the APE scratchpad offset of the corresponding sensor value, which
 * tg3_show_temp() retrieves via to_sensor_dev_attr().
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

/* Attribute list exported through the sysfs group below. */
static struct attribute *tg3_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};

/* Group created/removed on the PCI device kobject by
 * tg3_hwmon_open()/tg3_hwmon_close().
 */
static const struct attribute_group tg3_group = {
	.attrs = tg3_attributes,
};
10172
10173 static void tg3_hwmon_close(struct tg3 *tp)
10174 {
10175         if (tp->hwmon_dev) {
10176                 hwmon_device_unregister(tp->hwmon_dev);
10177                 tp->hwmon_dev = NULL;
10178                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10179         }
10180 }
10181
10182 static void tg3_hwmon_open(struct tg3 *tp)
10183 {
10184         int i, err;
10185         u32 size = 0;
10186         struct pci_dev *pdev = tp->pdev;
10187         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10188
10189         tg3_sd_scan_scratchpad(tp, ocirs);
10190
10191         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10192                 if (!ocirs[i].src_data_length)
10193                         continue;
10194
10195                 size += ocirs[i].src_hdr_length;
10196                 size += ocirs[i].src_data_length;
10197         }
10198
10199         if (!size)
10200                 return;
10201
10202         /* Register hwmon sysfs hooks */
10203         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10204         if (err) {
10205                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10206                 return;
10207         }
10208
10209         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10210         if (IS_ERR(tp->hwmon_dev)) {
10211                 tp->hwmon_dev = NULL;
10212                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10213                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10214         }
10215 }
10216
10217
/* Accumulate the 32-bit hardware counter at register REG into the
 * 64-bit (high/low pair) statistic PSTAT.  A wrap of the low word is
 * detected by the post-add value being smaller than the value just
 * added, in which case a carry is propagated into the high word.
 * NOTE: PSTAT is evaluated more than once, so callers must pass a
 * side-effect-free expression.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
10224
/* Fold the chip's 32-bit MAC statistics counters into the 64-bit
 * software copies in tp->hw_stats.  See tg3_timer (which calls this
 * periodically) and tg3_get_stats for how these accumulated values are
 * consumed.  Nothing is fetched while the link is down, since the
 * counters cannot advance.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	/* Once enough frames have been transmitted, back out the 5719
	 * TX length workaround that tg3_reset_hw() enabled in
	 * TG3_LSO_RD_DMA_CRPTEN_CTRL, and clear the flag so this runs
	 * only once.
	 */
	if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		/* On these revisions the discard count is derived from
		 * the HOSTCC mbuf low-watermark attention bit (one event
		 * per poll, write-one-to-clear) instead of the RCVLPC
		 * discard counter; the running total is mirrored into
		 * mbuf_lwm_thresh_hit.
		 */
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
10289
10290 static void tg3_chk_missed_msi(struct tg3 *tp)
10291 {
10292         u32 i;
10293
10294         for (i = 0; i < tp->irq_cnt; i++) {
10295                 struct tg3_napi *tnapi = &tp->napi[i];
10296
10297                 if (tg3_has_work(tnapi)) {
10298                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10299                             tnapi->last_tx_cons == tnapi->tx_cons) {
10300                                 if (tnapi->chk_msi_cnt < 1) {
10301                                         tnapi->chk_msi_cnt++;
10302                                         return;
10303                                 }
10304                                 tg3_msi(0, tnapi);
10305                         }
10306                 }
10307                 tnapi->chk_msi_cnt = 0;
10308                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10309                 tnapi->last_tx_cons = tnapi->tx_cons;
10310         }
10311 }
10312
/* Periodic driver timer, armed by tg3_timer_start().  Fires every
 * tp->timer_offset jiffies and handles work the hardware cannot signal
 * reliably: missed-MSI recovery, non-tagged-status interrupt races,
 * link polling, statistics refresh and the ASF firmware heartbeat.
 * Always re-arms itself before returning.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Skip this tick entirely while irq sync or a reset task is in
	 * flight; just re-arm.
	 */
	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			/* Status block updated but possibly unserviced:
			 * force an interrupt via GRC local control.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Nudge the coalescing engine to post a fresh
			 * status block now.
			 */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			/* Write DMA engine died: schedule a full chip reset. */
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			/* Poll MAC_STATUS for link changes instead of
			 * relying on link-change interrupts.
			 */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link was up and the state changed ... */
			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			/* ... or link was down and the SERDES now sees
			 * sync/signal: renegotiate either way.
			 */
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Bounce the port mode bits to
					 * resync before reconfiguring.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
10444
10445 static void tg3_timer_init(struct tg3 *tp)
10446 {
10447         if (tg3_flag(tp, TAGGED_STATUS) &&
10448             tg3_asic_rev(tp) != ASIC_REV_5717 &&
10449             !tg3_flag(tp, 57765_CLASS))
10450                 tp->timer_offset = HZ;
10451         else
10452                 tp->timer_offset = HZ / 10;
10453
10454         BUG_ON(tp->timer_offset > HZ);
10455
10456         tp->timer_multiplier = (HZ / tp->timer_offset);
10457         tp->asf_multiplier = (HZ / tp->timer_offset) *
10458                              TG3_FW_UPDATE_FREQ_SEC;
10459
10460         init_timer(&tp->timer);
10461         tp->timer.data = (unsigned long) tp;
10462         tp->timer.function = tg3_timer;
10463 }
10464
10465 static void tg3_timer_start(struct tg3 *tp)
10466 {
10467         tp->asf_counter   = tp->asf_multiplier;
10468         tp->timer_counter = tp->timer_multiplier;
10469
10470         tp->timer.expires = jiffies + tp->timer_offset;
10471         add_timer(&tp->timer);
10472 }
10473
/* Stop the periodic driver timer, waiting for a concurrently running
 * tg3_timer() invocation on another CPU to finish first.
 */
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
10478
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On init failure the device is halted and closed: the full lock is
 * dropped (hence the __releases/__acquires annotations) so that the
 * timer can be stopped and dev_close() called, then re-taken before
 * returning the error to the caller.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* dev_close() must run without the full lock and with
		 * NAPI re-enabled.
		 */
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
10502
/* Workqueue handler for deferred chip resets scheduled via
 * tg3_reset_task_schedule() (e.g. from the timer when the write DMA
 * engine stops).  Quiesces the PHY and netif activity, halts and
 * re-initializes the hardware, then restarts everything.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		/* Device went down after the work was queued: nothing
		 * left to reset.
		 */
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	/* Drop the lock for the stop calls below, then re-take it with
	 * irq_sync for the actual reset.
	 */
	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		/* TX recovery: switch to the flushing mailbox write
		 * routines in case writes are being reordered.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* Restart the PHY only if the hardware came back up. */
	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
10546
10547 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10548 {
10549         irq_handler_t fn;
10550         unsigned long flags;
10551         char *name;
10552         struct tg3_napi *tnapi = &tp->napi[irq_num];
10553
10554         if (tp->irq_cnt == 1)
10555                 name = tp->dev->name;
10556         else {
10557                 name = &tnapi->irq_lbl[0];
10558                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10559                 name[IFNAMSIZ-1] = 0;
10560         }
10561
10562         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10563                 fn = tg3_msi;
10564                 if (tg3_flag(tp, 1SHOT_MSI))
10565                         fn = tg3_msi_1shot;
10566                 flags = 0;
10567         } else {
10568                 fn = tg3_interrupt;
10569                 if (tg3_flag(tp, TAGGED_STATUS))
10570                         fn = tg3_interrupt_tagged;
10571                 flags = IRQF_SHARED;
10572         }
10573
10574         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10575 }
10576
/* Verify that the chip can actually deliver an interrupt on vector 0:
 * temporarily swap in tg3_test_isr, force an immediate coalescing
 * event and poll the interrupt mailbox for up to ~50ms.  Returns 0
 * when delivery was observed, -EIO when not, or a request_irq error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Release the real handler so the test ISR can take the vector. */
	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Kick the coalescing engine to generate an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* Either a non-zero mailbox or the masked-PCI-int bit
		 * proves the interrupt fired.
		 */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Swap the production handler back in regardless of outcome. */
	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
10650
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word (SERR included). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	/* Re-request vector 0; with USING_MSI cleared this installs the
	 * legacy INTx handler.
	 */
	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
10711
10712 static int tg3_request_firmware(struct tg3 *tp)
10713 {
10714         const struct tg3_firmware_hdr *fw_hdr;
10715
10716         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10717                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10718                            tp->fw_needed);
10719                 return -ENOENT;
10720         }
10721
10722         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
10723
10724         /* Firmware blob starts with version numbers, followed by
10725          * start address and _full_ length including BSS sections
10726          * (which must be longer than the actual data, of course
10727          */
10728
10729         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
10730         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
10731                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10732                            tp->fw_len, tp->fw_needed);
10733                 release_firmware(tp->fw);
10734                 tp->fw = NULL;
10735                 return -EINVAL;
10736         }
10737
10738         /* We no longer need firmware; we have it. */
10739         tp->fw_needed = NULL;
10740         return 0;
10741 }
10742
10743 static u32 tg3_irq_count(struct tg3 *tp)
10744 {
10745         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10746
10747         if (irq_cnt > 1) {
10748                 /* We want as many rx rings enabled as there are cpus.
10749                  * In multiqueue MSI-X mode, the first MSI-X vector
10750                  * only deals with link interrupts, etc, so we add
10751                  * one to the number of vectors we are requesting.
10752                  */
10753                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10754         }
10755
10756         return irq_cnt;
10757 }
10758
/* Try to put the device into MSI-X mode.  Computes the desired rx/tx
 * queue counts, requests one vector per IRQ, and retries with fewer
 * vectors if the PCI core grants less than requested.  Sets ENABLE_RSS
 * / ENABLE_TSS when multiple queues end up enabled.  Returns true when
 * MSI-X was successfully enabled.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* Positive rc = number of vectors actually available;
		 * retry with that count and shrink the queue config.
		 */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	/* Single vector: MSI-X is on, but no RSS/TSS to configure. */
	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
10819
/* Select the interrupt mode for the device — MSI-X, MSI or legacy
 * INTx, in that order of preference — and program MSGINT_MODE
 * accordingly.  Falls back to a single INTx vector (and single rx/tx
 * queue) when neither MSI flavor can be enabled.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		/* One-shot is opt-in; disable it in hardware otherwise. */
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		/* INTx or MSI: a single vector taken from the PCI device. */
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
10858
10859 static void tg3_ints_fini(struct tg3 *tp)
10860 {
10861         if (tg3_flag(tp, USING_MSIX))
10862                 pci_disable_msix(tp->pdev);
10863         else if (tg3_flag(tp, USING_MSI))
10864                 pci_disable_msi(tp->pdev);
10865         tg3_flag_clear(tp, USING_MSI);
10866         tg3_flag_clear(tp, USING_MSIX);
10867         tg3_flag_clear(tp, ENABLE_RSS);
10868         tg3_flag_clear(tp, ENABLE_TSS);
10869 }
10870
/* Bring the device up: set up interrupt vectors, DMA-consistent
 * memory, NAPI contexts and IRQs, program the hardware and start the
 * TX queues.  @reset_phy is forwarded to tg3_init_hw(); @test_irq runs
 * the MSI delivery self-test; @init distinguishes first open (PTP
 * init) from a restart (PTP resume).  Returns 0 or a negative errno;
 * on failure everything acquired so far is released via the err_out*
 * unwind labels.
 */
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			/* Release the vectors requested so far. */
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			/* MSI test failed and INTx fallback also failed:
			 * shut the hardware back down.
			 */
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	/* First open initializes PTP state; restarts only resume it. */
	if (init)
		tg3_ptp_init(tp);
	else
		tg3_ptp_resume(tp);


	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);

	return err;
}
10986
/* Counterpart of tg3_start(): quiesce and release everything it set
 * up.  The ordering is deliberate — the reset task, netif activity,
 * timer and PHY are stopped first, the chip is halted under the full
 * lock, and only then are IRQs, NAPI contexts and DMA memory freed.
 */
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/* Free vectors in reverse order of tg3_start()'s requests. */
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
11021
/* ndo_open handler: load any firmware the chip needs (with per-ASIC
 * policies on what a load failure costs), power the chip up, bring the
 * interface up via tg3_start() and register the PTP clock when the
 * hardware supports it.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
			/* 57766: a missing image only costs EEE; keep
			 * going without it.
			 */
			if (err) {
				netdev_warn(tp->dev, "EEE capability disabled\n");
				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
				netdev_warn(tp->dev, "EEE capability restored\n");
				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
			}
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			/* 5701 A0 cannot run without its firmware: fail
			 * the open.
			 */
			if (err)
				return err;
		} else if (err) {
			/* Other chips: run without TSO rather than fail. */
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp, true, true, true);
	if (err) {
		/* Start failed: drop aux power and park the chip in D3hot. */
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		/* Run without a PTP clock if registration failed. */
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	return err;
}
11077
/* ndo_close handler: tear down PTP, stop the device, reset the
 * cumulative statistics snapshots and power the chip down.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_ptp_fini(tp);

	tg3_stop(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_power_down(tp);

	tg3_carrier_off(tp);

	return 0;
}
11096
11097 static inline u64 get_stat64(tg3_stat64_t *val)
11098 {
11099        return ((u64)val->high << 32) | ((u64)val->low);
11100 }
11101
/* Return the cumulative RX CRC error count.  On 5700/5701 copper
 * devices the count comes from the PHY: CRC counting is enabled via
 * MII_TG3_TEST1 and the raw counter read is accumulated into
 * tp->phy_crc_errors (NOTE(review): looks like the PHY counter is
 * clear-on-read, hence the accumulation — confirm against the PHY
 * datasheet).  All other devices use the MAC statistics block.
 */
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;		/* PHY read failed: add nothing */

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
11125
/* Add the live hardware counter for @member to the snapshot saved at
 * the last stats reset.  Relies on locals named estats, old_estats and
 * hw_stats being in scope at the expansion site.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)
11129
11130 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11131 {
11132         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11133         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11134
11135         ESTAT_ADD(rx_octets);
11136         ESTAT_ADD(rx_fragments);
11137         ESTAT_ADD(rx_ucast_packets);
11138         ESTAT_ADD(rx_mcast_packets);
11139         ESTAT_ADD(rx_bcast_packets);
11140         ESTAT_ADD(rx_fcs_errors);
11141         ESTAT_ADD(rx_align_errors);
11142         ESTAT_ADD(rx_xon_pause_rcvd);
11143         ESTAT_ADD(rx_xoff_pause_rcvd);
11144         ESTAT_ADD(rx_mac_ctrl_rcvd);
11145         ESTAT_ADD(rx_xoff_entered);
11146         ESTAT_ADD(rx_frame_too_long_errors);
11147         ESTAT_ADD(rx_jabbers);
11148         ESTAT_ADD(rx_undersize_packets);
11149         ESTAT_ADD(rx_in_length_errors);
11150         ESTAT_ADD(rx_out_length_errors);
11151         ESTAT_ADD(rx_64_or_less_octet_packets);
11152         ESTAT_ADD(rx_65_to_127_octet_packets);
11153         ESTAT_ADD(rx_128_to_255_octet_packets);
11154         ESTAT_ADD(rx_256_to_511_octet_packets);
11155         ESTAT_ADD(rx_512_to_1023_octet_packets);
11156         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11157         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11158         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11159         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11160         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11161
11162         ESTAT_ADD(tx_octets);
11163         ESTAT_ADD(tx_collisions);
11164         ESTAT_ADD(tx_xon_sent);
11165         ESTAT_ADD(tx_xoff_sent);
11166         ESTAT_ADD(tx_flow_control);
11167         ESTAT_ADD(tx_mac_errors);
11168         ESTAT_ADD(tx_single_collisions);
11169         ESTAT_ADD(tx_mult_collisions);
11170         ESTAT_ADD(tx_deferred);
11171         ESTAT_ADD(tx_excessive_collisions);
11172         ESTAT_ADD(tx_late_collisions);
11173         ESTAT_ADD(tx_collide_2times);
11174         ESTAT_ADD(tx_collide_3times);
11175         ESTAT_ADD(tx_collide_4times);
11176         ESTAT_ADD(tx_collide_5times);
11177         ESTAT_ADD(tx_collide_6times);
11178         ESTAT_ADD(tx_collide_7times);
11179         ESTAT_ADD(tx_collide_8times);
11180         ESTAT_ADD(tx_collide_9times);
11181         ESTAT_ADD(tx_collide_10times);
11182         ESTAT_ADD(tx_collide_11times);
11183         ESTAT_ADD(tx_collide_12times);
11184         ESTAT_ADD(tx_collide_13times);
11185         ESTAT_ADD(tx_collide_14times);
11186         ESTAT_ADD(tx_collide_15times);
11187         ESTAT_ADD(tx_ucast_packets);
11188         ESTAT_ADD(tx_mcast_packets);
11189         ESTAT_ADD(tx_bcast_packets);
11190         ESTAT_ADD(tx_carrier_sense_errors);
11191         ESTAT_ADD(tx_discards);
11192         ESTAT_ADD(tx_errors);
11193
11194         ESTAT_ADD(dma_writeq_full);
11195         ESTAT_ADD(dma_write_prioq_full);
11196         ESTAT_ADD(rxbds_empty);
11197         ESTAT_ADD(rx_discards);
11198         ESTAT_ADD(rx_errors);
11199         ESTAT_ADD(rx_threshold_hit);
11200
11201         ESTAT_ADD(dma_readq_full);
11202         ESTAT_ADD(dma_read_prioq_full);
11203         ESTAT_ADD(tx_comp_queue_full);
11204
11205         ESTAT_ADD(ring_set_send_prod_index);
11206         ESTAT_ADD(ring_status_update);
11207         ESTAT_ADD(nic_irqs);
11208         ESTAT_ADD(nic_avoided_irqs);
11209         ESTAT_ADD(nic_tx_threshold_hit);
11210
11211         ESTAT_ADD(mbuf_lwm_thresh_hit);
11212 }
11213
/* Fill @stats with cumulative rtnl_link_stats64 values.
 *
 * Each field is the snapshot saved across close/open cycles
 * (tp->net_stats_prev) plus the corresponding live hardware counter(s).
 * Several stats fields aggregate multiple hardware counters.  Caller
 * must ensure tp->hw_stats is valid.
 */
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	/* Packet totals are the sum of unicast, multicast and broadcast. */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on some chips; see
	 * tg3_calc_crc_errors().
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	/* Driver-maintained software drop counters. */
	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
11269
/* ethtool get_regs_len handler: size of the register dump buffer
 * filled by tg3_get_regs().
 */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
11274
/* ethtool get_regs handler: dump the legacy register block into @_p.
 *
 * The buffer (TG3_REG_BLK_SIZE bytes) is zeroed first; if the device is
 * in a low-power state no registers are read and the zeroed buffer is
 * returned.  The dump itself runs under the full driver lock.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
11293
/* ethtool get_eeprom_len handler: size in bytes of the device NVRAM. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
11300
/* ethtool get_eeprom handler: read @eeprom->len bytes of NVRAM starting
 * at @eeprom->offset into @data.
 *
 * NVRAM is accessed in big-endian 32-bit words on 4-byte boundaries, so
 * the read is split into an unaligned head, an aligned middle, and an
 * unaligned tail.  eeprom->len is updated to the number of bytes
 * actually copied, even on partial failure.  Returns 0 on success or a
 * negative errno.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	/* NVRAM cannot be read while the device is powered down. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* Copy only the requested bytes out of the aligned word. */
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* Report how much was copied before the failure. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
11363
11364 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11365 {
11366         struct tg3 *tp = netdev_priv(dev);
11367         int ret;
11368         u32 offset, len, b_offset, odd_len;
11369         u8 *buf;
11370         __be32 start, end;
11371
11372         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11373                 return -EAGAIN;
11374
11375         if (tg3_flag(tp, NO_NVRAM) ||
11376             eeprom->magic != TG3_EEPROM_MAGIC)
11377                 return -EINVAL;
11378
11379         offset = eeprom->offset;
11380         len = eeprom->len;
11381
11382         if ((b_offset = (offset & 3))) {
11383                 /* adjustments to start on required 4 byte boundary */
11384                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11385                 if (ret)
11386                         return ret;
11387                 len += b_offset;
11388                 offset &= ~3;
11389                 if (len < 4)
11390                         len = 4;
11391         }
11392
11393         odd_len = 0;
11394         if (len & 3) {
11395                 /* adjustments to end on required 4 byte boundary */
11396                 odd_len = 1;
11397                 len = (len + 3) & ~3;
11398                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11399                 if (ret)
11400                         return ret;
11401         }
11402
11403         buf = data;
11404         if (b_offset || odd_len) {
11405                 buf = kmalloc(len, GFP_KERNEL);
11406                 if (!buf)
11407                         return -ENOMEM;
11408                 if (b_offset)
11409                         memcpy(buf, &start, 4);
11410                 if (odd_len)
11411                         memcpy(buf+len-4, &end, 4);
11412                 memcpy(buf + b_offset, data, eeprom->len);
11413         }
11414
11415         ret = tg3_nvram_write_block(tp, offset, len, buf);
11416
11417         if (buf != data)
11418                 kfree(buf);
11419
11420         return ret;
11421 }
11422
/* ethtool get_settings handler: report link capabilities, advertised
 * modes and current link state.
 *
 * When phylib manages the PHY, the query is delegated to it entirely.
 * Otherwise the answer is built from the driver's phy_flags and
 * link_config state.  Returns 0, or -EAGAIN if phylib is in use but
 * the PHY is not yet connected.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	/* Gigabit modes unless the PHY is 10/100-only. */
	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper PHYs add 10/100 + TP; SERDES devices report fibre. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Translate the rx/tx flow control configuration into the
		 * corresponding Pause/Asym_Pause advertisement bits.
		 */
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && tp->link_up) {
		/* Link is up: report the negotiated speed/duplex, the link
		 * partner's advertisement and (copper only) MDI-X status.
		 */
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
11488
/* ethtool set_settings handler: validate and apply a new link
 * configuration (autoneg on/off, advertised modes, forced speed/duplex).
 *
 * When phylib manages the PHY the request is delegated to it.
 * Otherwise the request is validated against the device's capabilities,
 * stored in tp->link_config, and — if the interface is up — applied by
 * re-running PHY setup under the full driver lock.  Returns 0 or
 * -EINVAL/-EAGAIN.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	/* A forced link must specify a valid duplex setting. */
	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Build the set of modes this device can advertise and
		 * reject any request outside it.
		 */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		/* Keep only the speed/duplex bits in the stored mask. */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		/* Forced mode: SERDES links only support 1000/full;
		 * copper forced mode only supports 10 or 100.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	/* Apply immediately if the interface is running. */
	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
11575
/* ethtool get_drvinfo handler: report driver name/version, firmware
 * version string and PCI bus address.
 */
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
11585
11586 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11587 {
11588         struct tg3 *tp = netdev_priv(dev);
11589
11590         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11591                 wol->supported = WAKE_MAGIC;
11592         else
11593                 wol->supported = 0;
11594         wol->wolopts = 0;
11595         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11596                 wol->wolopts = WAKE_MAGIC;
11597         memset(&wol->sopass, 0, sizeof(wol->sopass));
11598 }
11599
/* ethtool set_wol handler: enable or disable magic-packet Wake-on-LAN.
 *
 * Rejects any wake option other than WAKE_MAGIC, and rejects WAKE_MAGIC
 * itself if the chip or platform cannot support it.  The device wakeup
 * state is recorded with the PM core and mirrored into the driver's
 * WOL_ENABLE flag under tp->lock.  Returns 0 or -EINVAL.
 */
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	/* Mirror the PM-core wakeup decision into the driver flag. */
	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);
	spin_unlock_bh(&tp->lock);

	return 0;
}
11622
11623 static u32 tg3_get_msglevel(struct net_device *dev)
11624 {
11625         struct tg3 *tp = netdev_priv(dev);
11626         return tp->msg_enable;
11627 }
11628
11629 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11630 {
11631         struct tg3 *tp = netdev_priv(dev);
11632         tp->msg_enable = value;
11633 }
11634
/* ethtool nway_reset handler: restart link autonegotiation.
 *
 * Returns -EAGAIN if the interface is down (or phylib has no PHY
 * attached yet) and -EINVAL for SERDES PHYs or when autonegotiation is
 * not enabled in the BMCR.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		/* phylib owns the PHY; delegate the restart to it. */
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is read twice on purpose, it seems —
		 * the first read looks like a dummy to flush a latched
		 * value; confirm against the PHY errata before removing.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			/* Kick off a new negotiation round. */
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
11668
11669 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11670 {
11671         struct tg3 *tp = netdev_priv(dev);
11672
11673         ering->rx_max_pending = tp->rx_std_ring_mask;
11674         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11675                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11676         else
11677                 ering->rx_jumbo_max_pending = 0;
11678
11679         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11680
11681         ering->rx_pending = tp->rx_pending;
11682         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11683                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11684         else
11685                 ering->rx_jumbo_pending = 0;
11686
11687         ering->tx_pending = tp->napi[0].tx_pending;
11688 }
11689
/* ethtool set_ringparam handler: apply new RX/TX ring sizes.
 *
 * Validates the requested sizes against hardware limits (and the TSO
 * workaround's larger TX minimum), then — if the interface is running —
 * stops traffic, stores the new sizes, and restarts the hardware.
 * Returns 0 or a negative errno from the restart.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	/* TX must exceed MAX_SKB_FRAGS (3x that on TSO_BUG chips) so a
	 * maximally fragmented skb can always be queued.
	 */
	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries. */
	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* Every TX queue gets the same pending count. */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 0);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	/* Restart the PHY only if we stopped it and the restart worked. */
	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
11735
11736 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11737 {
11738         struct tg3 *tp = netdev_priv(dev);
11739
11740         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11741
11742         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11743                 epause->rx_pause = 1;
11744         else
11745                 epause->rx_pause = 0;
11746
11747         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11748                 epause->tx_pause = 1;
11749         else
11750                 epause->tx_pause = 0;
11751 }
11752
/* ethtool set_pauseparam handler: apply a new flow control
 * configuration.
 *
 * Two paths: with phylib, the rx/tx request is translated into
 * Pause/Asym_Pause advertisement bits and, when autonegotiating, a new
 * negotiation is started so the link partner learns the change; without
 * phylib, the flags are stored and the hardware is halted and restarted
 * to apply them.  Returns 0 or a negative errno.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* Asymmetric rx/tx settings need Asym_Pause support. */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Map the rx/tx request onto flowctrl flags and the
		 * corresponding IEEE pause advertisement bits.
		 */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not attached yet: just record the new
			 * advertisement for when it connects.
			 */
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		/* Restart the hardware so the new settings take effect. */
		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 0);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
11854
11855 static int tg3_get_sset_count(struct net_device *dev, int sset)
11856 {
11857         switch (sset) {
11858         case ETH_SS_TEST:
11859                 return TG3_NUM_TEST;
11860         case ETH_SS_STATS:
11861                 return TG3_NUM_STATS;
11862         default:
11863                 return -EOPNOTSUPP;
11864         }
11865 }
11866
/* ethtool get_rxnfc handler: currently only answers ETHTOOL_GRXRINGS
 * (the number of RX rings usable for RSS).
 *
 * Requires MSI-X support.  When the interface is down, the count is
 * estimated from the online CPU count, capped at TG3_RSS_MAX_NUM_QS.
 * Returns 0 or -EOPNOTSUPP.
 */
static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			 u32 *rules __always_unused)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		if (netif_running(tp->dev))
			info->data = tp->rxq_cnt;
		else {
			info->data = num_online_cpus();
			if (info->data > TG3_RSS_MAX_NUM_QS)
				info->data = TG3_RSS_MAX_NUM_QS;
		}

		/* The first interrupt vector only
		 * handles link interrupts.
		 */
		info->data -= 1;
		return 0;

	default:
		return -EOPNOTSUPP;
	}
}
11895
11896 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11897 {
11898         u32 size = 0;
11899         struct tg3 *tp = netdev_priv(dev);
11900
11901         if (tg3_flag(tp, SUPPORT_MSIX))
11902                 size = TG3_RSS_INDIR_TBL_SIZE;
11903
11904         return size;
11905 }
11906
11907 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11908 {
11909         struct tg3 *tp = netdev_priv(dev);
11910         int i;
11911
11912         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11913                 indir[i] = tp->rss_ind_tbl[i];
11914
11915         return 0;
11916 }
11917
/* ethtool set_rxfh_indir handler: store a new RSS indirection table
 * and, if RSS is active, push it to the hardware under the full driver
 * lock.  Always returns 0.
 */
static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	size_t i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = indir[i];

	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
		return 0;

	/* It is legal to write the indirection
	 * table while the device is running.
	 */
	tg3_full_lock(tp, 0);
	tg3_rss_write_indir_tbl(tp);
	tg3_full_unlock(tp);

	return 0;
}
11938
11939 static void tg3_get_channels(struct net_device *dev,
11940                              struct ethtool_channels *channel)
11941 {
11942         struct tg3 *tp = netdev_priv(dev);
11943         u32 deflt_qs = netif_get_num_default_rss_queues();
11944
11945         channel->max_rx = tp->rxq_max;
11946         channel->max_tx = tp->txq_max;
11947
11948         if (netif_running(dev)) {
11949                 channel->rx_count = tp->rxq_cnt;
11950                 channel->tx_count = tp->txq_cnt;
11951         } else {
11952                 if (tp->rxq_req)
11953                         channel->rx_count = tp->rxq_req;
11954                 else
11955                         channel->rx_count = min(deflt_qs, tp->rxq_max);
11956
11957                 if (tp->txq_req)
11958                         channel->tx_count = tp->txq_req;
11959                 else
11960                         channel->tx_count = min(deflt_qs, tp->txq_max);
11961         }
11962 }
11963
11964 static int tg3_set_channels(struct net_device *dev,
11965                             struct ethtool_channels *channel)
11966 {
11967         struct tg3 *tp = netdev_priv(dev);
11968
11969         if (!tg3_flag(tp, SUPPORT_MSIX))
11970                 return -EOPNOTSUPP;
11971
11972         if (channel->rx_count > tp->rxq_max ||
11973             channel->tx_count > tp->txq_max)
11974                 return -EINVAL;
11975
11976         tp->rxq_req = channel->rx_count;
11977         tp->txq_req = channel->tx_count;
11978
11979         if (!netif_running(dev))
11980                 return 0;
11981
11982         tg3_stop(tp);
11983
11984         tg3_carrier_off(tp);
11985
11986         tg3_start(tp, true, false, false);
11987
11988         return 0;
11989 }
11990
11991 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11992 {
11993         switch (stringset) {
11994         case ETH_SS_STATS:
11995                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11996                 break;
11997         case ETH_SS_TEST:
11998                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11999                 break;
12000         default:
12001                 WARN_ON(1);     /* we need a WARN() */
12002                 break;
12003         }
12004 }
12005
12006 static int tg3_set_phys_id(struct net_device *dev,
12007                             enum ethtool_phys_id_state state)
12008 {
12009         struct tg3 *tp = netdev_priv(dev);
12010
12011         if (!netif_running(tp->dev))
12012                 return -EAGAIN;
12013
12014         switch (state) {
12015         case ETHTOOL_ID_ACTIVE:
12016                 return 1;       /* cycle on/off once per second */
12017
12018         case ETHTOOL_ID_ON:
12019                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12020                      LED_CTRL_1000MBPS_ON |
12021                      LED_CTRL_100MBPS_ON |
12022                      LED_CTRL_10MBPS_ON |
12023                      LED_CTRL_TRAFFIC_OVERRIDE |
12024                      LED_CTRL_TRAFFIC_BLINK |
12025                      LED_CTRL_TRAFFIC_LED);
12026                 break;
12027
12028         case ETHTOOL_ID_OFF:
12029                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12030                      LED_CTRL_TRAFFIC_OVERRIDE);
12031                 break;
12032
12033         case ETHTOOL_ID_INACTIVE:
12034                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12035                 break;
12036         }
12037
12038         return 0;
12039 }
12040
12041 static void tg3_get_ethtool_stats(struct net_device *dev,
12042                                    struct ethtool_stats *estats, u64 *tmp_stats)
12043 {
12044         struct tg3 *tp = netdev_priv(dev);
12045
12046         if (tp->hw_stats)
12047                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12048         else
12049                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12050 }
12051
/* Read the device's VPD (Vital Product Data) block into a freshly
 * kmalloc'd buffer.  Data comes from NVRAM when a valid EEPROM image
 * exists, otherwise through the PCI VPD capability.  On success the
 * buffer is returned and its length stored in *vpdlen; the caller
 * must kfree() it.  Returns NULL on any failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Walk the NVRAM directory looking for an extended-VPD
		 * entry that overrides the default VPD location.
		 */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Entry found: the length is encoded in the
			 * directory word, the next word holds the NVRAM
			 * address of the VPD data.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		/* No extended-VPD entry; use the fixed default region. */
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		/* No usable NVRAM image: read through the PCI VPD
		 * capability, retrying up to 3 times after a timeout
		 * or interrupted read (cnt reset to 0 keeps pos/ptr
		 * in place for the retry).
		 */
		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
12127
/* Byte counts to validate for each recognized NVRAM image format. */
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

/* ethtool self-test: verify the NVRAM image integrity.  Handles the
 * legacy EEPROM layout (CRC-protected), the self-boot firmware format
 * revisions 0-6 (byte-sum checksum) and the hardware self-boot format
 * (per-byte parity), then cross-checks the VPD "RV" checksum keyword
 * when a VPD block is present.  Returns 0 on success, -EIO or -ENOMEM
 * on failure; devices without NVRAM trivially pass.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	/* Pick how many bytes to read and check based on the magic. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the whole region; a short read fails the test. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		/* Self-boot firmware: a simple 8-bit byte sum over the
		 * image must come out to zero.
		 */
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		/* Hardware self-boot: parity bits are packed into the
		 * bytes at offsets 0, 8 and 16/17; every data byte must
		 * have odd parity together with its stored parity bit.
		 */
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			/* Stored parity bit must complement the data's
			 * own bit-count parity (overall odd parity).
			 */
			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Legacy EEPROM image: two CRC32-protected sections. */
	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	/* Finally validate the VPD block's "RV" checksum keyword, if
	 * the read-only VPD section and the keyword exist.
	 */
	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* Byte-sum from the VPD start through the
			 * checksum byte itself must be zero.
			 */
			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
12316
12317 #define TG3_SERDES_TIMEOUT_SEC  2
12318 #define TG3_COPPER_TIMEOUT_SEC  6
12319
12320 static int tg3_test_link(struct tg3 *tp)
12321 {
12322         int i, max;
12323
12324         if (!netif_running(tp->dev))
12325                 return -ENODEV;
12326
12327         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12328                 max = TG3_SERDES_TIMEOUT_SEC;
12329         else
12330                 max = TG3_COPPER_TIMEOUT_SEC;
12331
12332         for (i = 0; i < max; i++) {
12333                 if (tp->link_up)
12334                         return 0;
12335
12336                 if (msleep_interruptible(1000))
12337                         break;
12338         }
12339
12340         return -EIO;
12341 }
12342
/* Only test the commonly used registers */
/* ethtool self-test: walk a table of register descriptors.  For each
 * applicable entry the original value is saved, then zeros and ones
 * are written; the read-only bits (read_mask) must stay unchanged and
 * the writable bits (write_mask) must take the written value.  The
 * original value is restored in all cases.  Returns 0 on success,
 * -EIO at the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
/* Entry applicability flags (which chip families the row applies to) */
#define TG3_FL_5705     0x1
#define TG3_FL_NOT_5705 0x2
#define TG3_FL_NOT_5788 0x4
#define TG3_FL_NOT_5750 0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* table terminator */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that don't apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		/* Restore the original value before moving on. */
		tw32(offset, save_val);
	}

	return 0;

out:
	/* A register failed: log it and restore the saved value. */
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
12563
12564 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12565 {
12566         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12567         int i;
12568         u32 j;
12569
12570         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12571                 for (j = 0; j < len; j += 4) {
12572                         u32 val;
12573
12574                         tg3_write_mem(tp, offset + j, test_pattern[i]);
12575                         tg3_read_mem(tp, offset + j, &val);
12576                         if (val != test_pattern[i])
12577                                 return -EIO;
12578                 }
12579         }
12580         return 0;
12581 }
12582
/* ethtool self-test: run tg3_do_mem_test() over every on-chip SRAM
 * region valid for this chip generation.  Returns 0 on success or the
 * first failing region's error code.
 */
static int tg3_test_memory(struct tg3 *tp)
{
	/* Per-family tables of { SRAM offset, length }, each terminated
	 * by an offset of 0xffffffff.
	 */
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Select the region table matching this ASIC, most specific
	 * family first.
	 */
	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
12652
/* MSS used for the TSO loopback self-test frame. */
#define TG3_TSO_MSS             500

/* Component lengths of the canned header below. */
#define TG3_TSO_IP_HDR_LEN      20
#define TG3_TSO_TCP_HDR_LEN     20
#define TG3_TSO_TCP_OPT_LEN     12

/* Template for the TSO loopback test frame: ethertype, a 20-byte IPv4
 * header and a TCP header with 12 bytes of options.  It is copied in
 * after the two MAC addresses, and fields such as the IP total length
 * and TCP checksum are patched at transmit time (see tg3_run_loopback).
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,			/* ethertype: IPv4 */
0x45, 0x00, 0x00, 0x00,		/* IP: ver/IHL, TOS, total length (patched) */
0x00, 0x00, 0x40, 0x00,		/* IP: id, flags (DF) + frag offset */
0x40, 0x06, 0x00, 0x00,		/* IP: TTL 64, proto TCP, header checksum */
0x0a, 0x00, 0x00, 0x01,		/* IP: source 10.0.0.1 */
0x0a, 0x00, 0x00, 0x02,		/* IP: destination 10.0.0.2 */
0x0d, 0x00, 0xe0, 0x00,		/* TCP: source/destination ports */
0x00, 0x00, 0x01, 0x00,		/* TCP: sequence number */
0x00, 0x00, 0x02, 0x00,		/* TCP: acknowledgment number */
0x80, 0x10, 0x10, 0x00,		/* TCP: data offset 8 words, ACK, window */
0x14, 0x09, 0x00, 0x00,		/* TCP: checksum, urgent pointer */
0x01, 0x01, 0x08, 0x0a,		/* TCP opts: NOP, NOP, timestamp (kind 8) */
0x11, 0x11, 0x11, 0x11,		/* TCP opts: timestamp value (filler) */
0x11, 0x11, 0x11, 0x11,		/* TCP opts: timestamp echo (filler) */
};
12675
/* Transmit one frame while the device is in loopback mode and verify
 * that it comes back intact.
 *
 * @tp:           driver private state; the caller must already have put
 *                the device into MAC, PHY, or external loopback.
 * @pktsz:        total length of the frame to send, in bytes.
 * @tso_loopback: when true, build a canned TSO frame from tg3_tso_header
 *                so the hardware segments it into multiple packets;
 *                when false, send a single raw Ethernet frame.
 *
 * Returns 0 when the expected number of packets arrive with the expected
 * payload, -ENOMEM if the skb cannot be allocated, -EIO otherwise.
 */
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	/* With RSS/TSS enabled the first data queues live on napi[1];
	 * otherwise everything runs on napi[0].
	 */
	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	/* Destination MAC is our own address; the following 8 bytes
	 * (source MAC + EtherType) are zeroed.
	 */
	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	/* Make sure the MAC accepts a frame of this size */
	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		/* Copy the canned IP/TCP header template in after the
		 * two MAC addresses.
		 */
		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		/* Hardware emits one packet per MSS worth of payload */
		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			/* HW-TSO devices fill in the TCP checksum; seed
			 * the field with zero.
			 */
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		/* Encode the header length into the descriptor in the
		 * format each TSO hardware generation expects.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;

		/* Oversized frames need the jumbo BD flag on devices
		 * that use it.
		 */
		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
		    tx_len > VLAN_ETH_FRAME_LEN)
			base_flags |= TXD_FLAG_JMB_PKT;
	}

	/* Fill the payload with a predictable pattern that the receive
	 * side can verify byte-for-byte.
	 */
	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	/* Force a coalescing tick so the rx producer index is current
	 * before we record its starting value.
	 */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	/* Sync BD data before updating mailbox */
	wmb();

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		/* Done once the frame was consumed on tx and every
		 * expected packet showed up on rx.
		 */
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Walk each received packet, checking descriptor error flags,
	 * length, ring placement, and payload.  val tracks the expected
	 * pattern byte across packets.
	 */
	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			/* Frame must land in the ring matching its size */
			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			/* TSO packets must arrive with a good checksum */
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}
12883
/* Per-mode failure bits accumulated into the ethtool data[] entries by
 * tg3_test_loopback().
 */
#define TG3_STD_LOOPBACK_FAILED         1
#define TG3_JMB_LOOPBACK_FAILED         2
#define TG3_TSO_LOOPBACK_FAILED         4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)
12891
/* Run the MAC, PHY, and (optionally) external loopback tests.
 *
 * @tp:         driver private state.
 * @data:       ethtool results; the TG3_MAC/PHY/EXT_LOOPB_TEST entries
 *              accumulate TG3_*_LOOPBACK_FAILED bits.
 * @do_extlpbk: also run the external loopback variant.
 *
 * Returns 0 when every executed loopback test passed, -EIO otherwise.
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	/* Cap the jumbo test frame when a DMA limit is set */
	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	/* Temporarily clear the EEE capability flag for the duration of
	 * the test; it is restored at "done" below.
	 */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	/* Every loopback mode fails outright if the interface is down */
	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	/* ...and likewise if the hardware cannot be reinitialized */
	err = tg3_reset_hw(tp, 1);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	/* PHY loopback is skipped for serdes PHYs and when phylib
	 * manages the PHY.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	/* Overall result: fail if any mode recorded a failure bit */
	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
13006
/* ethtool self-test handler (ETHTOOL_TEST).
 *
 * Each failing test sets its data[TG3_*_TEST] entry to 1 (loopback
 * entries carry TG3_*_LOOPBACK_FAILED bitmasks instead) and flags
 * ETH_TEST_FL_FAILED in etest->flags.  With ETH_TEST_FL_OFFLINE,
 * traffic is stopped, the intrusive register/memory/loopback/interrupt
 * tests are run, and the hardware is restarted afterwards.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	/* The device must be powered up to test; mark every test failed
	 * if that is not possible.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	/* NVRAM and link tests are non-intrusive and run first */
	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		/* Quiesce the device before the intrusive tests */
		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		/* Only MII-serdes PHYs are reset here */
		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* Interrupt test runs with the full lock dropped */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Restore normal operation */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
13093
13094 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13095                               struct ifreq *ifr, int cmd)
13096 {
13097         struct tg3 *tp = netdev_priv(dev);
13098         struct hwtstamp_config stmpconf;
13099
13100         if (!tg3_flag(tp, PTP_CAPABLE))
13101                 return -EINVAL;
13102
13103         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13104                 return -EFAULT;
13105
13106         if (stmpconf.flags)
13107                 return -EINVAL;
13108
13109         switch (stmpconf.tx_type) {
13110         case HWTSTAMP_TX_ON:
13111                 tg3_flag_set(tp, TX_TSTAMP_EN);
13112                 break;
13113         case HWTSTAMP_TX_OFF:
13114                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13115                 break;
13116         default:
13117                 return -ERANGE;
13118         }
13119
13120         switch (stmpconf.rx_filter) {
13121         case HWTSTAMP_FILTER_NONE:
13122                 tp->rxptpctl = 0;
13123                 break;
13124         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13125                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13126                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13127                 break;
13128         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13129                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13130                                TG3_RX_PTP_CTL_SYNC_EVNT;
13131                 break;
13132         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13133                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13134                                TG3_RX_PTP_CTL_DELAY_REQ;
13135                 break;
13136         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13137                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13138                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13139                 break;
13140         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13141                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13142                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13143                 break;
13144         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13145                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13146                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13147                 break;
13148         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13149                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13150                                TG3_RX_PTP_CTL_SYNC_EVNT;
13151                 break;
13152         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13153                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13154                                TG3_RX_PTP_CTL_SYNC_EVNT;
13155                 break;
13156         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13157                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13158                                TG3_RX_PTP_CTL_SYNC_EVNT;
13159                 break;
13160         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13161                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13162                                TG3_RX_PTP_CTL_DELAY_REQ;
13163                 break;
13164         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13165                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13166                                TG3_RX_PTP_CTL_DELAY_REQ;
13167                 break;
13168         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13169                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13170                                TG3_RX_PTP_CTL_DELAY_REQ;
13171                 break;
13172         default:
13173                 return -ERANGE;
13174         }
13175
13176         if (netif_running(dev) && tp->rxptpctl)
13177                 tw32(TG3_RX_PTP_CTL,
13178                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13179
13180         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13181                 -EFAULT : 0;
13182 }
13183
13184 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13185 {
13186         struct mii_ioctl_data *data = if_mii(ifr);
13187         struct tg3 *tp = netdev_priv(dev);
13188         int err;
13189
13190         if (tg3_flag(tp, USE_PHYLIB)) {
13191                 struct phy_device *phydev;
13192                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13193                         return -EAGAIN;
13194                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13195                 return phy_mii_ioctl(phydev, ifr, cmd);
13196         }
13197
13198         switch (cmd) {
13199         case SIOCGMIIPHY:
13200                 data->phy_id = tp->phy_addr;
13201
13202                 /* fallthru */
13203         case SIOCGMIIREG: {
13204                 u32 mii_regval;
13205
13206                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13207                         break;                  /* We have no PHY */
13208
13209                 if (!netif_running(dev))
13210                         return -EAGAIN;
13211
13212                 spin_lock_bh(&tp->lock);
13213                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13214                                     data->reg_num & 0x1f, &mii_regval);
13215                 spin_unlock_bh(&tp->lock);
13216
13217                 data->val_out = mii_regval;
13218
13219                 return err;
13220         }
13221
13222         case SIOCSMIIREG:
13223                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13224                         break;                  /* We have no PHY */
13225
13226                 if (!netif_running(dev))
13227                         return -EAGAIN;
13228
13229                 spin_lock_bh(&tp->lock);
13230                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13231                                      data->reg_num & 0x1f, data->val_in);
13232                 spin_unlock_bh(&tp->lock);
13233
13234                 return err;
13235
13236         case SIOCSHWTSTAMP:
13237                 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13238
13239         default:
13240                 /* do nothing */
13241                 break;
13242         }
13243         return -EOPNOTSUPP;
13244 }
13245
13246 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13247 {
13248         struct tg3 *tp = netdev_priv(dev);
13249
13250         memcpy(ec, &tp->coal, sizeof(*ec));
13251         return 0;
13252 }
13253
13254 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13255 {
13256         struct tg3 *tp = netdev_priv(dev);
13257         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13258         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13259
13260         if (!tg3_flag(tp, 5705_PLUS)) {
13261                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13262                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13263                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13264                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13265         }
13266
13267         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13268             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13269             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13270             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13271             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13272             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13273             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13274             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13275             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13276             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13277                 return -EINVAL;
13278
13279         /* No rx interrupts will be generated if both are zero */
13280         if ((ec->rx_coalesce_usecs == 0) &&
13281             (ec->rx_max_coalesced_frames == 0))
13282                 return -EINVAL;
13283
13284         /* No tx interrupts will be generated if both are zero */
13285         if ((ec->tx_coalesce_usecs == 0) &&
13286             (ec->tx_max_coalesced_frames == 0))
13287                 return -EINVAL;
13288
13289         /* Only copy relevant parameters, ignore all others. */
13290         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13291         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13292         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13293         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13294         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13295         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13296         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13297         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13298         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13299
13300         if (netif_running(dev)) {
13301                 tg3_full_lock(tp, 0);
13302                 __tg3_set_coalesce(tp, &tp->coal);
13303                 tg3_full_unlock(tp);
13304         }
13305         return 0;
13306 }
13307
/* ethtool entry points exported by this driver. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
};
13342
13343 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13344                                                 struct rtnl_link_stats64 *stats)
13345 {
13346         struct tg3 *tp = netdev_priv(dev);
13347
13348         spin_lock_bh(&tp->lock);
13349         if (!tp->hw_stats) {
13350                 spin_unlock_bh(&tp->lock);
13351                 return &tp->net_stats_prev;
13352         }
13353
13354         tg3_get_nstats(tp, stats);
13355         spin_unlock_bh(&tp->lock);
13356
13357         return stats;
13358 }
13359
/* ndo_set_rx_mode: apply the current rx filtering configuration under
 * the full lock.  A no-op while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
13371
13372 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13373                                int new_mtu)
13374 {
13375         dev->mtu = new_mtu;
13376
13377         if (new_mtu > ETH_DATA_LEN) {
13378                 if (tg3_flag(tp, 5780_CLASS)) {
13379                         netdev_update_features(dev);
13380                         tg3_flag_clear(tp, TSO_CAPABLE);
13381                 } else {
13382                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
13383                 }
13384         } else {
13385                 if (tg3_flag(tp, 5780_CLASS)) {
13386                         tg3_flag_set(tp, TSO_CAPABLE);
13387                         netdev_update_features(dev);
13388                 }
13389                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13390         }
13391 }
13392
13393 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13394 {
13395         struct tg3 *tp = netdev_priv(dev);
13396         int err, reset_phy = 0;
13397
13398         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13399                 return -EINVAL;
13400
13401         if (!netif_running(dev)) {
13402                 /* We'll just catch it later when the
13403                  * device is up'd.
13404                  */
13405                 tg3_set_mtu(dev, tp, new_mtu);
13406                 return 0;
13407         }
13408
13409         tg3_phy_stop(tp);
13410
13411         tg3_netif_stop(tp);
13412
13413         tg3_full_lock(tp, 1);
13414
13415         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13416
13417         tg3_set_mtu(dev, tp, new_mtu);
13418
13419         /* Reset PHY, otherwise the read DMA engine will be in a mode that
13420          * breaks all requests to 256 bytes.
13421          */
13422         if (tg3_asic_rev(tp) == ASIC_REV_57766)
13423                 reset_phy = 1;
13424
13425         err = tg3_restart_hw(tp, reset_phy);
13426
13427         if (!err)
13428                 tg3_netif_start(tp);
13429
13430         tg3_full_unlock(tp);
13431
13432         if (!err)
13433                 tg3_phy_start(tp);
13434
13435         return err;
13436 }
13437
/* net_device operations exported by this driver. */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
13455
13456 static void tg3_get_eeprom_size(struct tg3 *tp)
13457 {
13458         u32 cursize, val, magic;
13459
13460         tp->nvram_size = EEPROM_CHIP_SIZE;
13461
13462         if (tg3_nvram_read(tp, 0, &magic) != 0)
13463                 return;
13464
13465         if ((magic != TG3_EEPROM_MAGIC) &&
13466             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13467             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13468                 return;
13469
13470         /*
13471          * Size the chip by reading offsets at increasing powers of two.
13472          * When we encounter our validation signature, we know the addressing
13473          * has wrapped around, and thus have our chip size.
13474          */
13475         cursize = 0x10;
13476
13477         while (cursize < tp->nvram_size) {
13478                 if (tg3_nvram_read(tp, cursize, &val) != 0)
13479                         return;
13480
13481                 if (val == magic)
13482                         break;
13483
13484                 cursize <<= 1;
13485         }
13486
13487         tp->nvram_size = cursize;
13488 }
13489
13490 static void tg3_get_nvram_size(struct tg3 *tp)
13491 {
13492         u32 val;
13493
13494         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13495                 return;
13496
13497         /* Selfboot format */
13498         if (val != TG3_EEPROM_MAGIC) {
13499                 tg3_get_eeprom_size(tp);
13500                 return;
13501         }
13502
13503         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13504                 if (val != 0) {
13505                         /* This is confusing.  We want to operate on the
13506                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
13507                          * call will read from NVRAM and byteswap the data
13508                          * according to the byteswapping settings for all
13509                          * other register accesses.  This ensures the data we
13510                          * want will always reside in the lower 16-bits.
13511                          * However, the data in NVRAM is in LE format, which
13512                          * means the data from the NVRAM read will always be
13513                          * opposite the endianness of the CPU.  The 16-bit
13514                          * byteswap then brings the data to CPU endianness.
13515                          */
13516                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13517                         return;
13518                 }
13519         }
13520         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13521 }
13522
/* Decode NVRAM_CFG1 for the pre-5752 NVRAM interface: record whether a
 * flash part is strapped in, and for 5750/5780-class chips decode the
 * vendor field into JEDEC id, page size, and buffered-ness.
 */
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* No flash interface strapped: clear the compatibility
		 * bypass bit and write the config back.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		/* Older chips: assume a buffered Atmel part. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
13573
13574 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13575 {
13576         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13577         case FLASH_5752PAGE_SIZE_256:
13578                 tp->nvram_pagesize = 256;
13579                 break;
13580         case FLASH_5752PAGE_SIZE_512:
13581                 tp->nvram_pagesize = 512;
13582                 break;
13583         case FLASH_5752PAGE_SIZE_1K:
13584                 tp->nvram_pagesize = 1024;
13585                 break;
13586         case FLASH_5752PAGE_SIZE_2K:
13587                 tp->nvram_pagesize = 2048;
13588                 break;
13589         case FLASH_5752PAGE_SIZE_4K:
13590                 tp->nvram_pagesize = 4096;
13591                 break;
13592         case FLASH_5752PAGE_SIZE_264:
13593                 tp->nvram_pagesize = 264;
13594                 break;
13595         case FLASH_5752PAGE_SIZE_528:
13596                 tp->nvram_pagesize = 528;
13597                 break;
13598         }
13599 }
13600
/* Decode NVRAM_CFG1 on 5752-class chips: note TPM protection, map the
 * vendor strap to a JEDEC id and flags, then set the page size (from
 * the straps for flash, or the full chip size for EEPROM parts).
 */
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* Clear the compatibility bypass bit and write back. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
13641
/* Decode NVRAM_CFG1 on 5755-class chips.  The size assigned per vendor
 * strap is reduced when the TPM protection bit is set, since part of
 * the device is then locked out.
 */
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		/* Size depends on the specific Atmel part and whether the
		 * protected region shrinks the usable range.
		 */
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
13697
/* Decode NVRAM_CFG1 on 5787/5784/5785-class chips into JEDEC id,
 * buffered/flash flags, and page size.
 */
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM part: clear the compatibility bypass bit and
		 * write the config back.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
13735
/* Decode NVRAM_CFG1 on 5761-class chips.  When the TPM protection bit
 * is set, the usable size comes from the NVRAM_ADDR_LOCKOUT register;
 * otherwise it is derived from the vendor strap.
 */
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		/* Atmel parts here use flat addressing. */
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		/* Protected: the lockout register bounds the usable size. */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
13810
13811 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13812 {
13813         tp->nvram_jedecnum = JEDEC_ATMEL;
13814         tg3_flag_set(tp, NVRAM_BUFFERED);
13815         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13816 }
13817
/* Decode NVRAM_CFG1 on 57780-class (and 57765-class) chips.  An outer
 * switch selects the vendor family; an inner switch on the same strap
 * value picks the exact part size.  Unknown straps mean no NVRAM.
 */
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM part: clear the compatibility bypass bit and
		 * skip the flash page-size handling below.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Pick the device size for the specific Atmel part. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Pick the device size for the specific ST part. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264/528-byte pages use the translated addressing scheme. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13889
13890
/* Decode NVRAM_CFG1 on 5717/5719-class chips.  Same two-level pattern
 * as tg3_get_57780_nvram_info(): the outer switch selects the vendor
 * family, the inner switch the exact size.  Unknown straps => no NVRAM.
 */
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM part: clear the compatibility bypass bit and
		 * skip the flash page-size handling below.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264/528-byte pages use the translated addressing scheme. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13968
/* Decode NVRAM_CFG1 on 5720/5762-class chips.  5762 strap values are
 * first remapped onto their 5720 equivalents; the main switch then
 * selects vendor, flags, and size.  On 5762 a final magic-number read
 * double-checks that the NVRAM contents are actually usable.
 */
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		/* Translate 5762-specific straps to 5720 equivalents. */
		switch (nvmpinstrp) {
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		/* EEPROM part: clear the compatibility bypass bit and
		 * skip the flash page-size handling below.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			/* On 5762 leave the size for later detection. */
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			/* On 5762 leave the size for later detection. */
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264/528-byte pages use the translated addressing scheme. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		/* Without a recognized magic number, treat as no NVRAM. */
		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
14115
14116 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14117 static void tg3_nvram_init(struct tg3 *tp)
14118 {
14119         if (tg3_flag(tp, IS_SSB_CORE)) {
14120                 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14121                 tg3_flag_clear(tp, NVRAM);
14122                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14123                 tg3_flag_set(tp, NO_NVRAM);
14124                 return;
14125         }
14126
14127         tw32_f(GRC_EEPROM_ADDR,
14128              (EEPROM_ADDR_FSM_RESET |
14129               (EEPROM_DEFAULT_CLOCK_PERIOD <<
14130                EEPROM_ADDR_CLKPERD_SHIFT)));
14131
14132         msleep(1);
14133
14134         /* Enable seeprom accesses. */
14135         tw32_f(GRC_LOCAL_CTRL,
14136              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14137         udelay(100);
14138
14139         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14140             tg3_asic_rev(tp) != ASIC_REV_5701) {
14141                 tg3_flag_set(tp, NVRAM);
14142
14143                 if (tg3_nvram_lock(tp)) {
14144                         netdev_warn(tp->dev,
14145                                     "Cannot get nvram lock, %s failed\n",
14146                                     __func__);
14147                         return;
14148                 }
14149                 tg3_enable_nvram_access(tp);
14150
14151                 tp->nvram_size = 0;
14152
14153                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14154                         tg3_get_5752_nvram_info(tp);
14155                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14156                         tg3_get_5755_nvram_info(tp);
14157                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14158                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
14159                          tg3_asic_rev(tp) == ASIC_REV_5785)
14160                         tg3_get_5787_nvram_info(tp);
14161                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14162                         tg3_get_5761_nvram_info(tp);
14163                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14164                         tg3_get_5906_nvram_info(tp);
14165                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14166                          tg3_flag(tp, 57765_CLASS))
14167                         tg3_get_57780_nvram_info(tp);
14168                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14169                          tg3_asic_rev(tp) == ASIC_REV_5719)
14170                         tg3_get_5717_nvram_info(tp);
14171                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14172                          tg3_asic_rev(tp) == ASIC_REV_5762)
14173                         tg3_get_5720_nvram_info(tp);
14174                 else
14175                         tg3_get_nvram_info(tp);
14176
14177                 if (tp->nvram_size == 0)
14178                         tg3_get_nvram_size(tp);
14179
14180                 tg3_disable_nvram_access(tp);
14181                 tg3_nvram_unlock(tp);
14182
14183         } else {
14184                 tg3_flag_clear(tp, NVRAM);
14185                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14186
14187                 tg3_get_eeprom_size(tp);
14188         }
14189 }
14190
/* Maps a board's PCI subsystem vendor/device ID pair to a PHY ID.
 * NOTE(review): entries in the table below use a phy_id of 0 for some
 * boards — presumably "no PHY override"; confirm against the lookup
 * code.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;	/* PCI subsystem IDs */
	u32 phy_id;				/* expected TG3_PHY_ID_* value */
};
14195
/* Hard-coded board table: known (subsystem vendor, subsystem device)
 * pairs and the PHY each board carries.  Entries with a 0 phy_id are
 * boards whose PHY must be identified some other way (e.g. serdes).
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
        /* Broadcom boards. */
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

        /* 3com boards. */
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

        /* DELL boards. */
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

        /* Compaq boards. */
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

        /* IBM boards. */
        { TG3PCI_SUBVENDOR_ID_IBM,
          TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
14259
14260 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14261 {
14262         int i;
14263
14264         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14265                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14266                      tp->pdev->subsystem_vendor) &&
14267                     (subsys_id_to_phy_id[i].subsys_devid ==
14268                      tp->pdev->subsystem_device))
14269                         return &subsys_id_to_phy_id[i];
14270         }
14271         return NULL;
14272 }
14273
/* Read the persistent hardware configuration (from the VCPU shadow
 * register on 5906, otherwise from the NIC SRAM data area written by
 * bootcode) and translate it into driver state: PHY ID, serdes vs
 * copper, LED mode, write-protect/NIC flags, ASF/APE enables and
 * wake-on-LAN capability.  Runs at probe time, before the PHY is used.
 */
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
        u32 val;

        tp->phy_id = TG3_PHY_ID_INVALID;
        tp->led_ctrl = LED_CTRL_MODE_PHY_1;

        /* Assume an onboard device and WOL capable by default.  */
        tg3_flag_set(tp, EEPROM_WRITE_PROT);
        tg3_flag_set(tp, WOL_CAP);

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                /* 5906 exposes its config through the VCPU shadow
                 * register instead of the SRAM data area parsed below.
                 */
                if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
                        tg3_flag_clear(tp, EEPROM_WRITE_PROT);
                        tg3_flag_set(tp, IS_NIC);
                }
                val = tr32(VCPU_CFGSHDW);
                if (val & VCPU_CFGSHDW_ASPM_DBNC)
                        tg3_flag_set(tp, ASPM_WORKAROUND);
                if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
                    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
                        tg3_flag_set(tp, WOL_ENABLE);
                        device_set_wakeup_enable(&tp->pdev->dev, true);
                }
                goto done;
        }

        /* The SRAM data area is only trustworthy if bootcode stamped
         * the magic signature word; otherwise keep the defaults above.
         */
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg, led_cfg;
                u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
                int eeprom_phy_serdes = 0;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                tp->nic_sram_data_cfg = nic_cfg;

                /* CFG_2 only exists for certain ASICs and bootcode
                 * versions (0 < ver < 0x100).
                 */
                tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
                ver >>= NIC_SRAM_DATA_VER_SHIFT;
                if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
                    tg3_asic_rev(tp) != ASIC_REV_5703 &&
                    (ver > 0) && (ver < 0x100))
                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

                if (tg3_asic_rev(tp) == ASIC_REV_5785)
                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

                if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
                    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
                        eeprom_phy_serdes = 1;

                /* Repack the SRAM-format PHY ID words into the driver's
                 * internal phy_id layout (same packing as tg3_phy_probe
                 * uses for MII_PHYSID1/2).
                 */
                tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
                if (nic_phy_id != 0) {
                        u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
                        u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

                        eeprom_phy_id  = (id1 >> 16) << 10;
                        eeprom_phy_id |= (id2 & 0xfc00) << 16;
                        eeprom_phy_id |= (id2 & 0x03ff) <<  0;
                } else
                        eeprom_phy_id = 0;

                tp->phy_id = eeprom_phy_id;
                if (eeprom_phy_serdes) {
                        if (!tg3_flag(tp, 5705_PLUS))
                                tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
                        else
                                tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
                }

                if (tg3_flag(tp, 5750_PLUS))
                        led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
                                    SHASTA_EXT_LED_MODE_MASK);
                else
                        led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

                switch (led_cfg) {
                default:
                case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
                        tp->led_ctrl = LED_CTRL_MODE_PHY_1;
                        break;

                case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
                        tp->led_ctrl = LED_CTRL_MODE_PHY_2;
                        break;

                case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
                        tp->led_ctrl = LED_CTRL_MODE_MAC;

                        /* Default to PHY_1_MODE if 0 (MAC_MODE) is
                         * read on some older 5700/5701 bootcode.
                         */
                        if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
                            tg3_asic_rev(tp) == ASIC_REV_5701)
                                tp->led_ctrl = LED_CTRL_MODE_PHY_1;

                        break;

                case SHASTA_EXT_LED_SHARED:
                        tp->led_ctrl = LED_CTRL_MODE_SHARED;
                        /* 5750 A0/A1 need the PHY LED bits as well. */
                        if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
                            tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
                                tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
                                                 LED_CTRL_MODE_PHY_2);
                        break;

                case SHASTA_EXT_LED_MAC:
                        tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
                        break;

                case SHASTA_EXT_LED_COMBO:
                        tp->led_ctrl = LED_CTRL_MODE_COMBO;
                        if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
                                tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
                                                 LED_CTRL_MODE_PHY_2);
                        break;

                }

                /* Board-specific LED override for Dell 5700/5701. */
                if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
                     tg3_asic_rev(tp) == ASIC_REV_5701) &&
                    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
                        tp->led_ctrl = LED_CTRL_MODE_PHY_2;

                if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
                        tp->led_ctrl = LED_CTRL_MODE_PHY_1;

                if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
                        tg3_flag_set(tp, EEPROM_WRITE_PROT);
                        /* Arima boards 0x205a/0x2063 set WP in the
                         * config word but must stay writable.
                         */
                        if ((tp->pdev->subsystem_vendor ==
                             PCI_VENDOR_ID_ARIMA) &&
                            (tp->pdev->subsystem_device == 0x205a ||
                             tp->pdev->subsystem_device == 0x2063))
                                tg3_flag_clear(tp, EEPROM_WRITE_PROT);
                } else {
                        tg3_flag_clear(tp, EEPROM_WRITE_PROT);
                        tg3_flag_set(tp, IS_NIC);
                }

                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tg3_flag_set(tp, ENABLE_ASF);
                        if (tg3_flag(tp, 5750_PLUS))
                                tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
                }

                if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
                    tg3_flag(tp, 5750_PLUS))
                        tg3_flag_set(tp, ENABLE_APE);

                /* Serdes boards without fiber WOL support lose the
                 * default WOL capability assumed above.
                 */
                if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
                    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
                        tg3_flag_clear(tp, WOL_CAP);

                if (tg3_flag(tp, WOL_CAP) &&
                    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
                        tg3_flag_set(tp, WOL_ENABLE);
                        device_set_wakeup_enable(&tp->pdev->dev, true);
                }

                if (cfg2 & (1 << 17))
                        tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

                /* serdes signal pre-emphasis in register 0x590 set by */
                /* bootcode if bit 18 is set */
                if (cfg2 & (1 << 18))
                        tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

                if ((tg3_flag(tp, 57765_PLUS) ||
                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
                      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
                    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
                        tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

                if (tg3_flag(tp, PCI_EXPRESS) &&
                    tg3_asic_rev(tp) != ASIC_REV_5785 &&
                    !tg3_flag(tp, 57765_PLUS)) {
                        u32 cfg3;

                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
                        if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
                                tg3_flag_set(tp, ASPM_WORKAROUND);
                }

                if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
                        tg3_flag_set(tp, RGMII_INBAND_DISABLE);
                if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
                        tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
                if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
                        tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
        }
done:
        /* Publish the final WOL capability/enable state to the PM core. */
        if (tg3_flag(tp, WOL_CAP))
                device_set_wakeup_enable(&tp->pdev->dev,
                                         tg3_flag(tp, WOL_ENABLE));
        else
                device_set_wakeup_capable(&tp->pdev->dev, false);
}
14471
/* Read one 32-bit word from the APE OTP region into *val.
 *
 * Takes the NVRAM lock around the whole transaction.  @offset is
 * scaled by 8 before being written to the OTP address register
 * (presumably OTP rows are addressed in 8-byte units — confirm
 * against the APE OTP documentation).
 *
 * Returns 0 on success, -EBUSY if the command never completes within
 * ~1 ms of polling, or the tg3_nvram_lock() error.
 */
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
        int i, err;
        u32 val2, off = offset * 8;

        err = tg3_nvram_lock(tp);
        if (err)
                return err;

        /* Program the address, then kick off a read command.  The
         * read-back of TG3_APE_OTP_CTRL flushes the posted write
         * before the fixed settle delay.
         */
        tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
        tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
                        APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
        tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
        udelay(10);

        /* Poll for completion (100 * 10us = ~1 ms max). */
        for (i = 0; i < 100; i++) {
                val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
                if (val2 & APE_OTP_STATUS_CMD_DONE) {
                        *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
                        break;
                }
                udelay(10);
        }

        tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

        tg3_nvram_unlock(tp);
        /* val2 holds the last status sampled; done means *val is valid. */
        if (val2 & APE_OTP_STATUS_CMD_DONE)
                return 0;

        return -EBUSY;
}
14504
14505 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14506 {
14507         int i;
14508         u32 val;
14509
14510         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14511         tw32(OTP_CTRL, cmd);
14512
14513         /* Wait for up to 1 ms for command to execute. */
14514         for (i = 0; i < 100; i++) {
14515                 val = tr32(OTP_STATUS);
14516                 if (val & OTP_STATUS_CMD_DONE)
14517                         break;
14518                 udelay(10);
14519         }
14520
14521         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14522 }
14523
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 *
 * Returns 0 if any OTP command (init or either read) fails, so callers
 * must treat 0 as "no OTP config available".
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
        u32 bhalf_otp, thalf_otp;

        /* Route OTP accesses through the GRC register window. */
        tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

        if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
                return 0;

        /* First (top-half) word of the straddled config. */
        tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

        if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
                return 0;

        thalf_otp = tr32(OTP_READ_DATA);

        /* Second (bottom-half) word. */
        tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

        if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
                return 0;

        bhalf_otp = tr32(OTP_READ_DATA);

        /* Merge: low 16 bits of the first word become the high half,
         * high 16 bits of the second word become the low half.
         */
        return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
14553
14554 static void tg3_phy_init_link_config(struct tg3 *tp)
14555 {
14556         u32 adv = ADVERTISED_Autoneg;
14557
14558         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14559                 adv |= ADVERTISED_1000baseT_Half |
14560                        ADVERTISED_1000baseT_Full;
14561
14562         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14563                 adv |= ADVERTISED_100baseT_Half |
14564                        ADVERTISED_100baseT_Full |
14565                        ADVERTISED_10baseT_Half |
14566                        ADVERTISED_10baseT_Full |
14567                        ADVERTISED_TP;
14568         else
14569                 adv |= ADVERTISED_FIBRE;
14570
14571         tp->link_config.advertising = adv;
14572         tp->link_config.speed = SPEED_UNKNOWN;
14573         tp->link_config.duplex = DUPLEX_UNKNOWN;
14574         tp->link_config.autoneg = AUTONEG_ENABLE;
14575         tp->link_config.active_speed = SPEED_UNKNOWN;
14576         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14577
14578         tp->old_link = -1;
14579 }
14580
/* Identify the PHY attached to this device and initialize link
 * defaults.  PHY ID discovery order: hardware MII registers (unless
 * ASF/APE firmware owns the PHY), then the ID cached from EEPROM by
 * tg3_get_eeprom_hw_cfg(), then the hard-coded subsystem-ID table.
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_probe(struct tg3 *tp)
{
        u32 hw_phy_id_1, hw_phy_id_2;
        u32 hw_phy_id, hw_phy_id_masked;
        int err;

        /* flow control autonegotiation is default behavior */
        tg3_flag_set(tp, PAUSE_AUTONEG);
        tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

        /* Each PCI function arbitrates PHY access with the APE
         * firmware through its own lock.
         */
        if (tg3_flag(tp, ENABLE_APE)) {
                switch (tp->pci_fn) {
                case 0:
                        tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
                        break;
                case 1:
                        tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
                        break;
                case 2:
                        tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
                        break;
                case 3:
                        tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
                        break;
                }
        }

        /* When phylib manages the PHY, delegate everything to it. */
        if (tg3_flag(tp, USE_PHYLIB))
                return tg3_phy_init(tp);

        /* Reading the PHY ID register can conflict with ASF
         * firmware access to the PHY hardware.
         */
        err = 0;
        if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
                hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
        } else {
                /* Now read the physical PHY_ID from the chip and verify
                 * that it is sane.  If it doesn't look good, we fall back
                 * to either the hard-coded table based PHY_ID and failing
                 * that the value found in the eeprom area.
                 */
                err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
                err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

                /* Pack PHYSID1/PHYSID2 into the driver's phy_id layout
                 * (same packing tg3_get_eeprom_hw_cfg() uses).
                 */
                hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
                hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
                hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

                hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
        }

        if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
                tp->phy_id = hw_phy_id;
                if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
                        tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
                else
                        tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
        } else {
                if (tp->phy_id != TG3_PHY_ID_INVALID) {
                        /* Do nothing, phy ID already set up in
                         * tg3_get_eeprom_hw_cfg().
                         */
                } else {
                        struct subsys_tbl_ent *p;

                        /* No eeprom signature?  Try the hardcoded
                         * subsys device table.
                         */
                        p = tg3_lookup_by_subsys(tp);
                        if (p) {
                                tp->phy_id = p->phy_id;
                        } else if (!tg3_flag(tp, IS_SSB_CORE)) {
                                /* For now we saw the IDs 0xbc050cd0,
                                 * 0xbc050f80 and 0xbc050c30 on devices
                                 * connected to an BCM4785 and there are
                                 * probably more. Just assume that the phy is
                                 * supported when it is connected to a SSB core
                                 * for now.
                                 */
                                return -ENODEV;
                        }

                        /* A zero table entry (or BCM8002) means serdes. */
                        if (!tp->phy_id ||
                            tp->phy_id == TG3_PHY_ID_BCM8002)
                                tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
                }
        }

        /* Copper PHYs on these ASIC revisions support Energy Efficient
         * Ethernet (excluding the A0 steppings of 5717/57765).
         */
        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
            (tg3_asic_rev(tp) == ASIC_REV_5719 ||
             tg3_asic_rev(tp) == ASIC_REV_5720 ||
             tg3_asic_rev(tp) == ASIC_REV_57766 ||
             tg3_asic_rev(tp) == ASIC_REV_5762 ||
             (tg3_asic_rev(tp) == ASIC_REV_5717 &&
              tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
             (tg3_asic_rev(tp) == ASIC_REV_57765 &&
              tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
                tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

        tg3_phy_init_link_config(tp);

        /* Only reset/reconfigure a copper PHY we fully own (no
         * ASF/APE firmware) and only when link is currently down.
         */
        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
            !tg3_flag(tp, ENABLE_APE) &&
            !tg3_flag(tp, ENABLE_ASF)) {
                u32 bmsr, dummy;

                /* BMSR link status is latched; read twice for the
                 * current state.
                 */
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    (bmsr & BMSR_LSTATUS))
                        goto skip_phy_reset;

                err = tg3_phy_reset(tp);
                if (err)
                        return err;

                tg3_phy_set_wirespeed(tp);

                /* Restart autoneg only if the advertised config does
                 * not already match what we want.
                 */
                if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
                        tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
                                            tp->link_config.flowctrl);

                        tg3_writephy(tp, MII_BMCR,
                                     BMCR_ANENABLE | BMCR_ANRESTART);
                }
        }

skip_phy_reset:
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
                err = tg3_init_5401phy_dsp(tp);
                if (err)
                        return err;

                /* Run the DSP setup a second time; the second
                 * attempt's status is what gets returned.
                 */
                err = tg3_init_5401phy_dsp(tp);
        }

        return err;
}
14719
/* Extract the board part number (and, on Dell boards, a vendor
 * firmware version prefix) from the PCI VPD read-only section.  If no
 * usable VPD exists, fall back to a part-number string derived from
 * the PCI device ID, or "none".
 */
static void tg3_read_vpd(struct tg3 *tp)
{
        u8 *vpd_data;
        unsigned int block_end, rosize, len;
        u32 vpdlen;
        int j, i = 0;

        vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
        if (!vpd_data)
                goto out_no_vpd;

        /* Locate the read-only VPD resource and bound-check it. */
        i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
        if (i < 0)
                goto out_not_found;

        rosize = pci_vpd_lrdt_size(&vpd_data[i]);
        block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
        i += PCI_VPD_LRDT_TAG_SIZE;

        if (block_end > vpdlen)
                goto out_not_found;

        /* If the manufacturer keyword reads "1028" (Dell's PCI vendor
         * ID in hex — see PCI_VENDOR_ID_DELL), copy the VENDOR0
         * keyword into fw_ver as a " bc " prefix.
         */
        j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
                                      PCI_VPD_RO_KEYWORD_MFR_ID);
        if (j > 0) {
                len = pci_vpd_info_field_size(&vpd_data[j]);

                j += PCI_VPD_INFO_FLD_HDR_SIZE;
                if (j + len > block_end || len != 4 ||
                    memcmp(&vpd_data[j], "1028", 4))
                        goto partno;

                j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
                                              PCI_VPD_RO_KEYWORD_VENDOR0);
                if (j < 0)
                        goto partno;

                len = pci_vpd_info_field_size(&vpd_data[j]);

                j += PCI_VPD_INFO_FLD_HDR_SIZE;
                if (j + len > block_end)
                        goto partno;

                /* Clamp so the "%.*s" plus " bc " suffix fits. */
                if (len >= sizeof(tp->fw_ver))
                        len = sizeof(tp->fw_ver) - 1;
                memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
                snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
                         &vpd_data[j]);
        }

partno:
        /* Board part number proper (PN keyword). */
        i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
                                      PCI_VPD_RO_KEYWORD_PARTNO);
        if (i < 0)
                goto out_not_found;

        len = pci_vpd_info_field_size(&vpd_data[i]);

        i += PCI_VPD_INFO_FLD_HDR_SIZE;
        if (len > TG3_BPN_SIZE ||
            (len + i) > vpdlen)
                goto out_not_found;

        memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
        kfree(vpd_data);
        if (tp->board_part_number[0])
                return;

out_no_vpd:
        /* No VPD part number: synthesize one from the PCI device ID. */
        if (tg3_asic_rev(tp) == ASIC_REV_5717) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
                        strcpy(tp->board_part_number, "BCM5717");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
                        strcpy(tp->board_part_number, "BCM5718");
                else
                        goto nomatch;
        } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
                        strcpy(tp->board_part_number, "BCM57780");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
                        strcpy(tp->board_part_number, "BCM57760");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
                        strcpy(tp->board_part_number, "BCM57790");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
                        strcpy(tp->board_part_number, "BCM57788");
                else
                        goto nomatch;
        } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
                        strcpy(tp->board_part_number, "BCM57761");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
                        strcpy(tp->board_part_number, "BCM57765");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
                        strcpy(tp->board_part_number, "BCM57781");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
                        strcpy(tp->board_part_number, "BCM57785");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
                        strcpy(tp->board_part_number, "BCM57791");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
                        strcpy(tp->board_part_number, "BCM57795");
                else
                        goto nomatch;
        } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
                        strcpy(tp->board_part_number, "BCM57762");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
                        strcpy(tp->board_part_number, "BCM57766");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
                        strcpy(tp->board_part_number, "BCM57782");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
                        strcpy(tp->board_part_number, "BCM57786");
                else
                        goto nomatch;
        } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                strcpy(tp->board_part_number, "BCM95906");
        } else {
nomatch:
                strcpy(tp->board_part_number, "none");
        }
}
14843
14844 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14845 {
14846         u32 val;
14847
14848         if (tg3_nvram_read(tp, offset, &val) ||
14849             (val & 0xfc000000) != 0x0c000000 ||
14850             tg3_nvram_read(tp, offset + 4, &val) ||
14851             val != 0)
14852                 return 0;
14853
14854         return 1;
14855 }
14856
/* Append the bootcode version to tp->fw_ver.  Two NVRAM layouts are
 * handled: the "new" format stores a 16-byte version string whose
 * offset is embedded in the image header, the old format stores a
 * packed major/minor word formatted here as "vM.mm".
 */
static void tg3_read_bc_ver(struct tg3 *tp)
{
        u32 val, offset, start, ver_offset;
        int i, dst_off;
        bool newver = false;

        /* Image pointer at 0xc, image start address at 0x4. */
        if (tg3_nvram_read(tp, 0xc, &offset) ||
            tg3_nvram_read(tp, 0x4, &start))
                return;

        offset = tg3_nvram_logical_addr(tp, offset);

        if (tg3_nvram_read(tp, offset, &val))
                return;

        /* New-format images have header word 0x0cxxxxxx followed by a
         * zero word (same signature tg3_fw_img_is_valid() checks).
         */
        if ((val & 0xfc000000) == 0x0c000000) {
                if (tg3_nvram_read(tp, offset + 4, &val))
                        return;

                if (val == 0)
                        newver = true;
        }

        /* Append after whatever is already in fw_ver. */
        dst_off = strlen(tp->fw_ver);

        if (newver) {
                /* 16-byte version string located via header word 2,
                 * relative to the image start address.
                 */
                if (TG3_VER_SIZE - dst_off < 16 ||
                    tg3_nvram_read(tp, offset + 8, &ver_offset))
                        return;

                offset = offset + ver_offset - start;
                for (i = 0; i < 16; i += 4) {
                        __be32 v;
                        if (tg3_nvram_read_be32(tp, offset + i, &v))
                                return;

                        memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
                }
        } else {
                u32 major, minor;

                /* Old format: packed major/minor word at a fixed
                 * directory location.
                 */
                if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
                        return;

                major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
                        TG3_NVM_BCVER_MAJSFT;
                minor = ver_offset & TG3_NVM_BCVER_MINMSK;
                snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
                         "v%d.%02d", major, minor);
        }
}
14908
14909 static void tg3_read_hwsb_ver(struct tg3 *tp)
14910 {
14911         u32 val, major, minor;
14912
14913         /* Use native endian representation */
14914         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14915                 return;
14916
14917         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14918                 TG3_NVM_HWSB_CFG1_MAJSFT;
14919         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14920                 TG3_NVM_HWSB_CFG1_MINSFT;
14921
14922         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
14923 }
14924
14925 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
14926 {
14927         u32 offset, major, minor, build;
14928
14929         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14930
14931         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14932                 return;
14933
14934         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14935         case TG3_EEPROM_SB_REVISION_0:
14936                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14937                 break;
14938         case TG3_EEPROM_SB_REVISION_2:
14939                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14940                 break;
14941         case TG3_EEPROM_SB_REVISION_3:
14942                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14943                 break;
14944         case TG3_EEPROM_SB_REVISION_4:
14945                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14946                 break;
14947         case TG3_EEPROM_SB_REVISION_5:
14948                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14949                 break;
14950         case TG3_EEPROM_SB_REVISION_6:
14951                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14952                 break;
14953         default:
14954                 return;
14955         }
14956
14957         if (tg3_nvram_read(tp, offset, &val))
14958                 return;
14959
14960         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14961                 TG3_EEPROM_SB_EDH_BLD_SHFT;
14962         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14963                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14964         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
14965
14966         if (minor > 99 || build > 26)
14967                 return;
14968
14969         offset = strlen(tp->fw_ver);
14970         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14971                  " v%d.%02d", major, minor);
14972
14973         if (build > 0) {
14974                 offset = strlen(tp->fw_ver);
14975                 if (offset < TG3_VER_SIZE - 1)
14976                         tp->fw_ver[offset] = 'a' + build - 1;
14977         }
14978 }
14979
14980 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
14981 {
14982         u32 val, offset, start;
14983         int i, vlen;
14984
14985         for (offset = TG3_NVM_DIR_START;
14986              offset < TG3_NVM_DIR_END;
14987              offset += TG3_NVM_DIRENT_SIZE) {
14988                 if (tg3_nvram_read(tp, offset, &val))
14989                         return;
14990
14991                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14992                         break;
14993         }
14994
14995         if (offset == TG3_NVM_DIR_END)
14996                 return;
14997
14998         if (!tg3_flag(tp, 5705_PLUS))
14999                 start = 0x08000000;
15000         else if (tg3_nvram_read(tp, offset - 4, &start))
15001                 return;
15002
15003         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15004             !tg3_fw_img_is_valid(tp, offset) ||
15005             tg3_nvram_read(tp, offset + 8, &val))
15006                 return;
15007
15008         offset += val - start;
15009
15010         vlen = strlen(tp->fw_ver);
15011
15012         tp->fw_ver[vlen++] = ',';
15013         tp->fw_ver[vlen++] = ' ';
15014
15015         for (i = 0; i < 4; i++) {
15016                 __be32 v;
15017                 if (tg3_nvram_read_be32(tp, offset, &v))
15018                         return;
15019
15020                 offset += sizeof(v);
15021
15022                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15023                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15024                         break;
15025                 }
15026
15027                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15028                 vlen += sizeof(v);
15029         }
15030 }
15031
15032 static void tg3_probe_ncsi(struct tg3 *tp)
15033 {
15034         u32 apedata;
15035
15036         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15037         if (apedata != APE_SEG_SIG_MAGIC)
15038                 return;
15039
15040         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15041         if (!(apedata & APE_FW_STATUS_READY))
15042                 return;
15043
15044         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15045                 tg3_flag_set(tp, APE_HAS_NCSI);
15046 }
15047
15048 static void tg3_read_dash_ver(struct tg3 *tp)
15049 {
15050         int vlen;
15051         u32 apedata;
15052         char *fwtype;
15053
15054         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15055
15056         if (tg3_flag(tp, APE_HAS_NCSI))
15057                 fwtype = "NCSI";
15058         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15059                 fwtype = "SMASH";
15060         else
15061                 fwtype = "DASH";
15062
15063         vlen = strlen(tp->fw_ver);
15064
15065         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15066                  fwtype,
15067                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15068                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15069                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15070                  (apedata & APE_FW_VERSION_BLDMSK));
15071 }
15072
15073 static void tg3_read_otp_ver(struct tg3 *tp)
15074 {
15075         u32 val, val2;
15076
15077         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15078                 return;
15079
15080         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15081             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15082             TG3_OTP_MAGIC0_VALID(val)) {
15083                 u64 val64 = (u64) val << 32 | val2;
15084                 u32 ver = 0;
15085                 int i, vlen;
15086
15087                 for (i = 0; i < 7; i++) {
15088                         if ((val64 & 0xff) == 0)
15089                                 break;
15090                         ver = val64 & 0xff;
15091                         val64 >>= 8;
15092                 }
15093                 vlen = strlen(tp->fw_ver);
15094                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15095         }
15096 }
15097
15098 static void tg3_read_fw_ver(struct tg3 *tp)
15099 {
15100         u32 val;
15101         bool vpd_vers = false;
15102
15103         if (tp->fw_ver[0] != 0)
15104                 vpd_vers = true;
15105
15106         if (tg3_flag(tp, NO_NVRAM)) {
15107                 strcat(tp->fw_ver, "sb");
15108                 tg3_read_otp_ver(tp);
15109                 return;
15110         }
15111
15112         if (tg3_nvram_read(tp, 0, &val))
15113                 return;
15114
15115         if (val == TG3_EEPROM_MAGIC)
15116                 tg3_read_bc_ver(tp);
15117         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15118                 tg3_read_sb_ver(tp, val);
15119         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15120                 tg3_read_hwsb_ver(tp);
15121
15122         if (tg3_flag(tp, ENABLE_ASF)) {
15123                 if (tg3_flag(tp, ENABLE_APE)) {
15124                         tg3_probe_ncsi(tp);
15125                         if (!vpd_vers)
15126                                 tg3_read_dash_ver(tp);
15127                 } else if (!vpd_vers) {
15128                         tg3_read_mgmtfw_ver(tp);
15129                 }
15130         }
15131
15132         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15133 }
15134
15135 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15136 {
15137         if (tg3_flag(tp, LRG_PROD_RING_CAP))
15138                 return TG3_RX_RET_MAX_SIZE_5717;
15139         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15140                 return TG3_RX_RET_MAX_SIZE_5700;
15141         else
15142                 return TG3_RX_RET_MAX_SIZE_5705;
15143 }
15144
15145 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15146         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15147         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15148         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15149         { },
15150 };
15151
/* Find the other PCI function of a dual-port device.
 *
 * Scans all eight functions of tp->pdev's slot and returns the first
 * pci_dev in the slot other than tp->pdev itself.  If none is found,
 * returns tp->pdev (single-port configuration).  The returned pointer
 * is deliberately not ref-counted -- see the comment before the final
 * pci_dev_put() below.
 *
 * NOTE(review): if the loop runs to completion without a break, 'peer'
 * retains the func-7 lookup result that was already put inside the
 * loop; should that value ever be non-NULL, the final pci_dev_put()
 * would drop an extra reference.  Presumably unreachable in practice
 * (the device itself occupies one of the functions) -- confirm.
 */
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	/* Mask off the function bits to get the slot's base devfn. */
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);	/* pci_dev_put(NULL) is a no-op */
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
15179
/* Decode the chip revision ID and derive the chip-family capability
 * flags (5705_PLUS, 5750_PLUS, 5755_PLUS, 5780_CLASS, 5717_PLUS,
 * 57765_CLASS, 57765_PLUS) that the rest of the driver keys off.
 *
 * @misc_ctrl_reg: TG3PCI_MISC_HOST_CTRL value read by the caller; its
 * upper bits hold the chip revision for older devices.  Newer devices
 * report ASIC_REV_USE_PROD_ID_REG there and expose the real revision
 * in a device-generation-specific product-ID config register.
 *
 * Ordering matters: later flag derivations read flags set earlier in
 * this function.
 */
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		/* Select the product-ID register by PCI device ID
		 * generation.
		 */
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* Remap 5717 C0 to the 5720 A0 chip ID. */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	/* 57765_PLUS builds on the two flags set just above. */
	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	     tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	/* Broadest family flag: everything 5705 and newer. */
	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
15264
15265 static bool tg3_10_100_only_device(struct tg3 *tp,
15266                                    const struct pci_device_id *ent)
15267 {
15268         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15269
15270         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15271              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15272             (tp->phy_flags & TG3_PHYFLG_IS_FET))
15273                 return true;
15274
15275         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15276                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15277                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15278                                 return true;
15279                 } else {
15280                         return true;
15281                 }
15282         }
15283
15284         return false;
15285 }
15286
15287 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15288 {
15289         u32 misc_ctrl_reg;
15290         u32 pci_state_reg, grc_misc_cfg;
15291         u32 val;
15292         u16 pci_cmd;
15293         int err;
15294
15295         /* Force memory write invalidate off.  If we leave it on,
15296          * then on 5700_BX chips we have to enable a workaround.
15297          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15298          * to match the cacheline size.  The Broadcom driver have this
15299          * workaround but turns MWI off all the times so never uses
15300          * it.  This seems to suggest that the workaround is insufficient.
15301          */
15302         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15303         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15304         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15305
15306         /* Important! -- Make sure register accesses are byteswapped
15307          * correctly.  Also, for those chips that require it, make
15308          * sure that indirect register accesses are enabled before
15309          * the first operation.
15310          */
15311         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15312                               &misc_ctrl_reg);
15313         tp->misc_host_ctrl |= (misc_ctrl_reg &
15314                                MISC_HOST_CTRL_CHIPREV);
15315         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15316                                tp->misc_host_ctrl);
15317
15318         tg3_detect_asic_rev(tp, misc_ctrl_reg);
15319
15320         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15321          * we need to disable memory and use config. cycles
15322          * only to access all registers. The 5702/03 chips
15323          * can mistakenly decode the special cycles from the
15324          * ICH chipsets as memory write cycles, causing corruption
15325          * of register and memory space. Only certain ICH bridges
15326          * will drive special cycles with non-zero data during the
15327          * address phase which can fall within the 5703's address
15328          * range. This is not an ICH bug as the PCI spec allows
15329          * non-zero address during special cycles. However, only
15330          * these ICH bridges are known to drive non-zero addresses
15331          * during special cycles.
15332          *
15333          * Since special cycles do not cross PCI bridges, we only
15334          * enable this workaround if the 5703 is on the secondary
15335          * bus of these ICH bridges.
15336          */
15337         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15338             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15339                 static struct tg3_dev_id {
15340                         u32     vendor;
15341                         u32     device;
15342                         u32     rev;
15343                 } ich_chipsets[] = {
15344                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15345                           PCI_ANY_ID },
15346                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15347                           PCI_ANY_ID },
15348                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15349                           0xa },
15350                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15351                           PCI_ANY_ID },
15352                         { },
15353                 };
15354                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15355                 struct pci_dev *bridge = NULL;
15356
15357                 while (pci_id->vendor != 0) {
15358                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
15359                                                 bridge);
15360                         if (!bridge) {
15361                                 pci_id++;
15362                                 continue;
15363                         }
15364                         if (pci_id->rev != PCI_ANY_ID) {
15365                                 if (bridge->revision > pci_id->rev)
15366                                         continue;
15367                         }
15368                         if (bridge->subordinate &&
15369                             (bridge->subordinate->number ==
15370                              tp->pdev->bus->number)) {
15371                                 tg3_flag_set(tp, ICH_WORKAROUND);
15372                                 pci_dev_put(bridge);
15373                                 break;
15374                         }
15375                 }
15376         }
15377
15378         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15379                 static struct tg3_dev_id {
15380                         u32     vendor;
15381                         u32     device;
15382                 } bridge_chipsets[] = {
15383                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15384                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15385                         { },
15386                 };
15387                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15388                 struct pci_dev *bridge = NULL;
15389
15390                 while (pci_id->vendor != 0) {
15391                         bridge = pci_get_device(pci_id->vendor,
15392                                                 pci_id->device,
15393                                                 bridge);
15394                         if (!bridge) {
15395                                 pci_id++;
15396                                 continue;
15397                         }
15398                         if (bridge->subordinate &&
15399                             (bridge->subordinate->number <=
15400                              tp->pdev->bus->number) &&
15401                             (bridge->subordinate->busn_res.end >=
15402                              tp->pdev->bus->number)) {
15403                                 tg3_flag_set(tp, 5701_DMA_BUG);
15404                                 pci_dev_put(bridge);
15405                                 break;
15406                         }
15407                 }
15408         }
15409
15410         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15411          * DMA addresses > 40-bit. This bridge may have other additional
15412          * 57xx devices behind it in some 4-port NIC designs for example.
15413          * Any tg3 device found behind the bridge will also need the 40-bit
15414          * DMA workaround.
15415          */
15416         if (tg3_flag(tp, 5780_CLASS)) {
15417                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15418                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15419         } else {
15420                 struct pci_dev *bridge = NULL;
15421
15422                 do {
15423                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15424                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
15425                                                 bridge);
15426                         if (bridge && bridge->subordinate &&
15427                             (bridge->subordinate->number <=
15428                              tp->pdev->bus->number) &&
15429                             (bridge->subordinate->busn_res.end >=
15430                              tp->pdev->bus->number)) {
15431                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15432                                 pci_dev_put(bridge);
15433                                 break;
15434                         }
15435                 } while (bridge);
15436         }
15437
15438         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15439             tg3_asic_rev(tp) == ASIC_REV_5714)
15440                 tp->pdev_peer = tg3_find_peer(tp);
15441
15442         /* Determine TSO capabilities */
15443         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15444                 ; /* Do nothing. HW bug. */
15445         else if (tg3_flag(tp, 57765_PLUS))
15446                 tg3_flag_set(tp, HW_TSO_3);
15447         else if (tg3_flag(tp, 5755_PLUS) ||
15448                  tg3_asic_rev(tp) == ASIC_REV_5906)
15449                 tg3_flag_set(tp, HW_TSO_2);
15450         else if (tg3_flag(tp, 5750_PLUS)) {
15451                 tg3_flag_set(tp, HW_TSO_1);
15452                 tg3_flag_set(tp, TSO_BUG);
15453                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15454                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15455                         tg3_flag_clear(tp, TSO_BUG);
15456         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15457                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15458                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15459                 tg3_flag_set(tp, FW_TSO);
15460                 tg3_flag_set(tp, TSO_BUG);
15461                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15462                         tp->fw_needed = FIRMWARE_TG3TSO5;
15463                 else
15464                         tp->fw_needed = FIRMWARE_TG3TSO;
15465         }
15466
15467         /* Selectively allow TSO based on operating conditions */
15468         if (tg3_flag(tp, HW_TSO_1) ||
15469             tg3_flag(tp, HW_TSO_2) ||
15470             tg3_flag(tp, HW_TSO_3) ||
15471             tg3_flag(tp, FW_TSO)) {
15472                 /* For firmware TSO, assume ASF is disabled.
15473                  * We'll disable TSO later if we discover ASF
15474                  * is enabled in tg3_get_eeprom_hw_cfg().
15475                  */
15476                 tg3_flag_set(tp, TSO_CAPABLE);
15477         } else {
15478                 tg3_flag_clear(tp, TSO_CAPABLE);
15479                 tg3_flag_clear(tp, TSO_BUG);
15480                 tp->fw_needed = NULL;
15481         }
15482
15483         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15484                 tp->fw_needed = FIRMWARE_TG3;
15485
15486         if (tg3_asic_rev(tp) == ASIC_REV_57766)
15487                 tp->fw_needed = FIRMWARE_TG357766;
15488
15489         tp->irq_max = 1;
15490
15491         if (tg3_flag(tp, 5750_PLUS)) {
15492                 tg3_flag_set(tp, SUPPORT_MSI);
15493                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15494                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15495                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15496                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15497                      tp->pdev_peer == tp->pdev))
15498                         tg3_flag_clear(tp, SUPPORT_MSI);
15499
15500                 if (tg3_flag(tp, 5755_PLUS) ||
15501                     tg3_asic_rev(tp) == ASIC_REV_5906) {
15502                         tg3_flag_set(tp, 1SHOT_MSI);
15503                 }
15504
15505                 if (tg3_flag(tp, 57765_PLUS)) {
15506                         tg3_flag_set(tp, SUPPORT_MSIX);
15507                         tp->irq_max = TG3_IRQ_MAX_VECS;
15508                 }
15509         }
15510
15511         tp->txq_max = 1;
15512         tp->rxq_max = 1;
15513         if (tp->irq_max > 1) {
15514                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15515                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15516
15517                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15518                     tg3_asic_rev(tp) == ASIC_REV_5720)
15519                         tp->txq_max = tp->irq_max - 1;
15520         }
15521
15522         if (tg3_flag(tp, 5755_PLUS) ||
15523             tg3_asic_rev(tp) == ASIC_REV_5906)
15524                 tg3_flag_set(tp, SHORT_DMA_BUG);
15525
15526         if (tg3_asic_rev(tp) == ASIC_REV_5719)
15527                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15528
15529         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15530             tg3_asic_rev(tp) == ASIC_REV_5719 ||
15531             tg3_asic_rev(tp) == ASIC_REV_5720 ||
15532             tg3_asic_rev(tp) == ASIC_REV_5762)
15533                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15534
15535         if (tg3_flag(tp, 57765_PLUS) &&
15536             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15537                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15538
15539         if (!tg3_flag(tp, 5705_PLUS) ||
15540             tg3_flag(tp, 5780_CLASS) ||
15541             tg3_flag(tp, USE_JUMBO_BDFLAG))
15542                 tg3_flag_set(tp, JUMBO_CAPABLE);
15543
15544         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15545                               &pci_state_reg);
15546
15547         if (pci_is_pcie(tp->pdev)) {
15548                 u16 lnkctl;
15549
15550                 tg3_flag_set(tp, PCI_EXPRESS);
15551
15552                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15553                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15554                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15555                                 tg3_flag_clear(tp, HW_TSO_2);
15556                                 tg3_flag_clear(tp, TSO_CAPABLE);
15557                         }
15558                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15559                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
15560                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15561                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15562                                 tg3_flag_set(tp, CLKREQ_BUG);
15563                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15564                         tg3_flag_set(tp, L1PLLPD_EN);
15565                 }
15566         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15567                 /* BCM5785 devices are effectively PCIe devices, and should
15568                  * follow PCIe codepaths, but do not have a PCIe capabilities
15569                  * section.
15570                  */
15571                 tg3_flag_set(tp, PCI_EXPRESS);
15572         } else if (!tg3_flag(tp, 5705_PLUS) ||
15573                    tg3_flag(tp, 5780_CLASS)) {
15574                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15575                 if (!tp->pcix_cap) {
15576                         dev_err(&tp->pdev->dev,
15577                                 "Cannot find PCI-X capability, aborting\n");
15578                         return -EIO;
15579                 }
15580
15581                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15582                         tg3_flag_set(tp, PCIX_MODE);
15583         }
15584
15585         /* If we have an AMD 762 or VIA K8T800 chipset, write
15586          * reordering to the mailbox registers done by the host
15587          * controller can cause major troubles.  We read back from
15588          * every mailbox register write to force the writes to be
15589          * posted to the chip in order.
15590          */
15591         if (pci_dev_present(tg3_write_reorder_chipsets) &&
15592             !tg3_flag(tp, PCI_EXPRESS))
15593                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15594
15595         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15596                              &tp->pci_cacheline_sz);
15597         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15598                              &tp->pci_lat_timer);
15599         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15600             tp->pci_lat_timer < 64) {
15601                 tp->pci_lat_timer = 64;
15602                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15603                                       tp->pci_lat_timer);
15604         }
15605
15606         /* Important! -- It is critical that the PCI-X hw workaround
15607          * situation is decided before the first MMIO register access.
15608          */
15609         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15610                 /* 5700 BX chips need to have their TX producer index
15611                  * mailboxes written twice to workaround a bug.
15612                  */
15613                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15614
15615                 /* If we are in PCI-X mode, enable register write workaround.
15616                  *
15617                  * The workaround is to use indirect register accesses
15618                  * for all chip writes not to mailbox registers.
15619                  */
15620                 if (tg3_flag(tp, PCIX_MODE)) {
15621                         u32 pm_reg;
15622
15623                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15624
15625                         /* The chip can have it's power management PCI config
15626                          * space registers clobbered due to this bug.
15627                          * So explicitly force the chip into D0 here.
15628                          */
15629                         pci_read_config_dword(tp->pdev,
15630                                               tp->pm_cap + PCI_PM_CTRL,
15631                                               &pm_reg);
15632                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15633                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15634                         pci_write_config_dword(tp->pdev,
15635                                                tp->pm_cap + PCI_PM_CTRL,
15636                                                pm_reg);
15637
15638                         /* Also, force SERR#/PERR# in PCI command. */
15639                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15640                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15641                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15642                 }
15643         }
15644
15645         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15646                 tg3_flag_set(tp, PCI_HIGH_SPEED);
15647         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15648                 tg3_flag_set(tp, PCI_32BIT);
15649
15650         /* Chip-specific fixup from Broadcom driver */
15651         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15652             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15653                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15654                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15655         }
15656
15657         /* Default fast path register access methods */
15658         tp->read32 = tg3_read32;
15659         tp->write32 = tg3_write32;
15660         tp->read32_mbox = tg3_read32;
15661         tp->write32_mbox = tg3_write32;
15662         tp->write32_tx_mbox = tg3_write32;
15663         tp->write32_rx_mbox = tg3_write32;
15664
15665         /* Various workaround register access methods */
15666         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15667                 tp->write32 = tg3_write_indirect_reg32;
15668         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15669                  (tg3_flag(tp, PCI_EXPRESS) &&
15670                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
15671                 /*
15672                  * Back to back register writes can cause problems on these
15673                  * chips, the workaround is to read back all reg writes
15674                  * except those to mailbox regs.
15675                  *
15676                  * See tg3_write_indirect_reg32().
15677                  */
15678                 tp->write32 = tg3_write_flush_reg32;
15679         }
15680
15681         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15682                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15683                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15684                         tp->write32_rx_mbox = tg3_write_flush_reg32;
15685         }
15686
15687         if (tg3_flag(tp, ICH_WORKAROUND)) {
15688                 tp->read32 = tg3_read_indirect_reg32;
15689                 tp->write32 = tg3_write_indirect_reg32;
15690                 tp->read32_mbox = tg3_read_indirect_mbox;
15691                 tp->write32_mbox = tg3_write_indirect_mbox;
15692                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15693                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15694
15695                 iounmap(tp->regs);
15696                 tp->regs = NULL;
15697
15698                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15699                 pci_cmd &= ~PCI_COMMAND_MEMORY;
15700                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15701         }
15702         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15703                 tp->read32_mbox = tg3_read32_mbox_5906;
15704                 tp->write32_mbox = tg3_write32_mbox_5906;
15705                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15706                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15707         }
15708
15709         if (tp->write32 == tg3_write_indirect_reg32 ||
15710             (tg3_flag(tp, PCIX_MODE) &&
15711              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15712               tg3_asic_rev(tp) == ASIC_REV_5701)))
15713                 tg3_flag_set(tp, SRAM_USE_CONFIG);
15714
15715         /* The memory arbiter has to be enabled in order for SRAM accesses
15716          * to succeed.  Normally on powerup the tg3 chip firmware will make
15717          * sure it is enabled, but other entities such as system netboot
15718          * code might disable it.
15719          */
15720         val = tr32(MEMARB_MODE);
15721         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15722
15723         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15724         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15725             tg3_flag(tp, 5780_CLASS)) {
15726                 if (tg3_flag(tp, PCIX_MODE)) {
15727                         pci_read_config_dword(tp->pdev,
15728                                               tp->pcix_cap + PCI_X_STATUS,
15729                                               &val);
15730                         tp->pci_fn = val & 0x7;
15731                 }
15732         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15733                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15734                    tg3_asic_rev(tp) == ASIC_REV_5720) {
15735                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15736                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
15737                         val = tr32(TG3_CPMU_STATUS);
15738
15739                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
15740                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
15741                 else
15742                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15743                                      TG3_CPMU_STATUS_FSHFT_5719;
15744         }
15745
15746         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
15747                 tp->write32_tx_mbox = tg3_write_flush_reg32;
15748                 tp->write32_rx_mbox = tg3_write_flush_reg32;
15749         }
15750
15751         /* Get eeprom hw config before calling tg3_set_power_state().
15752          * In particular, the TG3_FLAG_IS_NIC flag must be
15753          * determined before calling tg3_set_power_state() so that
15754          * we know whether or not to switch out of Vaux power.
15755          * When the flag is set, it means that GPIO1 is used for eeprom
15756          * write protect and also implies that it is a LOM where GPIOs
15757          * are not used to switch power.
15758          */
15759         tg3_get_eeprom_hw_cfg(tp);
15760
15761         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
15762                 tg3_flag_clear(tp, TSO_CAPABLE);
15763                 tg3_flag_clear(tp, TSO_BUG);
15764                 tp->fw_needed = NULL;
15765         }
15766
15767         if (tg3_flag(tp, ENABLE_APE)) {
15768                 /* Allow reads and writes to the
15769                  * APE register and memory space.
15770                  */
15771                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15772                                  PCISTATE_ALLOW_APE_SHMEM_WR |
15773                                  PCISTATE_ALLOW_APE_PSPACE_WR;
15774                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15775                                        pci_state_reg);
15776
15777                 tg3_ape_lock_init(tp);
15778         }
15779
15780         /* Set up tp->grc_local_ctrl before calling
15781          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
15782          * will bring 5700's external PHY out of reset.
15783          * It is also used as eeprom write protect on LOMs.
15784          */
15785         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15786         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15787             tg3_flag(tp, EEPROM_WRITE_PROT))
15788                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15789                                        GRC_LCLCTRL_GPIO_OUTPUT1);
15790         /* Unused GPIO3 must be driven as output on 5752 because there
15791          * are no pull-up resistors on unused GPIO pins.
15792          */
15793         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
15794                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15795
15796         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15797             tg3_asic_rev(tp) == ASIC_REV_57780 ||
15798             tg3_flag(tp, 57765_CLASS))
15799                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15800
15801         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15802             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15803                 /* Turn off the debug UART. */
15804                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15805                 if (tg3_flag(tp, IS_NIC))
15806                         /* Keep VMain power. */
15807                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15808                                               GRC_LCLCTRL_GPIO_OUTPUT0;
15809         }
15810
15811         if (tg3_asic_rev(tp) == ASIC_REV_5762)
15812                 tp->grc_local_ctrl |=
15813                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
15814
15815         /* Switch out of Vaux if it is a NIC */
15816         tg3_pwrsrc_switch_to_vmain(tp);
15817
15818         /* Derive initial jumbo mode from MTU assigned in
15819          * ether_setup() via the alloc_etherdev() call
15820          */
15821         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15822                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15823
15824         /* Determine WakeOnLan speed to use. */
15825         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15826             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15827             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15828             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
15829                 tg3_flag_clear(tp, WOL_SPEED_100MB);
15830         } else {
15831                 tg3_flag_set(tp, WOL_SPEED_100MB);
15832         }
15833
15834         if (tg3_asic_rev(tp) == ASIC_REV_5906)
15835                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15836
15837         /* A few boards don't want Ethernet@WireSpeed phy feature */
15838         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15839             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15840              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
15841              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
15842             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15843             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15844                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15845
15846         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
15847             tg3_chip_rev(tp) == CHIPREV_5704_AX)
15848                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15849         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
15850                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15851
15852         if (tg3_flag(tp, 5705_PLUS) &&
15853             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15854             tg3_asic_rev(tp) != ASIC_REV_5785 &&
15855             tg3_asic_rev(tp) != ASIC_REV_57780 &&
15856             !tg3_flag(tp, 57765_PLUS)) {
15857                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15858                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
15859                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
15860                     tg3_asic_rev(tp) == ASIC_REV_5761) {
15861                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15862                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15863                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15864                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15865                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15866                 } else
15867                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15868         }
15869
15870         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15871             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
15872                 tp->phy_otp = tg3_read_otp_phycfg(tp);
15873                 if (tp->phy_otp == 0)
15874                         tp->phy_otp = TG3_OTP_DEFAULT;
15875         }
15876
15877         if (tg3_flag(tp, CPMU_PRESENT))
15878                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15879         else
15880                 tp->mi_mode = MAC_MI_MODE_BASE;
15881
15882         tp->coalesce_mode = 0;
15883         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
15884             tg3_chip_rev(tp) != CHIPREV_5700_BX)
15885                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15886
15887         /* Set these bits to enable statistics workaround. */
15888         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15889             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
15890             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
15891                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15892                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15893         }
15894
15895         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
15896             tg3_asic_rev(tp) == ASIC_REV_57780)
15897                 tg3_flag_set(tp, USE_PHYLIB);
15898
15899         err = tg3_mdio_init(tp);
15900         if (err)
15901                 return err;
15902
15903         /* Initialize data/descriptor byte/word swapping. */
15904         val = tr32(GRC_MODE);
15905         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15906             tg3_asic_rev(tp) == ASIC_REV_5762)
15907                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15908                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
15909                         GRC_MODE_B2HRX_ENABLE |
15910                         GRC_MODE_HTX2B_ENABLE |
15911                         GRC_MODE_HOST_STACKUP);
15912         else
15913                 val &= GRC_MODE_HOST_STACKUP;
15914
15915         tw32(GRC_MODE, val | tp->grc_mode);
15916
15917         tg3_switch_clocks(tp);
15918
15919         /* Clear this out for sanity. */
15920         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15921
15922         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15923                               &pci_state_reg);
15924         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15925             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15926                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15927                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15928                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
15929                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
15930                         void __iomem *sram_base;
15931
15932                         /* Write some dummy words into the SRAM status block
15933                          * area, see if it reads back correctly.  If the return
15934                          * value is bad, force enable the PCIX workaround.
15935                          */
15936                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15937
15938                         writel(0x00000000, sram_base);
15939                         writel(0x00000000, sram_base + 4);
15940                         writel(0xffffffff, sram_base + 4);
15941                         if (readl(sram_base) != 0x00000000)
15942                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15943                 }
15944         }
15945
15946         udelay(50);
15947         tg3_nvram_init(tp);
15948
15949         /* If the device has an NVRAM, no need to load patch firmware */
15950         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
15951             !tg3_flag(tp, NO_NVRAM))
15952                 tp->fw_needed = NULL;
15953
15954         grc_misc_cfg = tr32(GRC_MISC_CFG);
15955         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15956
15957         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15958             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15959              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15960                 tg3_flag_set(tp, IS_5788);
15961
15962         if (!tg3_flag(tp, IS_5788) &&
15963             tg3_asic_rev(tp) != ASIC_REV_5700)
15964                 tg3_flag_set(tp, TAGGED_STATUS);
15965         if (tg3_flag(tp, TAGGED_STATUS)) {
15966                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15967                                       HOSTCC_MODE_CLRTICK_TXBD);
15968
15969                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15970                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15971                                        tp->misc_host_ctrl);
15972         }
15973
15974         /* Preserve the APE MAC_MODE bits */
15975         if (tg3_flag(tp, ENABLE_APE))
15976                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15977         else
15978                 tp->mac_mode = 0;
15979
15980         if (tg3_10_100_only_device(tp, ent))
15981                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15982
15983         err = tg3_phy_probe(tp);
15984         if (err) {
15985                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15986                 /* ... but do not return immediately ... */
15987                 tg3_mdio_fini(tp);
15988         }
15989
15990         tg3_read_vpd(tp);
15991         tg3_read_fw_ver(tp);
15992
15993         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15994                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15995         } else {
15996                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
15997                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15998                 else
15999                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16000         }
16001
16002         /* 5700 {AX,BX} chips have a broken status block link
16003          * change bit implementation, so we must use the
16004          * status register in those cases.
16005          */
16006         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16007                 tg3_flag_set(tp, USE_LINKCHG_REG);
16008         else
16009                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16010
16011         /* The led_ctrl is set during tg3_phy_probe, here we might
16012          * have to force the link status polling mechanism based
16013          * upon subsystem IDs.
16014          */
16015         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16016             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16017             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16018                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16019                 tg3_flag_set(tp, USE_LINKCHG_REG);
16020         }
16021
16022         /* For all SERDES we poll the MAC status register. */
16023         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16024                 tg3_flag_set(tp, POLL_SERDES);
16025         else
16026                 tg3_flag_clear(tp, POLL_SERDES);
16027
16028         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16029         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16030         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16031             tg3_flag(tp, PCIX_MODE)) {
16032                 tp->rx_offset = NET_SKB_PAD;
16033 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16034                 tp->rx_copy_thresh = ~(u16)0;
16035 #endif
16036         }
16037
16038         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16039         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16040         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16041
16042         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16043
16044         /* Increment the rx prod index on the rx std ring by at most
16045          * 8 for these chips to workaround hw errata.
16046          */
16047         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16048             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16049             tg3_asic_rev(tp) == ASIC_REV_5755)
16050                 tp->rx_std_max_post = 8;
16051
16052         if (tg3_flag(tp, ASPM_WORKAROUND))
16053                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16054                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16055
16056         return err;
16057 }
16058
16059 #ifdef CONFIG_SPARC
16060 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16061 {
16062         struct net_device *dev = tp->dev;
16063         struct pci_dev *pdev = tp->pdev;
16064         struct device_node *dp = pci_device_to_OF_node(pdev);
16065         const unsigned char *addr;
16066         int len;
16067
16068         addr = of_get_property(dp, "local-mac-address", &len);
16069         if (addr && len == 6) {
16070                 memcpy(dev->dev_addr, addr, 6);
16071                 return 0;
16072         }
16073         return -ENODEV;
16074 }
16075
16076 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16077 {
16078         struct net_device *dev = tp->dev;
16079
16080         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16081         return 0;
16082 }
16083 #endif
16084
/* tg3_get_device_address - determine the device's permanent MAC address.
 *
 * Fills tp->dev->dev_addr, trying sources in decreasing order of
 * preference: the SPARC OpenFirmware property (CONFIG_SPARC only), the
 * SSB core (embedded devices), the NIC SRAM MAC address mailbox written
 * by bootcode, NVRAM, and finally the live MAC address registers.
 *
 * Returns 0 on success, -EINVAL if no valid unicast address was found.
 */
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	/* Pick the NVRAM offset holding this function's address.  Dual-MAC
	 * (5704/5780-class) and multi-function (5717+) parts keep the
	 * second port/function's address at a different offset.
	 */
	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* NOTE(review): on lock failure the NVRAM state machine is
		 * reset instead of unlocked — presumably to recover a stuck
		 * arbitration; confirm against tg3_nvram_lock() semantics.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {	/* 0x484b = "HK" signature from bootcode */
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* The address spans the low 2 bytes of hi and all
			 * 4 bytes of lo (big-endian NVRAM layout).
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
16166
/* DMA burst-boundary goals used by tg3_calc_dma_bndry(): restrict bursts
 * to a single cache line, or allow them to span multiple cache lines.
 * 0 means "no boundary restriction requested".
 */
#define BOUNDARY_SINGLE_CACHELINE       1
#define BOUNDARY_MULTI_CACHELINE        2
16169
/* tg3_calc_dma_bndry - fold DMA read/write boundary bits into @val.
 *
 * Chooses DMA burst-boundary settings for the DMA_RWCTRL register based
 * on the PCI cache line size and a per-architecture goal (single vs.
 * multiple cache lines), then ORs the matching boundary bits into @val.
 * On chips after 5700/5701 that are not PCI Express, the boundary bits
 * have no effect and @val is returned unmodified.
 *
 * Returns @val with the boundary bits merged in.
 */
static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* PCI_CACHE_LINE_SIZE is in units of 4-byte dwords; 0 means the
	 * BIOS did not program it, so assume a large (1024-byte) line.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

	/* Per-architecture boundary policy: RISC systems with PCI bridges
	 * that disconnect at cache-line boundaries benefit from limiting
	 * bursts; others request no restriction (goal == 0).
	 */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		/* PCI-E only exposes write-side boundary control. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Conventional PCI: for a single-cache-line goal the cases
		 * fall through until the matching line size is reached.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
16310
/* tg3_do_test_dma - run a single host<->NIC DMA transfer as a probe.
 * @buf: host-side test buffer (CPU address, unused here but kept by
 *	the caller for verification).
 * @buf_dma: DMA address of @buf.
 * @size: transfer length in bytes.
 * @to_device: nonzero for a host->NIC (read-DMA) test, zero for a
 *	NIC->host (write-DMA) test.
 *
 * Builds one internal buffer descriptor in NIC SRAM via the PCI memory
 * window, enables the corresponding DMA engine, enqueues the descriptor
 * on its FTQ, and polls the completion FIFO for up to ~4ms
 * (40 x 100us).  Returns 0 when the descriptor completes, -ENODEV on
 * timeout.  The caller compares buffer contents to judge correctness.
 */
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce: clear completion FIFOs and DMA engine status, disable
	 * the buffer manager, and reset all FTQs before the test.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM word by word through the
	 * PCI config-space memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick the transfer by enqueueing the descriptor address. */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll the completion FIFO until our descriptor shows up. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
16391
16392 #define TEST_BUFFER_SIZE        0x2000
16393
16394 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16395         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16396         { },
16397 };
16398
/* Compute and program the host DMA read/write control register value
 * (tp->dma_rwctrl) for this chip/bus combination, then on 5700/5701
 * parts run a host<->NIC DMA loopback test to detect the write-DMA
 * boundary bug, tightening the write boundary to 16 bytes if corruption
 * is observed (or if a known-bad host bridge is present).
 *
 * Returns 0 on success, -ENOMEM if the DMA test buffer cannot be
 * allocated, or -ENODEV if data is corrupted even with the workaround.
 */
static int tg3_test_dma(struct tg3 *tp)
{
        dma_addr_t buf_dma;
        u32 *buf, saved_dma_rwctrl;
        int ret = 0;

        buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
                                 &buf_dma, GFP_KERNEL);
        if (!buf) {
                ret = -ENOMEM;
                goto out_nofree;
        }

        /* Baseline PCI read/write command codes, then let
         * tg3_calc_dma_bndry() fold in the boundary bits.
         */
        tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
                          (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

        tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

        /* 57765+ chips need none of the bus-specific tuning below. */
        if (tg3_flag(tp, 57765_PLUS))
                goto out;

        if (tg3_flag(tp, PCI_EXPRESS)) {
                /* DMA read watermark not used on PCIE */
                tp->dma_rwctrl |= 0x00180000;
        } else if (!tg3_flag(tp, PCIX_MODE)) {
                if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
                    tg3_asic_rev(tp) == ASIC_REV_5750)
                        tp->dma_rwctrl |= 0x003f0000;
                else
                        tp->dma_rwctrl |= 0x003f000f;
        } else {
                if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
                    tg3_asic_rev(tp) == ASIC_REV_5704) {
                        u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
                        u32 read_water = 0x7;

                        /* If the 5704 is behind the EPB bridge, we can
                         * do the less restrictive ONE_DMA workaround for
                         * better performance.
                         */
                        if (tg3_flag(tp, 40BIT_DMA_BUG) &&
                            tg3_asic_rev(tp) == ASIC_REV_5704)
                                tp->dma_rwctrl |= 0x8000;
                        else if (ccval == 0x6 || ccval == 0x7)
                                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

                        if (tg3_asic_rev(tp) == ASIC_REV_5703)
                                read_water = 4;
                        /* Set bit 23 to enable PCIX hw bug fix */
                        tp->dma_rwctrl |=
                                (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
                                (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
                                (1 << 23);
                } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
                        /* 5780 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00144000;
                } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
                        /* 5714 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00148000;
                } else {
                        tp->dma_rwctrl |= 0x001b000f;
                }
        }
        if (tg3_flag(tp, ONE_DMA_AT_ONCE))
                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

        /* 5703/5704: clear the low nibble of the computed value. */
        if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
            tg3_asic_rev(tp) == ASIC_REV_5704)
                tp->dma_rwctrl &= 0xfffffff0;

        if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
            tg3_asic_rev(tp) == ASIC_REV_5701) {
                /* Remove this if it causes problems for some boards. */
                tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

                /* On 5700/5701 chips, we need to set this bit.
                 * Otherwise the chip will issue cacheline transactions
                 * to streamable DMA memory with not all the byte
                 * enables turned on.  This is an error on several
                 * RISC PCI controllers, in particular sparc64.
                 *
                 * On 5703/5704 chips, this bit has been reassigned
                 * a different meaning.  In particular, it is used
                 * on those chips to enable a PCI-X workaround.
                 */
                tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
        }

        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
        /* Unneeded, already done by tg3_get_invariants.  */
        tg3_switch_clocks(tp);
#endif

        /* Only 5700/5701 need the loopback DMA test below. */
        if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
            tg3_asic_rev(tp) != ASIC_REV_5701)
                goto out;

        /* It is best to perform DMA test with maximum write burst size
         * to expose the 5700/5701 write DMA bug.
         */
        saved_dma_rwctrl = tp->dma_rwctrl;
        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

        while (1) {
                u32 *p = buf, i;

                /* Fill the buffer with a known pattern. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
                        p[i] = i;

                /* Send the buffer to the chip. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
                if (ret) {
                        dev_err(&tp->pdev->dev,
                                "%s: Buffer write failed. err = %d\n",
                                __func__, ret);
                        break;
                }

#if 0
                /* validate data reached card RAM correctly. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        u32 val;
                        tg3_read_mem(tp, 0x2100 + (i*4), &val);
                        if (le32_to_cpu(val) != p[i]) {
                                dev_err(&tp->pdev->dev,
                                        "%s: Buffer corrupted on device! "
                                        "(%d != %d)\n", __func__, val, i);
                                /* ret = -ENODEV here? */
                        }
                        p[i] = 0;
                }
#endif
                /* Now read it back. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
                if (ret) {
                        dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
                                "err = %d\n", __func__, ret);
                        break;
                }

                /* Verify it. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        if (p[i] == i)
                                continue;

                        /* Corruption seen: retry the whole test once with
                         * the 16-byte write-boundary workaround before
                         * declaring the device unusable.
                         */
                        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
                            DMA_RWCTRL_WRITE_BNDRY_16) {
                                tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                                tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
                                break;
                        } else {
                                dev_err(&tp->pdev->dev,
                                        "%s: Buffer corrupted on read back! "
                                        "(%d != %d)\n", __func__, p[i], i);
                                ret = -ENODEV;
                                goto out;
                        }
                }

                if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
                        /* Success. */
                        ret = 0;
                        break;
                }
        }
        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
            DMA_RWCTRL_WRITE_BNDRY_16) {
                /* DMA test passed without adjusting DMA boundary,
                 * now look for chipsets that are known to expose the
                 * DMA bug without failing the test.
                 */
                if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
                        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                        tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                } else {
                        /* Safe to use the calculated DMA boundary. */
                        tp->dma_rwctrl = saved_dma_rwctrl;
                }

                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
        }

out:
        dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
        return ret;
}
16590
16591 static void tg3_init_bufmgr_config(struct tg3 *tp)
16592 {
16593         if (tg3_flag(tp, 57765_PLUS)) {
16594                 tp->bufmgr_config.mbuf_read_dma_low_water =
16595                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16596                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16597                         DEFAULT_MB_MACRX_LOW_WATER_57765;
16598                 tp->bufmgr_config.mbuf_high_water =
16599                         DEFAULT_MB_HIGH_WATER_57765;
16600
16601                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16602                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16603                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16604                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16605                 tp->bufmgr_config.mbuf_high_water_jumbo =
16606                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16607         } else if (tg3_flag(tp, 5705_PLUS)) {
16608                 tp->bufmgr_config.mbuf_read_dma_low_water =
16609                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16610                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16611                         DEFAULT_MB_MACRX_LOW_WATER_5705;
16612                 tp->bufmgr_config.mbuf_high_water =
16613                         DEFAULT_MB_HIGH_WATER_5705;
16614                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16615                         tp->bufmgr_config.mbuf_mac_rx_low_water =
16616                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
16617                         tp->bufmgr_config.mbuf_high_water =
16618                                 DEFAULT_MB_HIGH_WATER_5906;
16619                 }
16620
16621                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16622                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16623                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16624                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16625                 tp->bufmgr_config.mbuf_high_water_jumbo =
16626                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16627         } else {
16628                 tp->bufmgr_config.mbuf_read_dma_low_water =
16629                         DEFAULT_MB_RDMA_LOW_WATER;
16630                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16631                         DEFAULT_MB_MACRX_LOW_WATER;
16632                 tp->bufmgr_config.mbuf_high_water =
16633                         DEFAULT_MB_HIGH_WATER;
16634
16635                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16636                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16637                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16638                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16639                 tp->bufmgr_config.mbuf_high_water_jumbo =
16640                         DEFAULT_MB_HIGH_WATER_JUMBO;
16641         }
16642
16643         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16644         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16645 }
16646
16647 static char *tg3_phy_string(struct tg3 *tp)
16648 {
16649         switch (tp->phy_id & TG3_PHY_ID_MASK) {
16650         case TG3_PHY_ID_BCM5400:        return "5400";
16651         case TG3_PHY_ID_BCM5401:        return "5401";
16652         case TG3_PHY_ID_BCM5411:        return "5411";
16653         case TG3_PHY_ID_BCM5701:        return "5701";
16654         case TG3_PHY_ID_BCM5703:        return "5703";
16655         case TG3_PHY_ID_BCM5704:        return "5704";
16656         case TG3_PHY_ID_BCM5705:        return "5705";
16657         case TG3_PHY_ID_BCM5750:        return "5750";
16658         case TG3_PHY_ID_BCM5752:        return "5752";
16659         case TG3_PHY_ID_BCM5714:        return "5714";
16660         case TG3_PHY_ID_BCM5780:        return "5780";
16661         case TG3_PHY_ID_BCM5755:        return "5755";
16662         case TG3_PHY_ID_BCM5787:        return "5787";
16663         case TG3_PHY_ID_BCM5784:        return "5784";
16664         case TG3_PHY_ID_BCM5756:        return "5722/5756";
16665         case TG3_PHY_ID_BCM5906:        return "5906";
16666         case TG3_PHY_ID_BCM5761:        return "5761";
16667         case TG3_PHY_ID_BCM5718C:       return "5718C";
16668         case TG3_PHY_ID_BCM5718S:       return "5718S";
16669         case TG3_PHY_ID_BCM57765:       return "57765";
16670         case TG3_PHY_ID_BCM5719C:       return "5719C";
16671         case TG3_PHY_ID_BCM5720C:       return "5720C";
16672         case TG3_PHY_ID_BCM5762:        return "5762C";
16673         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
16674         case 0:                 return "serdes";
16675         default:                return "unknown";
16676         }
16677 }
16678
16679 static char *tg3_bus_string(struct tg3 *tp, char *str)
16680 {
16681         if (tg3_flag(tp, PCI_EXPRESS)) {
16682                 strcpy(str, "PCI Express");
16683                 return str;
16684         } else if (tg3_flag(tp, PCIX_MODE)) {
16685                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16686
16687                 strcpy(str, "PCIX:");
16688
16689                 if ((clock_ctrl == 7) ||
16690                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16691                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16692                         strcat(str, "133MHz");
16693                 else if (clock_ctrl == 0)
16694                         strcat(str, "33MHz");
16695                 else if (clock_ctrl == 2)
16696                         strcat(str, "50MHz");
16697                 else if (clock_ctrl == 4)
16698                         strcat(str, "66MHz");
16699                 else if (clock_ctrl == 6)
16700                         strcat(str, "100MHz");
16701         } else {
16702                 strcpy(str, "PCI:");
16703                 if (tg3_flag(tp, PCI_HIGH_SPEED))
16704                         strcat(str, "66MHz");
16705                 else
16706                         strcat(str, "33MHz");
16707         }
16708         if (tg3_flag(tp, PCI_32BIT))
16709                 strcat(str, ":32-bit");
16710         else
16711                 strcat(str, ":64-bit");
16712         return str;
16713 }
16714
16715 static void tg3_init_coal(struct tg3 *tp)
16716 {
16717         struct ethtool_coalesce *ec = &tp->coal;
16718
16719         memset(ec, 0, sizeof(*ec));
16720         ec->cmd = ETHTOOL_GCOALESCE;
16721         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16722         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16723         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16724         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16725         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16726         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16727         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16728         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16729         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16730
16731         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16732                                  HOSTCC_MODE_CLRTICK_TXBD)) {
16733                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16734                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16735                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16736                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16737         }
16738
16739         if (tg3_flag(tp, 5705_PLUS)) {
16740                 ec->rx_coalesce_usecs_irq = 0;
16741                 ec->tx_coalesce_usecs_irq = 0;
16742                 ec->stats_block_coalesce_usecs = 0;
16743         }
16744 }
16745
16746 static int tg3_init_one(struct pci_dev *pdev,
16747                                   const struct pci_device_id *ent)
16748 {
16749         struct net_device *dev;
16750         struct tg3 *tp;
16751         int i, err, pm_cap;
16752         u32 sndmbx, rcvmbx, intmbx;
16753         char str[40];
16754         u64 dma_mask, persist_dma_mask;
16755         netdev_features_t features = 0;
16756
16757         printk_once(KERN_INFO "%s\n", version);
16758
16759         err = pci_enable_device(pdev);
16760         if (err) {
16761                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16762                 return err;
16763         }
16764
16765         err = pci_request_regions(pdev, DRV_MODULE_NAME);
16766         if (err) {
16767                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16768                 goto err_out_disable_pdev;
16769         }
16770
16771         pci_set_master(pdev);
16772
16773         /* Find power-management capability. */
16774         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16775         if (pm_cap == 0) {
16776                 dev_err(&pdev->dev,
16777                         "Cannot find Power Management capability, aborting\n");
16778                 err = -EIO;
16779                 goto err_out_free_res;
16780         }
16781
16782         err = pci_set_power_state(pdev, PCI_D0);
16783         if (err) {
16784                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16785                 goto err_out_free_res;
16786         }
16787
16788         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16789         if (!dev) {
16790                 err = -ENOMEM;
16791                 goto err_out_power_down;
16792         }
16793
16794         SET_NETDEV_DEV(dev, &pdev->dev);
16795
16796         tp = netdev_priv(dev);
16797         tp->pdev = pdev;
16798         tp->dev = dev;
16799         tp->pm_cap = pm_cap;
16800         tp->rx_mode = TG3_DEF_RX_MODE;
16801         tp->tx_mode = TG3_DEF_TX_MODE;
16802         tp->irq_sync = 1;
16803
16804         if (tg3_debug > 0)
16805                 tp->msg_enable = tg3_debug;
16806         else
16807                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16808
16809         if (pdev_is_ssb_gige_core(pdev)) {
16810                 tg3_flag_set(tp, IS_SSB_CORE);
16811                 if (ssb_gige_must_flush_posted_writes(pdev))
16812                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
16813                 if (ssb_gige_one_dma_at_once(pdev))
16814                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
16815                 if (ssb_gige_have_roboswitch(pdev))
16816                         tg3_flag_set(tp, ROBOSWITCH);
16817                 if (ssb_gige_is_rgmii(pdev))
16818                         tg3_flag_set(tp, RGMII_MODE);
16819         }
16820
16821         /* The word/byte swap controls here control register access byte
16822          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
16823          * setting below.
16824          */
16825         tp->misc_host_ctrl =
16826                 MISC_HOST_CTRL_MASK_PCI_INT |
16827                 MISC_HOST_CTRL_WORD_SWAP |
16828                 MISC_HOST_CTRL_INDIR_ACCESS |
16829                 MISC_HOST_CTRL_PCISTATE_RW;
16830
16831         /* The NONFRM (non-frame) byte/word swap controls take effect
16832          * on descriptor entries, anything which isn't packet data.
16833          *
16834          * The StrongARM chips on the board (one for tx, one for rx)
16835          * are running in big-endian mode.
16836          */
16837         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16838                         GRC_MODE_WSWAP_NONFRM_DATA);
16839 #ifdef __BIG_ENDIAN
16840         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16841 #endif
16842         spin_lock_init(&tp->lock);
16843         spin_lock_init(&tp->indirect_lock);
16844         INIT_WORK(&tp->reset_task, tg3_reset_task);
16845
16846         tp->regs = pci_ioremap_bar(pdev, BAR_0);
16847         if (!tp->regs) {
16848                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16849                 err = -ENOMEM;
16850                 goto err_out_free_dev;
16851         }
16852
16853         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16854             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16855             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16856             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16857             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16858             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16859             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16860             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16861             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16862             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16863             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16864             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
16865                 tg3_flag_set(tp, ENABLE_APE);
16866                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16867                 if (!tp->aperegs) {
16868                         dev_err(&pdev->dev,
16869                                 "Cannot map APE registers, aborting\n");
16870                         err = -ENOMEM;
16871                         goto err_out_iounmap;
16872                 }
16873         }
16874
16875         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16876         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16877
16878         dev->ethtool_ops = &tg3_ethtool_ops;
16879         dev->watchdog_timeo = TG3_TX_TIMEOUT;
16880         dev->netdev_ops = &tg3_netdev_ops;
16881         dev->irq = pdev->irq;
16882
16883         err = tg3_get_invariants(tp, ent);
16884         if (err) {
16885                 dev_err(&pdev->dev,
16886                         "Problem fetching invariants of chip, aborting\n");
16887                 goto err_out_apeunmap;
16888         }
16889
16890         /* The EPB bridge inside 5714, 5715, and 5780 and any
16891          * device behind the EPB cannot support DMA addresses > 40-bit.
16892          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16893          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16894          * do DMA address check in tg3_start_xmit().
16895          */
16896         if (tg3_flag(tp, IS_5788))
16897                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16898         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16899                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16900 #ifdef CONFIG_HIGHMEM
16901                 dma_mask = DMA_BIT_MASK(64);
16902 #endif
16903         } else
16904                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16905
16906         /* Configure DMA attributes. */
16907         if (dma_mask > DMA_BIT_MASK(32)) {
16908                 err = pci_set_dma_mask(pdev, dma_mask);
16909                 if (!err) {
16910                         features |= NETIF_F_HIGHDMA;
16911                         err = pci_set_consistent_dma_mask(pdev,
16912                                                           persist_dma_mask);
16913                         if (err < 0) {
16914                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16915                                         "DMA for consistent allocations\n");
16916                                 goto err_out_apeunmap;
16917                         }
16918                 }
16919         }
16920         if (err || dma_mask == DMA_BIT_MASK(32)) {
16921                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16922                 if (err) {
16923                         dev_err(&pdev->dev,
16924                                 "No usable DMA configuration, aborting\n");
16925                         goto err_out_apeunmap;
16926                 }
16927         }
16928
16929         tg3_init_bufmgr_config(tp);
16930
16931         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16932
16933         /* 5700 B0 chips do not support checksumming correctly due
16934          * to hardware bugs.
16935          */
16936         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
16937                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16938
16939                 if (tg3_flag(tp, 5755_PLUS))
16940                         features |= NETIF_F_IPV6_CSUM;
16941         }
16942
16943         /* TSO is on by default on chips that support hardware TSO.
16944          * Firmware TSO on older chips gives lower performance, so it
16945          * is off by default, but can be enabled using ethtool.
16946          */
16947         if ((tg3_flag(tp, HW_TSO_1) ||
16948              tg3_flag(tp, HW_TSO_2) ||
16949              tg3_flag(tp, HW_TSO_3)) &&
16950             (features & NETIF_F_IP_CSUM))
16951                 features |= NETIF_F_TSO;
16952         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16953                 if (features & NETIF_F_IPV6_CSUM)
16954                         features |= NETIF_F_TSO6;
16955                 if (tg3_flag(tp, HW_TSO_3) ||
16956                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
16957                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16958                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
16959                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
16960                     tg3_asic_rev(tp) == ASIC_REV_57780)
16961                         features |= NETIF_F_TSO_ECN;
16962         }
16963
16964         dev->features |= features;
16965         dev->vlan_features |= features;
16966
16967         /*
16968          * Add loopback capability only for a subset of devices that support
16969          * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
16970          * loopback for the remaining devices.
16971          */
16972         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
16973             !tg3_flag(tp, CPMU_PRESENT))
16974                 /* Add the loopback capability */
16975                 features |= NETIF_F_LOOPBACK;
16976
16977         dev->hw_features |= features;
16978
16979         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
16980             !tg3_flag(tp, TSO_CAPABLE) &&
16981             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16982                 tg3_flag_set(tp, MAX_RXPEND_64);
16983                 tp->rx_pending = 63;
16984         }
16985
16986         err = tg3_get_device_address(tp);
16987         if (err) {
16988                 dev_err(&pdev->dev,
16989                         "Could not obtain valid ethernet address, aborting\n");
16990                 goto err_out_apeunmap;
16991         }
16992
16993         /*
16994          * Reset chip in case UNDI or EFI driver did not shutdown
16995          * DMA self test will enable WDMAC and we'll see (spurious)
16996          * pending DMA on the PCI bus at that point.
16997          */
16998         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16999             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17000                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17001                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17002         }
17003
17004         err = tg3_test_dma(tp);
17005         if (err) {
17006                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17007                 goto err_out_apeunmap;
17008         }
17009
17010         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17011         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17012         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17013         for (i = 0; i < tp->irq_max; i++) {
17014                 struct tg3_napi *tnapi = &tp->napi[i];
17015
17016                 tnapi->tp = tp;
17017                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17018
17019                 tnapi->int_mbox = intmbx;
17020                 if (i <= 4)
17021                         intmbx += 0x8;
17022                 else
17023                         intmbx += 0x4;
17024
17025                 tnapi->consmbox = rcvmbx;
17026                 tnapi->prodmbox = sndmbx;
17027
17028                 if (i)
17029                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17030                 else
17031                         tnapi->coal_now = HOSTCC_MODE_NOW;
17032
17033                 if (!tg3_flag(tp, SUPPORT_MSIX))
17034                         break;
17035
17036                 /*
17037                  * If we support MSIX, we'll be using RSS.  If we're using
17038                  * RSS, the first vector only handles link interrupts and the
17039                  * remaining vectors handle rx and tx interrupts.  Reuse the
17040                  * mailbox values for the next iteration.  The values we setup
17041                  * above are still useful for the single vectored mode.
17042                  */
17043                 if (!i)
17044                         continue;
17045
17046                 rcvmbx += 0x8;
17047
17048                 if (sndmbx & 0x4)
17049                         sndmbx -= 0x4;
17050                 else
17051                         sndmbx += 0xc;
17052         }
17053
17054         tg3_init_coal(tp);
17055
17056         pci_set_drvdata(pdev, dev);
17057
17058         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17059             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17060             tg3_asic_rev(tp) == ASIC_REV_5762)
17061                 tg3_flag_set(tp, PTP_CAPABLE);
17062
17063         if (tg3_flag(tp, 5717_PLUS)) {
17064                 /* Resume a low-power mode */
17065                 tg3_frob_aux_power(tp, false);
17066         }
17067
17068         tg3_timer_init(tp);
17069
17070         tg3_carrier_off(tp);
17071
17072         err = register_netdev(dev);
17073         if (err) {
17074                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17075                 goto err_out_apeunmap;
17076         }
17077
17078         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17079                     tp->board_part_number,
17080                     tg3_chip_rev_id(tp),
17081                     tg3_bus_string(tp, str),
17082                     dev->dev_addr);
17083
17084         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17085                 struct phy_device *phydev;
17086                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17087                 netdev_info(dev,
17088                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17089                             phydev->drv->name, dev_name(&phydev->dev));
17090         } else {
17091                 char *ethtype;
17092
17093                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17094                         ethtype = "10/100Base-TX";
17095                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17096                         ethtype = "1000Base-SX";
17097                 else
17098                         ethtype = "10/100/1000Base-T";
17099
17100                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17101                             "(WireSpeed[%d], EEE[%d])\n",
17102                             tg3_phy_string(tp), ethtype,
17103                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17104                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17105         }
17106
17107         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17108                     (dev->features & NETIF_F_RXCSUM) != 0,
17109                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
17110                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17111                     tg3_flag(tp, ENABLE_ASF) != 0,
17112                     tg3_flag(tp, TSO_CAPABLE) != 0);
17113         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17114                     tp->dma_rwctrl,
17115                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17116                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17117
17118         pci_save_state(pdev);
17119
17120         return 0;
17121
17122 err_out_apeunmap:
17123         if (tp->aperegs) {
17124                 iounmap(tp->aperegs);
17125                 tp->aperegs = NULL;
17126         }
17127
17128 err_out_iounmap:
17129         if (tp->regs) {
17130                 iounmap(tp->regs);
17131                 tp->regs = NULL;
17132         }
17133
17134 err_out_free_dev:
17135         free_netdev(dev);
17136
17137 err_out_power_down:
17138         pci_set_power_state(pdev, PCI_D3hot);
17139
17140 err_out_free_res:
17141         pci_release_regions(pdev);
17142
17143 err_out_disable_pdev:
17144         pci_disable_device(pdev);
17145         pci_set_drvdata(pdev, NULL);
17146         return err;
17147 }
17148
17149 static void tg3_remove_one(struct pci_dev *pdev)
17150 {
17151         struct net_device *dev = pci_get_drvdata(pdev);
17152
17153         if (dev) {
17154                 struct tg3 *tp = netdev_priv(dev);
17155
17156                 release_firmware(tp->fw);
17157
17158                 tg3_reset_task_cancel(tp);
17159
17160                 if (tg3_flag(tp, USE_PHYLIB)) {
17161                         tg3_phy_fini(tp);
17162                         tg3_mdio_fini(tp);
17163                 }
17164
17165                 unregister_netdev(dev);
17166                 if (tp->aperegs) {
17167                         iounmap(tp->aperegs);
17168                         tp->aperegs = NULL;
17169                 }
17170                 if (tp->regs) {
17171                         iounmap(tp->regs);
17172                         tp->regs = NULL;
17173                 }
17174                 free_netdev(dev);
17175                 pci_release_regions(pdev);
17176                 pci_disable_device(pdev);
17177                 pci_set_drvdata(pdev, NULL);
17178         }
17179 }
17180
17181 #ifdef CONFIG_PM_SLEEP
/* System-sleep suspend hook: quiesce the device and prepare it for a
 * low-power state.  On failure, attempt to restart the hardware so the
 * interface is still usable.  Returns 0 or a negative errno.
 */
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		return 0;

	/* Stop deferred work, PHY polling and the data path before
	 * touching the hardware.
	 */
	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* irq_sync=1: wait for in-flight interrupt handlers to finish. */
	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	/* Halt the chip and mark it uninitialized under the full lock. */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		/* Power-down failed: try to bring the device back up so
		 * the suspend failure does not leave a dead interface.
		 */
		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		/* PHY restart must happen outside the full lock. */
		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
17234
17235 static int tg3_resume(struct device *device)
17236 {
17237         struct pci_dev *pdev = to_pci_dev(device);
17238         struct net_device *dev = pci_get_drvdata(pdev);
17239         struct tg3 *tp = netdev_priv(dev);
17240         int err;
17241
17242         if (!netif_running(dev))
17243                 return 0;
17244
17245         netif_device_attach(dev);
17246
17247         tg3_full_lock(tp, 0);
17248
17249         tg3_flag_set(tp, INIT_COMPLETE);
17250         err = tg3_restart_hw(tp, 1);
17251         if (err)
17252                 goto out;
17253
17254         tg3_timer_start(tp);
17255
17256         tg3_netif_start(tp);
17257
17258 out:
17259         tg3_full_unlock(tp);
17260
17261         if (!err)
17262                 tg3_phy_start(tp);
17263
17264         return err;
17265 }
17266
/* PM ops table: only built when system sleep is configured; otherwise
 * the driver registers no PM callbacks at all.
 */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
17275
17276 /**
17277  * tg3_io_error_detected - called when PCI error is detected
17278  * @pdev: Pointer to PCI device
17279  * @state: The current pci connection state
17280  *
17281  * This function is called after a PCI bus error affecting
17282  * this device has been detected.
17283  */
17284 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17285                                               pci_channel_state_t state)
17286 {
17287         struct net_device *netdev = pci_get_drvdata(pdev);
17288         struct tg3 *tp = netdev_priv(netdev);
17289         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17290
17291         netdev_info(netdev, "PCI I/O error detected\n");
17292
17293         rtnl_lock();
17294
17295         if (!netif_running(netdev))
17296                 goto done;
17297
17298         tg3_phy_stop(tp);
17299
17300         tg3_netif_stop(tp);
17301
17302         tg3_timer_stop(tp);
17303
17304         /* Want to make sure that the reset task doesn't run */
17305         tg3_reset_task_cancel(tp);
17306
17307         netif_device_detach(netdev);
17308
17309         /* Clean up software state, even if MMIO is blocked */
17310         tg3_full_lock(tp, 0);
17311         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17312         tg3_full_unlock(tp);
17313
17314 done:
17315         if (state == pci_channel_io_perm_failure)
17316                 err = PCI_ERS_RESULT_DISCONNECT;
17317         else
17318                 pci_disable_device(pdev);
17319
17320         rtnl_unlock();
17321
17322         return err;
17323 }
17324
17325 /**
17326  * tg3_io_slot_reset - called after the pci bus has been reset.
17327  * @pdev: Pointer to PCI device
17328  *
17329  * Restart the card from scratch, as if from a cold-boot.
17330  * At this point, the card has exprienced a hard reset,
17331  * followed by fixups by BIOS, and has its config space
17332  * set up identically to what it was at cold boot.
17333  */
17334 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17335 {
17336         struct net_device *netdev = pci_get_drvdata(pdev);
17337         struct tg3 *tp = netdev_priv(netdev);
17338         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17339         int err;
17340
17341         rtnl_lock();
17342
17343         if (pci_enable_device(pdev)) {
17344                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17345                 goto done;
17346         }
17347
17348         pci_set_master(pdev);
17349         pci_restore_state(pdev);
17350         pci_save_state(pdev);
17351
17352         if (!netif_running(netdev)) {
17353                 rc = PCI_ERS_RESULT_RECOVERED;
17354                 goto done;
17355         }
17356
17357         err = tg3_power_up(tp);
17358         if (err)
17359                 goto done;
17360
17361         rc = PCI_ERS_RESULT_RECOVERED;
17362
17363 done:
17364         rtnl_unlock();
17365
17366         return rc;
17367 }
17368
17369 /**
17370  * tg3_io_resume - called when traffic can start flowing again.
17371  * @pdev: Pointer to PCI device
17372  *
17373  * This callback is called when the error recovery driver tells
17374  * us that its OK to resume normal operation.
17375  */
17376 static void tg3_io_resume(struct pci_dev *pdev)
17377 {
17378         struct net_device *netdev = pci_get_drvdata(pdev);
17379         struct tg3 *tp = netdev_priv(netdev);
17380         int err;
17381
17382         rtnl_lock();
17383
17384         if (!netif_running(netdev))
17385                 goto done;
17386
17387         tg3_full_lock(tp, 0);
17388         tg3_flag_set(tp, INIT_COMPLETE);
17389         err = tg3_restart_hw(tp, 1);
17390         if (err) {
17391                 tg3_full_unlock(tp);
17392                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17393                 goto done;
17394         }
17395
17396         netif_device_attach(netdev);
17397
17398         tg3_timer_start(tp);
17399
17400         tg3_netif_start(tp);
17401
17402         tg3_full_unlock(tp);
17403
17404         tg3_phy_start(tp);
17405
17406 done:
17407         rtnl_unlock();
17408 }
17409
/* PCI error recovery (AER) callbacks: quiesce on error, reinitialize
 * after slot reset, restart traffic on resume.
 */
static const struct pci_error_handlers tg3_err_handler = {
        .error_detected = tg3_io_error_detected,
        .slot_reset     = tg3_io_slot_reset,
        .resume         = tg3_io_resume
};
17415
/* PCI driver descriptor tying the device ID table to the probe/remove,
 * error-recovery and power-management entry points.
 */
static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = tg3_remove_one,
        .err_handler    = &tg3_err_handler,
        .driver.pm      = TG3_PM_OPS,
};
17424
17425 static int __init tg3_init(void)
17426 {
17427         return pci_register_driver(&tg3_driver);
17428 }
17429
/* Module unload: unregistering the driver causes the PCI core to call
 * tg3_remove_one() for every bound device.
 */
static void __exit tg3_cleanup(void)
{
        pci_unregister_driver(&tg3_driver);
}
17434
/* Module entry and exit points. */
module_init(tg3_init);
module_exit(tg3_cleanup);