/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
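
/* Usage sketch (illustrative only, assuming a valid struct tg3 *tp):
 *
 *      if (tg3_flag(tp, ENABLE_APE))
 *              tg3_flag_set(tp, APE_HAS_NCSI);
 *
 * The TG3_FLAG_##flag token pasting means a misspelled flag name fails
 * to resolve to an enum TG3_FLAGS value and breaks the build instead
 * of silently testing the wrong bit at runtime.
 */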

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     130
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "February 14, 2013"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
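
/* Worked example (illustrative only): TG3_TX_RING_SIZE is a power of
 * two, so the AND in NEXT_TX() wraps exactly like a modulo would:
 * NEXT_TX(511) == (512 & 511) == 0, the same result as
 * ((N + 1) % TG3_TX_RING_SIZE) without the hardware divide the
 * comment above wants to avoid.
 */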

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG357766       "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
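
/* Usage sketch (illustrative only): toggling GPIOs through
 * GRC_LOCAL_CTRL is one case that needs the flushing, waiting variant
 * (grc_local_ctrl stands for a caller-computed GPIO mask):
 *
 *      tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | grc_local_ctrl,
 *                  TG3_GRC_LCLCTL_PWRSW_DELAY);
 *
 * _tw32_flush() posts the write, delays, reads the register back to
 * force the write to the device, then delays again so the full
 * power-switch settle time is honored.
 */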

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
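
/* Usage sketch (illustrative only, constant names assumed from tg3.h):
 * the two helpers above give dword access to NIC SRAM through the
 * memory window, e.g. polling the bootcode handshake mailbox:
 *
 *      u32 val;
 *
 *      tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 *      if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
 *              ... bootcode has finished ...
 */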

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
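
/* Usage sketch (illustrative only): APE locks must be strictly paired
 * around the shared resource, as tg3_ape_event_lock() below does:
 *
 *      if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *              return -EBUSY;
 *      ... access APE shared memory ...
 *      tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */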

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                        TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
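
/* Note: writing 0x00000001 to an interrupt mailbox above masks that
 * vector.  tg3_enable_ints() below re-arms each vector by writing
 * last_tag << 24, which both unmasks the vector and tells the chip
 * which status block tag the driver has already consumed.
 */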

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000
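
/* 5000 polls with udelay(10) bound each MDIO transaction below to
 * roughly 50 ms before __tg3_readphy()/__tg3_writephy() give up
 * with -EBUSY.
 */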

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
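
/* Usage sketch (illustrative only): a read-modify-write of a MII
 * register through the wrappers above; BMCR_LOOPBACK comes from
 * <linux/mii.h>:
 *
 *      u32 val;
 *
 *      if (!tg3_readphy(tp, MII_BMCR, &val))
 *              tg3_writephy(tp, MII_BMCR, val | BMCR_LOOPBACK);
 */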

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}
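
/* Both DSP helpers use the same two-step indirection: latch the target
 * register into MII_TG3_DSP_ADDRESS, then move data through
 * MII_TG3_DSP_RW_PORT.  A read-modify-write sketch (illustrative only,
 * `reg' and `bit' are placeholders):
 *
 *      u32 val;
 *
 *      if (!tg3_phydsp_read(tp, reg, &val))
 *              tg3_phydsp_write(tp, reg, val | bit);
 */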
1286
1287 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1288 {
1289         int err;
1290
1291         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1292                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1293                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1294         if (!err)
1295                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1296
1297         return err;
1298 }
1299
1300 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1301 {
1302         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1303                 set |= MII_TG3_AUXCTL_MISC_WREN;
1304
1305         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1306 }
1307
1308 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1309 {
1310         u32 val;
1311         int err;
1312
1313         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1314
1315         if (err)
1316                 return err;
1317         if (enable)
1318
1319                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1320         else
1321                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1322
1323         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1324                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1325
1326         return err;
1327 }
1328
1329 static int tg3_bmcr_reset(struct tg3 *tp)
1330 {
1331         u32 phy_control;
1332         int limit, err;
1333
1334         /* OK, reset it, and poll the BMCR_RESET bit until it
1335          * clears or we time out.
1336          */
1337         phy_control = BMCR_RESET;
1338         err = tg3_writephy(tp, MII_BMCR, phy_control);
1339         if (err != 0)
1340                 return -EBUSY;
1341
1342         limit = 5000;
1343         while (limit--) {
1344                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1345                 if (err != 0)
1346                         return -EBUSY;
1347
1348                 if ((phy_control & BMCR_RESET) == 0) {
1349                         udelay(40);
1350                         break;
1351                 }
1352                 udelay(10);
1353         }
1354         if (limit < 0)
1355                 return -EBUSY;
1356
1357         return 0;
1358 }
1359
1360 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1361 {
1362         struct tg3 *tp = bp->priv;
1363         u32 val;
1364
1365         spin_lock_bh(&tp->lock);
1366
1367         if (tg3_readphy(tp, reg, &val))
1368                 val = -EIO;
1369
1370         spin_unlock_bh(&tp->lock);
1371
1372         return val;
1373 }
1374
1375 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1376 {
1377         struct tg3 *tp = bp->priv;
1378         u32 ret = 0;
1379
1380         spin_lock_bh(&tp->lock);
1381
1382         if (tg3_writephy(tp, reg, val))
1383                 ret = -EIO;
1384
1385         spin_unlock_bh(&tp->lock);
1386
1387         return ret;
1388 }
1389
1390 static int tg3_mdio_reset(struct mii_bus *bp)
1391 {
1392         return 0;
1393 }
1394
1395 static void tg3_mdio_config_5785(struct tg3 *tp)
1396 {
1397         u32 val;
1398         struct phy_device *phydev;
1399
1400         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1401         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1402         case PHY_ID_BCM50610:
1403         case PHY_ID_BCM50610M:
1404                 val = MAC_PHYCFG2_50610_LED_MODES;
1405                 break;
1406         case PHY_ID_BCMAC131:
1407                 val = MAC_PHYCFG2_AC131_LED_MODES;
1408                 break;
1409         case PHY_ID_RTL8211C:
1410                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1411                 break;
1412         case PHY_ID_RTL8201E:
1413                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1414                 break;
1415         default:
1416                 return;
1417         }
1418
1419         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1420                 tw32(MAC_PHYCFG2, val);
1421
1422                 val = tr32(MAC_PHYCFG1);
1423                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1424                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1425                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1426                 tw32(MAC_PHYCFG1, val);
1427
1428                 return;
1429         }
1430
1431         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1432                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1433                        MAC_PHYCFG2_FMODE_MASK_MASK |
1434                        MAC_PHYCFG2_GMODE_MASK_MASK |
1435                        MAC_PHYCFG2_ACT_MASK_MASK   |
1436                        MAC_PHYCFG2_QUAL_MASK_MASK |
1437                        MAC_PHYCFG2_INBAND_ENABLE;
1438
1439         tw32(MAC_PHYCFG2, val);
1440
1441         val = tr32(MAC_PHYCFG1);
1442         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1443                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1444         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1445                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1446                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1447                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1448                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1449         }
1450         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1451                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1452         tw32(MAC_PHYCFG1, val);
1453
1454         val = tr32(MAC_EXT_RGMII_MODE);
1455         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1456                  MAC_RGMII_MODE_RX_QUALITY |
1457                  MAC_RGMII_MODE_RX_ACTIVITY |
1458                  MAC_RGMII_MODE_RX_ENG_DET |
1459                  MAC_RGMII_MODE_TX_ENABLE |
1460                  MAC_RGMII_MODE_TX_LOWPWR |
1461                  MAC_RGMII_MODE_TX_RESET);
1462         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1463                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1464                         val |= MAC_RGMII_MODE_RX_INT_B |
1465                                MAC_RGMII_MODE_RX_QUALITY |
1466                                MAC_RGMII_MODE_RX_ACTIVITY |
1467                                MAC_RGMII_MODE_RX_ENG_DET;
1468                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1469                         val |= MAC_RGMII_MODE_TX_ENABLE |
1470                                MAC_RGMII_MODE_TX_LOWPWR |
1471                                MAC_RGMII_MODE_TX_RESET;
1472         }
1473         tw32(MAC_EXT_RGMII_MODE, val);
1474 }
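/* Summary of the 5785 MDIO configuration above: for non-RGMII PHYs
 * only the LED modes and the RX/TX clock timeouts are programmed.  On
 * RGMII PHYs, the RGMII_INBAND_DISABLE and RGMII_EXT_IBND_{RX,TX}_EN
 * flags additionally select between in-band link status (the
 * MAC_PHYCFG2 *_MASK_MASK bits plus INBAND_ENABLE) and the extended
 * RGMII signalling enabled through MAC_PHYCFG1 and MAC_EXT_RGMII_MODE.
 */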
1475
1476 static void tg3_mdio_start(struct tg3 *tp)
1477 {
1478         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1479         tw32_f(MAC_MI_MODE, tp->mi_mode);
1480         udelay(80);
1481
1482         if (tg3_flag(tp, MDIOBUS_INITED) &&
1483             tg3_asic_rev(tp) == ASIC_REV_5785)
1484                 tg3_mdio_config_5785(tp);
1485 }
1486
1487 static int tg3_mdio_init(struct tg3 *tp)
1488 {
1489         int i;
1490         u32 reg;
1491         struct phy_device *phydev;
1492
1493         if (tg3_flag(tp, 5717_PLUS)) {
1494                 u32 is_serdes;
1495
1496                 tp->phy_addr = tp->pci_fn + 1;
1497
1498                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1499                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1500                 else
1501                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1502                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1503                 if (is_serdes)
1504                         tp->phy_addr += 7;
1505         } else
1506                 tp->phy_addr = TG3_PHY_MII_ADDR;
1507
1508         tg3_mdio_start(tp);
1509
1510         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1511                 return 0;
1512
1513         tp->mdio_bus = mdiobus_alloc();
1514         if (tp->mdio_bus == NULL)
1515                 return -ENOMEM;
1516
1517         tp->mdio_bus->name     = "tg3 mdio bus";
1518         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1519                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1520         tp->mdio_bus->priv     = tp;
1521         tp->mdio_bus->parent   = &tp->pdev->dev;
1522         tp->mdio_bus->read     = &tg3_mdio_read;
1523         tp->mdio_bus->write    = &tg3_mdio_write;
1524         tp->mdio_bus->reset    = &tg3_mdio_reset;
1525         tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1526         tp->mdio_bus->irq      = &tp->mdio_irq[0];
1527
1528         for (i = 0; i < PHY_MAX_ADDR; i++)
1529                 tp->mdio_bus->irq[i] = PHY_POLL;
1530
1531         /* The bus registration will look for all the PHYs on the mdio bus.
1532          * Unfortunately, it does not ensure the PHY is powered up before
1533          * accessing the PHY ID registers.  A chip reset is the
1534          * quickest way to bring the device back to an operational state.
1535          */
1536         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1537                 tg3_bmcr_reset(tp);
1538
1539         i = mdiobus_register(tp->mdio_bus);
1540         if (i) {
1541                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1542                 mdiobus_free(tp->mdio_bus);
1543                 return i;
1544         }
1545
1546         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1547
1548         if (!phydev || !phydev->drv) {
1549                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1550                 mdiobus_unregister(tp->mdio_bus);
1551                 mdiobus_free(tp->mdio_bus);
1552                 return -ENODEV;
1553         }
1554
1555         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1556         case PHY_ID_BCM57780:
1557                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1558                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1559                 break;
1560         case PHY_ID_BCM50610:
1561         case PHY_ID_BCM50610M:
1562                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1563                                      PHY_BRCM_RX_REFCLK_UNUSED |
1564                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1565                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1566                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1567                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1568                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1569                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1570                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1571                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1572                 /* fallthru */
1573         case PHY_ID_RTL8211C:
1574                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1575                 break;
1576         case PHY_ID_RTL8201E:
1577         case PHY_ID_BCMAC131:
1578                 phydev->interface = PHY_INTERFACE_MODE_MII;
1579                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1580                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1581                 break;
1582         }
1583
1584         tg3_flag_set(tp, MDIOBUS_INITED);
1585
1586         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1587                 tg3_mdio_config_5785(tp);
1588
1589         return 0;
1590 }
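/* Two details of the bus setup above worth noting: phy_mask is
 * ~(1 << TG3_PHY_MII_ADDR), so mdiobus_register() probes only the one
 * address the MAC expects a PHY at, and every irq slot is PHY_POLL,
 * so phylib polls for link changes rather than relying on a PHY
 * interrupt line.
 */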
1591
1592 static void tg3_mdio_fini(struct tg3 *tp)
1593 {
1594         if (tg3_flag(tp, MDIOBUS_INITED)) {
1595                 tg3_flag_clear(tp, MDIOBUS_INITED);
1596                 mdiobus_unregister(tp->mdio_bus);
1597                 mdiobus_free(tp->mdio_bus);
1598         }
1599 }
1600
1601 /* tp->lock is held. */
1602 static inline void tg3_generate_fw_event(struct tg3 *tp)
1603 {
1604         u32 val;
1605
1606         val = tr32(GRC_RX_CPU_EVENT);
1607         val |= GRC_RX_CPU_DRIVER_EVENT;
1608         tw32_f(GRC_RX_CPU_EVENT, val);
1609
1610         tp->last_event_jiffies = jiffies;
1611 }
1612
1613 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1614
1615 /* tp->lock is held. */
1616 static void tg3_wait_for_event_ack(struct tg3 *tp)
1617 {
1618         int i;
1619         unsigned int delay_cnt;
1620         long time_remain;
1621
1622         /* If enough time has passed, no wait is necessary. */
1623         time_remain = (long)(tp->last_event_jiffies + 1 +
1624                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1625                       (long)jiffies;
1626         if (time_remain < 0)
1627                 return;
1628
1629         /* Check if we can shorten the wait time. */
1630         delay_cnt = jiffies_to_usecs(time_remain);
1631         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1632                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1633         delay_cnt = (delay_cnt >> 3) + 1;
1634
1635         for (i = 0; i < delay_cnt; i++) {
1636                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1637                         break;
1638                 udelay(8);
1639         }
1640 }
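/* Worked example for the wait above, using the constant defined in
 * this file: with TG3_FW_EVENT_TIMEOUT_USEC = 2500, delay_cnt is
 * capped at 2500 and becomes (2500 >> 3) + 1 = 313 iterations.  Each
 * iteration udelay()s 8 usec, so the loop busy-waits at most about
 * 313 * 8 = 2504 usec before giving up -- the event timeout rounded
 * up to one polling step.
 */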
1641
1642 /* tp->lock is held. */
1643 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1644 {
1645         u32 reg, val;
1646
1647         val = 0;
1648         if (!tg3_readphy(tp, MII_BMCR, &reg))
1649                 val = reg << 16;
1650         if (!tg3_readphy(tp, MII_BMSR, &reg))
1651                 val |= (reg & 0xffff);
1652         *data++ = val;
1653
1654         val = 0;
1655         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1656                 val = reg << 16;
1657         if (!tg3_readphy(tp, MII_LPA, &reg))
1658                 val |= (reg & 0xffff);
1659         *data++ = val;
1660
1661         val = 0;
1662         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1663                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1664                         val = reg << 16;
1665                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1666                         val |= (reg & 0xffff);
1667         }
1668         *data++ = val;
1669
1670         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1671                 val = reg << 16;
1672         else
1673                 val = 0;
1674         *data++ = val;
1675 }
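/* The four words gathered above each pack two 16-bit MII registers,
 * high half first:
 *
 *   data[0] = BMCR      << 16 | BMSR
 *   data[1] = ADVERTISE << 16 | LPA
 *   data[2] = CTRL1000  << 16 | STAT1000  (left 0 on MII-serdes PHYs)
 *   data[3] = PHYADDR   << 16
 *
 * A failed tg3_readphy() simply leaves the corresponding half zero.
 */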
1676
1677 /* tp->lock is held. */
1678 static void tg3_ump_link_report(struct tg3 *tp)
1679 {
1680         u32 data[4];
1681
1682         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1683                 return;
1684
1685         tg3_phy_gather_ump_data(tp, data);
1686
1687         tg3_wait_for_event_ack(tp);
1688
1689         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1690         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1691         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1692         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1693         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1694         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1695
1696         tg3_generate_fw_event(tp);
1697 }
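/* Mailbox handshake used above: wait for the RX CPU to ACK any
 * previous event, write the command and length words followed by the
 * payload into NIC SRAM, then ring the doorbell via
 * tg3_generate_fw_event().  Note the length word is 14 even though
 * four 32-bit data words (16 bytes) are stored; why the firmware
 * expects 14 is an interface detail not documented here.
 */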
1698
1699 /* tp->lock is held. */
1700 static void tg3_stop_fw(struct tg3 *tp)
1701 {
1702         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1703                 /* Wait for RX cpu to ACK the previous event. */
1704                 tg3_wait_for_event_ack(tp);
1705
1706                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1707
1708                 tg3_generate_fw_event(tp);
1709
1710                 /* Wait for RX cpu to ACK this event. */
1711                 tg3_wait_for_event_ack(tp);
1712         }
1713 }
1714
1715 /* tp->lock is held. */
1716 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1717 {
1718         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1719                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1720
1721         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1722                 switch (kind) {
1723                 case RESET_KIND_INIT:
1724                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1725                                       DRV_STATE_START);
1726                         break;
1727
1728                 case RESET_KIND_SHUTDOWN:
1729                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1730                                       DRV_STATE_UNLOAD);
1731                         break;
1732
1733                 case RESET_KIND_SUSPEND:
1734                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1735                                       DRV_STATE_SUSPEND);
1736                         break;
1737
1738                 default:
1739                         break;
1740                 }
1741         }
1742
1743         if (kind == RESET_KIND_INIT ||
1744             kind == RESET_KIND_SUSPEND)
1745                 tg3_ape_driver_state_change(tp, kind);
1746 }
1747
1748 /* tp->lock is held. */
1749 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1750 {
1751         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1752                 switch (kind) {
1753                 case RESET_KIND_INIT:
1754                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755                                       DRV_STATE_START_DONE);
1756                         break;
1757
1758                 case RESET_KIND_SHUTDOWN:
1759                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1760                                       DRV_STATE_UNLOAD_DONE);
1761                         break;
1762
1763                 default:
1764                         break;
1765                 }
1766         }
1767
1768         if (kind == RESET_KIND_SHUTDOWN)
1769                 tg3_ape_driver_state_change(tp, kind);
1770 }
1771
1772 /* tp->lock is held. */
1773 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1774 {
1775         if (tg3_flag(tp, ENABLE_ASF)) {
1776                 switch (kind) {
1777                 case RESET_KIND_INIT:
1778                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1779                                       DRV_STATE_START);
1780                         break;
1781
1782                 case RESET_KIND_SHUTDOWN:
1783                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1784                                       DRV_STATE_UNLOAD);
1785                         break;
1786
1787                 case RESET_KIND_SUSPEND:
1788                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1789                                       DRV_STATE_SUSPEND);
1790                         break;
1791
1792                 default:
1793                         break;
1794                 }
1795         }
1796 }
1797
1798 static int tg3_poll_fw(struct tg3 *tp)
1799 {
1800         int i;
1801         u32 val;
1802
1803         if (tg3_flag(tp, IS_SSB_CORE)) {
1804                 /* We don't use firmware. */
1805                 return 0;
1806         }
1807
1808         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1809                 /* Wait up to 20ms for init done. */
1810                 for (i = 0; i < 200; i++) {
1811                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1812                                 return 0;
1813                         udelay(100);
1814                 }
1815                 return -ENODEV;
1816         }
1817
1818         /* Wait for firmware initialization to complete. */
1819         for (i = 0; i < 100000; i++) {
1820                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1821                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1822                         break;
1823                 udelay(10);
1824         }
1825
1826         /* Chip might not be fitted with firmware.  Some Sun onboard
1827          * parts are configured like that.  So don't signal the timeout
1828          * of the above loop as an error, but do report the lack of
1829          * running firmware once.
1830          */
1831         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1832                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1833
1834                 netdev_info(tp->dev, "No firmware running\n");
1835         }
1836
1837         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1838                 /* The 57765 A0 needs a little more
1839                  * time to do some important work.
1840                  */
1841                 mdelay(10);
1842         }
1843
1844         return 0;
1845 }
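/* Timing of the polls above: the 5906 VCPU path checks VCPU_STATUS
 * every 100 usec for 200 iterations, i.e. up to 20 ms.  The mailbox
 * path polls every 10 usec for 100000 iterations, i.e. up to one
 * second, waiting for the firmware to write back the one's complement
 * of NIC_SRAM_FIRMWARE_MBOX_MAGIC1 as its init-done signature.
 */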
1846
1847 static void tg3_link_report(struct tg3 *tp)
1848 {
1849         if (!netif_carrier_ok(tp->dev)) {
1850                 netif_info(tp, link, tp->dev, "Link is down\n");
1851                 tg3_ump_link_report(tp);
1852         } else if (netif_msg_link(tp)) {
1853                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1854                             (tp->link_config.active_speed == SPEED_1000 ?
1855                              1000 :
1856                              (tp->link_config.active_speed == SPEED_100 ?
1857                               100 : 10)),
1858                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1859                              "full" : "half"));
1860
1861                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1862                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1863                             "on" : "off",
1864                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1865                             "on" : "off");
1866
1867                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1868                         netdev_info(tp->dev, "EEE is %s\n",
1869                                     tp->setlpicnt ? "enabled" : "disabled");
1870
1871                 tg3_ump_link_report(tp);
1872         }
1873
1874         tp->link_up = netif_carrier_ok(tp->dev);
1875 }
1876
1877 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1878 {
1879         u16 miireg;
1880
1881         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1882                 miireg = ADVERTISE_1000XPAUSE;
1883         else if (flow_ctrl & FLOW_CTRL_TX)
1884                 miireg = ADVERTISE_1000XPSE_ASYM;
1885         else if (flow_ctrl & FLOW_CTRL_RX)
1886                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1887         else
1888                 miireg = 0;
1889
1890         return miireg;
1891 }
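/* Advertisement encoding implemented above (1000BASE-X pause bits):
 *
 *   flow_ctrl      advertised
 *   TX and RX      PAUSE
 *   TX only        ASYM_PAUSE
 *   RX only        PAUSE | ASYM_PAUSE
 *   none           0
 *
 * RX-only must advertise both bits so that a partner advertising
 * symmetric pause can still resolve to pausing its transmitter.
 */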
1892
1893 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1894 {
1895         u8 cap = 0;
1896
1897         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1898                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1899         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1900                 if (lcladv & ADVERTISE_1000XPAUSE)
1901                         cap = FLOW_CTRL_RX;
1902                 if (rmtadv & ADVERTISE_1000XPAUSE)
1903                         cap = FLOW_CTRL_TX;
1904         }
1905
1906         return cap;
1907 }
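/* Resolution rule used above: if both ends advertise symmetric PAUSE,
 * pause runs in both directions.  Otherwise, when both advertise
 * ASYM_PAUSE, whichever end also advertises PAUSE becomes the pause
 * receiver: local PAUSE yields FLOW_CTRL_RX, remote PAUSE yields
 * FLOW_CTRL_TX (the two cases are mutually exclusive here, since both
 * advertising PAUSE is handled by the first branch).  Anything else
 * resolves to no flow control.
 */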
1908
1909 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1910 {
1911         u8 autoneg;
1912         u8 flowctrl = 0;
1913         u32 old_rx_mode = tp->rx_mode;
1914         u32 old_tx_mode = tp->tx_mode;
1915
1916         if (tg3_flag(tp, USE_PHYLIB))
1917                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1918         else
1919                 autoneg = tp->link_config.autoneg;
1920
1921         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1922                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1923                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1924                 else
1925                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1926         } else
1927                 flowctrl = tp->link_config.flowctrl;
1928
1929         tp->link_config.active_flowctrl = flowctrl;
1930
1931         if (flowctrl & FLOW_CTRL_RX)
1932                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1933         else
1934                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1935
1936         if (old_rx_mode != tp->rx_mode)
1937                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1938
1939         if (flowctrl & FLOW_CTRL_TX)
1940                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1941         else
1942                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1943
1944         if (old_tx_mode != tp->tx_mode)
1945                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1946 }
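/* Note on the function above: MAC_RX_MODE and MAC_TX_MODE are only
 * rewritten when the resolved flow control actually changes them,
 * which avoids redundant flushed writes to the MAC mode registers on
 * every link event.
 */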
1947
1948 static void tg3_adjust_link(struct net_device *dev)
1949 {
1950         u8 oldflowctrl, linkmesg = 0;
1951         u32 mac_mode, lcl_adv, rmt_adv;
1952         struct tg3 *tp = netdev_priv(dev);
1953         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1954
1955         spin_lock_bh(&tp->lock);
1956
1957         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1958                                     MAC_MODE_HALF_DUPLEX);
1959
1960         oldflowctrl = tp->link_config.active_flowctrl;
1961
1962         if (phydev->link) {
1963                 lcl_adv = 0;
1964                 rmt_adv = 0;
1965
1966                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1967                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1968                 else if (phydev->speed == SPEED_1000 ||
1969                          tg3_asic_rev(tp) != ASIC_REV_5785)
1970                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1971                 else
1972                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1973
1974                 if (phydev->duplex == DUPLEX_HALF)
1975                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1976                 else {
1977                         lcl_adv = mii_advertise_flowctrl(
1978                                   tp->link_config.flowctrl);
1979
1980                         if (phydev->pause)
1981                                 rmt_adv = LPA_PAUSE_CAP;
1982                         if (phydev->asym_pause)
1983                                 rmt_adv |= LPA_PAUSE_ASYM;
1984                 }
1985
1986                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1987         } else
1988                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1989
1990         if (mac_mode != tp->mac_mode) {
1991                 tp->mac_mode = mac_mode;
1992                 tw32_f(MAC_MODE, tp->mac_mode);
1993                 udelay(40);
1994         }
1995
1996         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
1997                 if (phydev->speed == SPEED_10)
1998                         tw32(MAC_MI_STAT,
1999                              MAC_MI_STAT_10MBPS_MODE |
2000                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2001                 else
2002                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2003         }
2004
2005         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2006                 tw32(MAC_TX_LENGTHS,
2007                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2008                       (6 << TX_LENGTHS_IPG_SHIFT) |
2009                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2010         else
2011                 tw32(MAC_TX_LENGTHS,
2012                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2013                       (6 << TX_LENGTHS_IPG_SHIFT) |
2014                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2015
2016         if (phydev->link != tp->old_link ||
2017             phydev->speed != tp->link_config.active_speed ||
2018             phydev->duplex != tp->link_config.active_duplex ||
2019             oldflowctrl != tp->link_config.active_flowctrl)
2020                 linkmesg = 1;
2021
2022         tp->old_link = phydev->link;
2023         tp->link_config.active_speed = phydev->speed;
2024         tp->link_config.active_duplex = phydev->duplex;
2025
2026         spin_unlock_bh(&tp->lock);
2027
2028         if (linkmesg)
2029                 tg3_link_report(tp);
2030 }
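/* tg3_adjust_link() is the phylib link-change callback wired up by
 * phy_connect() in tg3_phy_init() below.  One detail worth noting:
 * for 1000 Mbps half duplex the MAC_TX_LENGTHS slot time is set to
 * 0xff rather than 32, presumably to cover the extended slot time
 * that gigabit half duplex requires for carrier extension.
 */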
2031
2032 static int tg3_phy_init(struct tg3 *tp)
2033 {
2034         struct phy_device *phydev;
2035
2036         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2037                 return 0;
2038
2039         /* Bring the PHY back to a known state. */
2040         tg3_bmcr_reset(tp);
2041
2042         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2043
2044         /* Attach the MAC to the PHY. */
2045         phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2046                              tg3_adjust_link, phydev->interface);
2047         if (IS_ERR(phydev)) {
2048                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2049                 return PTR_ERR(phydev);
2050         }
2051
2052         /* Mask with MAC supported features. */
2053         switch (phydev->interface) {
2054         case PHY_INTERFACE_MODE_GMII:
2055         case PHY_INTERFACE_MODE_RGMII:
2056                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2057                         phydev->supported &= (PHY_GBIT_FEATURES |
2058                                               SUPPORTED_Pause |
2059                                               SUPPORTED_Asym_Pause);
2060                         break;
2061                 }
2062                 /* fallthru */
2063         case PHY_INTERFACE_MODE_MII:
2064                 phydev->supported &= (PHY_BASIC_FEATURES |
2065                                       SUPPORTED_Pause |
2066                                       SUPPORTED_Asym_Pause);
2067                 break;
2068         default:
2069                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2070                 return -EINVAL;
2071         }
2072
2073         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2074
2075         phydev->advertising = phydev->supported;
2076
2077         return 0;
2078 }
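/* Sketch of the phylib handshake above: phy_connect() binds
 * tg3_adjust_link() as the callback phylib invokes on every link
 * state change, and the supported-feature masking limits what gets
 * advertised to what the MAC can actually run -- gigabit only on
 * GMII/RGMII PHYs that are not flagged 10/100-only.
 */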
2079
2080 static void tg3_phy_start(struct tg3 *tp)
2081 {
2082         struct phy_device *phydev;
2083
2084         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2085                 return;
2086
2087         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2088
2089         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2090                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2091                 phydev->speed = tp->link_config.speed;
2092                 phydev->duplex = tp->link_config.duplex;
2093                 phydev->autoneg = tp->link_config.autoneg;
2094                 phydev->advertising = tp->link_config.advertising;
2095         }
2096
2097         phy_start(phydev);
2098
2099         phy_start_aneg(phydev);
2100 }
2101
2102 static void tg3_phy_stop(struct tg3 *tp)
2103 {
2104         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2105                 return;
2106
2107         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2108 }
2109
2110 static void tg3_phy_fini(struct tg3 *tp)
2111 {
2112         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2113                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2114                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2115         }
2116 }
2117
2118 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2119 {
2120         int err;
2121         u32 val;
2122
2123         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2124                 return 0;
2125
2126         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2127                 /* Cannot do read-modify-write on 5401 */
2128                 err = tg3_phy_auxctl_write(tp,
2129                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2130                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2131                                            0x4c20);
2132                 goto done;
2133         }
2134
2135         err = tg3_phy_auxctl_read(tp,
2136                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2137         if (err)
2138                 return err;
2139
2140         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2141         err = tg3_phy_auxctl_write(tp,
2142                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2143
2144 done:
2145         return err;
2146 }
2147
2148 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2149 {
2150         u32 phytest;
2151
2152         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2153                 u32 phy;
2154
2155                 tg3_writephy(tp, MII_TG3_FET_TEST,
2156                              phytest | MII_TG3_FET_SHADOW_EN);
2157                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2158                         if (enable)
2159                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2160                         else
2161                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2162                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2163                 }
2164                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2165         }
2166 }
2167
2168 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2169 {
2170         u32 reg;
2171
2172         if (!tg3_flag(tp, 5705_PLUS) ||
2173             (tg3_flag(tp, 5717_PLUS) &&
2174              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2175                 return;
2176
2177         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2178                 tg3_phy_fet_toggle_apd(tp, enable);
2179                 return;
2180         }
2181
2182         reg = MII_TG3_MISC_SHDW_WREN |
2183               MII_TG3_MISC_SHDW_SCR5_SEL |
2184               MII_TG3_MISC_SHDW_SCR5_LPED |
2185               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2186               MII_TG3_MISC_SHDW_SCR5_SDTL |
2187               MII_TG3_MISC_SHDW_SCR5_C125OE;
2188         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2189                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2190
2191         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2192
2193
2194         reg = MII_TG3_MISC_SHDW_WREN |
2195               MII_TG3_MISC_SHDW_APD_SEL |
2196               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2197         if (enable)
2198                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2199
2200         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2201 }
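/* Shadow-register convention used above: each MII_TG3_MISC_SHDW write
 * carries a bank selector (SCR5_SEL or APD_SEL) plus
 * MII_TG3_MISC_SHDW_WREN, so one tg3_writephy() updates a single
 * shadowed bank behind the one MII register.
 */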
2202
2203 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2204 {
2205         u32 phy;
2206
2207         if (!tg3_flag(tp, 5705_PLUS) ||
2208             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2209                 return;
2210
2211         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2212                 u32 ephy;
2213
2214                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2215                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2216
2217                         tg3_writephy(tp, MII_TG3_FET_TEST,
2218                                      ephy | MII_TG3_FET_SHADOW_EN);
2219                         if (!tg3_readphy(tp, reg, &phy)) {
2220                                 if (enable)
2221                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2222                                 else
2223                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2224                                 tg3_writephy(tp, reg, phy);
2225                         }
2226                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2227                 }
2228         } else {
2229                 int ret;
2230
2231                 ret = tg3_phy_auxctl_read(tp,
2232                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2233                 if (!ret) {
2234                         if (enable)
2235                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2236                         else
2237                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2238                         tg3_phy_auxctl_write(tp,
2239                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2240                 }
2241         }
2242 }
2243
2244 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2245 {
2246         int ret;
2247         u32 val;
2248
2249         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2250                 return;
2251
2252         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2253         if (!ret)
2254                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2255                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2256 }
2257
2258 static void tg3_phy_apply_otp(struct tg3 *tp)
2259 {
2260         u32 otp, phy;
2261
2262         if (!tp->phy_otp)
2263                 return;
2264
2265         otp = tp->phy_otp;
2266
2267         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2268                 return;
2269
2270         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2271         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2272         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2273
2274         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2275               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2276         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2277
2278         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2279         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2280         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2281
2282         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2283         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2284
2285         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2286         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2287
2288         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2289               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2290         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2291
2292         tg3_phy_toggle_auxctl_smdsp(tp, false);
2293 }
2294
2295 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2296 {
2297         u32 val;
2298
2299         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2300                 return;
2301
2302         tp->setlpicnt = 0;
2303
2304         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2305             current_link_up == 1 &&
2306             tp->link_config.active_duplex == DUPLEX_FULL &&
2307             (tp->link_config.active_speed == SPEED_100 ||
2308              tp->link_config.active_speed == SPEED_1000)) {
2309                 u32 eeectl;
2310
2311                 if (tp->link_config.active_speed == SPEED_1000)
2312                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2313                 else
2314                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2315
2316                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2317
2318                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2319                                   TG3_CL45_D7_EEERES_STAT, &val);
2320
2321                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2322                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2323                         tp->setlpicnt = 2;
2324         }
2325
2326         if (!tp->setlpicnt) {
2327                 if (current_link_up == 1 &&
2328                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2329                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2330                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2331                 }
2332
2333                 val = tr32(TG3_CPMU_EEE_MODE);
2334                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2335         }
2336 }
2337
2338 static void tg3_phy_eee_enable(struct tg3 *tp)
2339 {
2340         u32 val;
2341
2342         if (tp->link_config.active_speed == SPEED_1000 &&
2343             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2344              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2345              tg3_flag(tp, 57765_CLASS)) &&
2346             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2347                 val = MII_TG3_DSP_TAP26_ALNOKO |
2348                       MII_TG3_DSP_TAP26_RMRXSTO;
2349                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2350                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2351         }
2352
2353         val = tr32(TG3_CPMU_EEE_MODE);
2354         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2355 }
2356
2357 static int tg3_wait_macro_done(struct tg3 *tp)
2358 {
2359         int limit = 100;
2360
2361         while (limit--) {
2362                 u32 tmp32;
2363
2364                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2365                         if ((tmp32 & 0x1000) == 0)
2366                                 break;
2367                 }
2368         }
2369         if (limit < 0)
2370                 return -EBUSY;
2371
2372         return 0;
2373 }
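/* The poll above spins on bit 0x1000 of MII_TG3_DSP_CONTROL clearing,
 * with no explicit delay between the (at most 100) reads; the MDIO
 * transaction time itself effectively paces the loop.
 */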
2374
2375 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2376 {
2377         static const u32 test_pat[4][6] = {
2378         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2379         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2380         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2381         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2382         };
2383         int chan;
2384
2385         for (chan = 0; chan < 4; chan++) {
2386                 int i;
2387
2388                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2389                              (chan * 0x2000) | 0x0200);
2390                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2391
2392                 for (i = 0; i < 6; i++)
2393                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2394                                      test_pat[chan][i]);
2395
2396                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2397                 if (tg3_wait_macro_done(tp)) {
2398                         *resetp = 1;
2399                         return -EBUSY;
2400                 }
2401
2402                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2403                              (chan * 0x2000) | 0x0200);
2404                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2405                 if (tg3_wait_macro_done(tp)) {
2406                         *resetp = 1;
2407                         return -EBUSY;
2408                 }
2409
2410                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2411                 if (tg3_wait_macro_done(tp)) {
2412                         *resetp = 1;
2413                         return -EBUSY;
2414                 }
2415
2416                 for (i = 0; i < 6; i += 2) {
2417                         u32 low, high;
2418
2419                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2420                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2421                             tg3_wait_macro_done(tp)) {
2422                                 *resetp = 1;
2423                                 return -EBUSY;
2424                         }
2425                         low &= 0x7fff;
2426                         high &= 0x000f;
2427                         if (low != test_pat[chan][i] ||
2428                             high != test_pat[chan][i+1]) {
2429                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2430                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2431                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2432
2433                                 return -EBUSY;
2434                         }
2435                 }
2436         }
2437
2438         return 0;
2439 }
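/* Shape of the self-test above: for each of the four DSP channels
 * (addressed at chan * 0x2000 | 0x0200), six test words are written,
 * latched with a sequence of DSP_CONTROL commands, then read back.
 * Only the low 15 bits of the even-indexed words and the low 4 bits
 * of the odd-indexed words are significant on readback; a mismatch
 * issues what appears to be a recovery sequence (0x000b, 0x4001,
 * 0x4005) and returns -EBUSY so the caller retries after a fresh PHY
 * reset.
 */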
2440
2441 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2442 {
2443         int chan;
2444
2445         for (chan = 0; chan < 4; chan++) {
2446                 int i;
2447
2448                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2449                              (chan * 0x2000) | 0x0200);
2450                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2451                 for (i = 0; i < 6; i++)
2452                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2453                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2454                 if (tg3_wait_macro_done(tp))
2455                         return -EBUSY;
2456         }
2457
2458         return 0;
2459 }
2460
2461 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2462 {
2463         u32 reg32, phy9_orig;
2464         int retries, do_phy_reset, err;
2465
2466         retries = 10;
2467         do_phy_reset = 1;
2468         do {
2469                 if (do_phy_reset) {
2470                         err = tg3_bmcr_reset(tp);
2471                         if (err)
2472                                 return err;
2473                         do_phy_reset = 0;
2474                 }
2475
2476                 /* Disable transmitter and interrupt.  */
2477                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2478                         continue;
2479
2480                 reg32 |= 0x3000;
2481                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2482
2483                 /* Set full-duplex, 1000 mbps.  */
2484                 tg3_writephy(tp, MII_BMCR,
2485                              BMCR_FULLDPLX | BMCR_SPEED1000);
2486
2487                 /* Set to master mode.  */
2488                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2489                         continue;
2490
2491                 tg3_writephy(tp, MII_CTRL1000,
2492                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2493
2494                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2495                 if (err)
2496                         return err;
2497
2498                 /* Block the PHY control access.  */
2499                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2500
2501                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2502                 if (!err)
2503                         break;
2504         } while (--retries);
2505
2506         err = tg3_phy_reset_chanpat(tp);
2507         if (err)
2508                 return err;
2509
2510         tg3_phydsp_write(tp, 0x8005, 0x0000);
2511
2512         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2513         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2514
2515         tg3_phy_toggle_auxctl_smdsp(tp, false);
2516
2517         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2518
2519         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2520                 reg32 &= ~0x3000;
2521                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2522         } else if (!err)
2523                 err = -EBUSY;
2524
2525         return err;
2526 }
2527
2528 static void tg3_carrier_off(struct tg3 *tp)
2529 {
2530         netif_carrier_off(tp->dev);
2531         tp->link_up = false;
2532 }
2533
2534 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2535 {
2536         if (tg3_flag(tp, ENABLE_ASF))
2537                 netdev_warn(tp->dev,
2538                             "Management side-band traffic will be interrupted during phy settings change\n");
2539 }
2540
2541 /* Reset the tigon3 PHY and reapply the chip-specific PHY
2542  * workarounds.  The reset is performed unconditionally.
2543  */
2544 static int tg3_phy_reset(struct tg3 *tp)
2545 {
2546         u32 val, cpmuctrl;
2547         int err;
2548
2549         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2550                 val = tr32(GRC_MISC_CFG);
2551                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2552                 udelay(40);
2553         }
2554         err  = tg3_readphy(tp, MII_BMSR, &val);
2555         err |= tg3_readphy(tp, MII_BMSR, &val);
2556         if (err != 0)
2557                 return -EBUSY;
2558
2559         if (netif_running(tp->dev) && tp->link_up) {
2560                 netif_carrier_off(tp->dev);
2561                 tg3_link_report(tp);
2562         }
2563
2564         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2565             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2566             tg3_asic_rev(tp) == ASIC_REV_5705) {
2567                 err = tg3_phy_reset_5703_4_5(tp);
2568                 if (err)
2569                         return err;
2570                 goto out;
2571         }
2572
2573         cpmuctrl = 0;
2574         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2575             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2576                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2577                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2578                         tw32(TG3_CPMU_CTRL,
2579                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2580         }
2581
2582         err = tg3_bmcr_reset(tp);
2583         if (err)
2584                 return err;
2585
2586         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2587                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2588                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2589
2590                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2591         }
2592
2593         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2594             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2595                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2596                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2597                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2598                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2599                         udelay(40);
2600                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2601                 }
2602         }
2603
2604         if (tg3_flag(tp, 5717_PLUS) &&
2605             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2606                 return 0;
2607
2608         tg3_phy_apply_otp(tp);
2609
2610         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2611                 tg3_phy_toggle_apd(tp, true);
2612         else
2613                 tg3_phy_toggle_apd(tp, false);
2614
2615 out:
2616         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2617             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2618                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2619                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2620                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2621         }
2622
2623         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2624                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2625                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2626         }
2627
2628         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2629                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2630                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2631                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2632                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2633                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2634                 }
2635         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2636                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2637                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2638                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2639                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2640                                 tg3_writephy(tp, MII_TG3_TEST1,
2641                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2642                         } else
2643                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2644
2645                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2646                 }
2647         }
2648
2649         /* Set the extended packet length bit (bit 14) on all chips
2650          * that support jumbo frames. */
2651         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2652                 /* Cannot do read-modify-write on 5401 */
2653                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2654         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2655                 /* Set bit 14 with read-modify-write to preserve other bits */
2656                 err = tg3_phy_auxctl_read(tp,
2657                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2658                 if (!err)
2659                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2660                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2661         }
2662
2663         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2664          * jumbo frames transmission.
2665          */
2666         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2667                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2668                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2669                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2670         }
2671
2672         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2673                 /* adjust output voltage */
2674                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2675         }
2676
2677         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2678                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2679
2680         tg3_phy_toggle_automdix(tp, 1);
2681         tg3_phy_set_wirespeed(tp);
2682         return 0;
2683 }
2684
2685 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2686 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2687 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2688                                           TG3_GPIO_MSG_NEED_VAUX)
2689 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2690         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2691          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2692          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2693          (TG3_GPIO_MSG_DRVR_PRES << 12))
2694
2695 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2696         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2697          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2698          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2699          (TG3_GPIO_MSG_NEED_VAUX << 12))
2700
2701 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2702 {
2703         u32 status, shift;
2704
2705         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2706             tg3_asic_rev(tp) == ASIC_REV_5719)
2707                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2708         else
2709                 status = tr32(TG3_CPMU_DRV_STATUS);
2710
2711         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2712         status &= ~(TG3_GPIO_MSG_MASK << shift);
2713         status |= (newstat << shift);
2714
2715         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2716             tg3_asic_rev(tp) == ASIC_REV_5719)
2717                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2718         else
2719                 tw32(TG3_CPMU_DRV_STATUS, status);
2720
2721         return status >> TG3_APE_GPIO_MSG_SHIFT;
2722 }
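/* Layout assumed by the function above: each PCI function owns a
 * 4-bit field in the shared status word, so function n's bits sit at
 * TG3_APE_GPIO_MSG_SHIFT + 4 * n.  With two message bits per function
 * (DRVR_PRES and NEED_VAUX), the ALL_*_MASK macros above simply OR
 * the same bit at shifts 0, 4, 8 and 12 to test every function at
 * once.
 */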
2723
2724 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2725 {
2726         if (!tg3_flag(tp, IS_NIC))
2727                 return 0;
2728
2729         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2730             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2731             tg3_asic_rev(tp) == ASIC_REV_5720) {
2732                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2733                         return -EIO;
2734
2735                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2736
2737                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2738                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2739
2740                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2741         } else {
2742                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2743                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2744         }
2745
2746         return 0;
2747 }
2748
2749 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2750 {
2751         u32 grc_local_ctrl;
2752
2753         if (!tg3_flag(tp, IS_NIC) ||
2754             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2755             tg3_asic_rev(tp) == ASIC_REV_5701)
2756                 return;
2757
2758         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2759
2760         tw32_wait_f(GRC_LOCAL_CTRL,
2761                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2762                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2763
2764         tw32_wait_f(GRC_LOCAL_CTRL,
2765                     grc_local_ctrl,
2766                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2767
2768         tw32_wait_f(GRC_LOCAL_CTRL,
2769                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2770                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2771 }
2772
2773 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2774 {
2775         if (!tg3_flag(tp, IS_NIC))
2776                 return;
2777
2778         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2779             tg3_asic_rev(tp) == ASIC_REV_5701) {
2780                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2781                             (GRC_LCLCTRL_GPIO_OE0 |
2782                              GRC_LCLCTRL_GPIO_OE1 |
2783                              GRC_LCLCTRL_GPIO_OE2 |
2784                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2785                              GRC_LCLCTRL_GPIO_OUTPUT1),
2786                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2787         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2788                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2789                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2790                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2791                                      GRC_LCLCTRL_GPIO_OE1 |
2792                                      GRC_LCLCTRL_GPIO_OE2 |
2793                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2794                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2795                                      tp->grc_local_ctrl;
2796                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2797                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2798
2799                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2800                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2801                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2802
2803                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2804                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2805                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2806         } else {
2807                 u32 no_gpio2;
2808                 u32 grc_local_ctrl = 0;
2809
2810                 /* Workaround to prevent overdrawing current. */
2811                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2812                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2813                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2814                                     grc_local_ctrl,
2815                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2816                 }
2817
2818                 /* On 5753 and variants, GPIO2 cannot be used. */
2819                 no_gpio2 = tp->nic_sram_data_cfg &
2820                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2821
2822                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2823                                   GRC_LCLCTRL_GPIO_OE1 |
2824                                   GRC_LCLCTRL_GPIO_OE2 |
2825                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2826                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2827                 if (no_gpio2) {
2828                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2829                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2830                 }
2831                 tw32_wait_f(GRC_LOCAL_CTRL,
2832                             tp->grc_local_ctrl | grc_local_ctrl,
2833                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2834
2835                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2836
2837                 tw32_wait_f(GRC_LOCAL_CTRL,
2838                             tp->grc_local_ctrl | grc_local_ctrl,
2839                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2840
2841                 if (!no_gpio2) {
2842                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2843                         tw32_wait_f(GRC_LOCAL_CTRL,
2844                                     tp->grc_local_ctrl | grc_local_ctrl,
2845                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2846                 }
2847         }
2848 }
2849
2850 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2851 {
2852         u32 msg = 0;
2853
2854         /* Serialize power state transitions */
2855         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2856                 return;
2857
2858         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2859                 msg = TG3_GPIO_MSG_NEED_VAUX;
2860
2861         msg = tg3_set_function_status(tp, msg);
2862
2863         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2864                 goto done;
2865
2866         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2867                 tg3_pwrsrc_switch_to_vaux(tp);
2868         else
2869                 tg3_pwrsrc_die_with_vmain(tp);
2870
2871 done:
2872         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2873 }
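/* Power arbitration in the helper above: tg3_set_function_status()
 * replaces this function's 4-bit field (clearing its DRVR_PRES bit,
 * since msg is either 0 or NEED_VAUX) and returns the aggregate
 * status.  If any other function still reports a present driver, that
 * driver owns the power-source decision and this one backs off;
 * otherwise the last function standing switches to Vaux if anyone
 * requested it, or lets the board die with Vmain.
 */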
2874
2875 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2876 {
2877         bool need_vaux = false;
2878
2879         /* The GPIOs do something completely different on 57765. */
2880         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2881                 return;
2882
2883         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2884             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2885             tg3_asic_rev(tp) == ASIC_REV_5720) {
2886                 tg3_frob_aux_power_5717(tp, include_wol ?
2887                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2888                 return;
2889         }
2890
2891         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2892                 struct net_device *dev_peer;
2893
2894                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2895
2896                 /* remove_one() may have been run on the peer. */
2897                 if (dev_peer) {
2898                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2899
2900                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2901                                 return;
2902
2903                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2904                             tg3_flag(tp_peer, ENABLE_ASF))
2905                                 need_vaux = true;
2906                 }
2907         }
2908
2909         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2910             tg3_flag(tp, ENABLE_ASF))
2911                 need_vaux = true;
2912
2913         if (need_vaux)
2914                 tg3_pwrsrc_switch_to_vaux(tp);
2915         else
2916                 tg3_pwrsrc_die_with_vmain(tp);
2917 }
2918
2919 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2920 {
2921         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2922                 return 1;
2923         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2924                 if (speed != SPEED_10)
2925                         return 1;
2926         } else if (speed == SPEED_10)
2927                 return 1;
2928
2929         return 0;
2930 }
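
/* Illustrative summary of tg3_5700_link_polarity() (a note, not driver
 * logic): it returns 1, telling the caller to set MAC_MODE_LINK_POLARITY,
 * when the LED mode is LED_CTRL_MODE_PHY_2, when a BCM5411 PHY runs at
 * 100/1000 Mbps, or when any other PHY runs at 10 Mbps.
 */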
2931
2932 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2933 {
2934         u32 val;
2935
2936         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2937                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
2938                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2939                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2940
2941                         sg_dig_ctrl |=
2942                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2943                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2944                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2945                 }
2946                 return;
2947         }
2948
2949         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2950                 tg3_bmcr_reset(tp);
2951                 val = tr32(GRC_MISC_CFG);
2952                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2953                 udelay(40);
2954                 return;
2955         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2956                 u32 phytest;
2957                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2958                         u32 phy;
2959
2960                         tg3_writephy(tp, MII_ADVERTISE, 0);
2961                         tg3_writephy(tp, MII_BMCR,
2962                                      BMCR_ANENABLE | BMCR_ANRESTART);
2963
2964                         tg3_writephy(tp, MII_TG3_FET_TEST,
2965                                      phytest | MII_TG3_FET_SHADOW_EN);
2966                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2967                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2968                                 tg3_writephy(tp,
2969                                              MII_TG3_FET_SHDW_AUXMODE4,
2970                                              phy);
2971                         }
2972                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2973                 }
2974                 return;
2975         } else if (do_low_power) {
2976                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2977                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2978
2979                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2980                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2981                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2982                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2983         }
2984
2985         /* The PHY should not be powered down on some chips because
2986          * of hardware bugs.
2987          */
2988         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2989             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2990             (tg3_asic_rev(tp) == ASIC_REV_5780 &&
2991              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2992             (tg3_asic_rev(tp) == ASIC_REV_5717 &&
2993              !tp->pci_fn))
2994                 return;
2995
2996         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2997             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2998                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2999                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3000                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3001                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3002         }
3003
3004         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3005 }
3006
3007 /* tp->lock is held. */
3008 static int tg3_nvram_lock(struct tg3 *tp)
3009 {
3010         if (tg3_flag(tp, NVRAM)) {
3011                 int i;
3012
3013                 if (tp->nvram_lock_cnt == 0) {
3014                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3015                         for (i = 0; i < 8000; i++) {
3016                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3017                                         break;
3018                                 udelay(20);
3019                         }
3020                         if (i == 8000) {
3021                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3022                                 return -ENODEV;
3023                         }
3024                 }
3025                 tp->nvram_lock_cnt++;
3026         }
3027         return 0;
3028 }
3029
3030 /* tp->lock is held. */
3031 static void tg3_nvram_unlock(struct tg3 *tp)
3032 {
3033         if (tg3_flag(tp, NVRAM)) {
3034                 if (tp->nvram_lock_cnt > 0)
3035                         tp->nvram_lock_cnt--;
3036                 if (tp->nvram_lock_cnt == 0)
3037                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3038         }
3039 }
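
/* Usage note (illustrative; mirrors tg3_nvram_read() below): lock and
 * unlock calls must stay balanced, and they may nest because
 * tp->nvram_lock_cnt counts acquisitions -- only the outermost
 * tg3_nvram_unlock() clears the SWARB request. The wait inside
 * tg3_nvram_lock() is bounded at 8000 polls * 20 usecs ~= 160 ms before
 * it gives up with -ENODEV.
 *
 *	if (tg3_nvram_lock(tp))
 *		return -ENODEV;
 *	tg3_enable_nvram_access(tp);
 *	...
 *	tg3_disable_nvram_access(tp);
 *	tg3_nvram_unlock(tp);
 */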
3040
3041 /* tp->lock is held. */
3042 static void tg3_enable_nvram_access(struct tg3 *tp)
3043 {
3044         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3045                 u32 nvaccess = tr32(NVRAM_ACCESS);
3046
3047                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3048         }
3049 }
3050
3051 /* tp->lock is held. */
3052 static void tg3_disable_nvram_access(struct tg3 *tp)
3053 {
3054         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3055                 u32 nvaccess = tr32(NVRAM_ACCESS);
3056
3057                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3058         }
3059 }
3060
3061 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3062                                         u32 offset, u32 *val)
3063 {
3064         u32 tmp;
3065         int i;
3066
3067         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3068                 return -EINVAL;
3069
3070         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3071                                         EEPROM_ADDR_DEVID_MASK |
3072                                         EEPROM_ADDR_READ);
3073         tw32(GRC_EEPROM_ADDR,
3074              tmp |
3075              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3076              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3077               EEPROM_ADDR_ADDR_MASK) |
3078              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3079
3080         for (i = 0; i < 1000; i++) {
3081                 tmp = tr32(GRC_EEPROM_ADDR);
3082
3083                 if (tmp & EEPROM_ADDR_COMPLETE)
3084                         break;
3085                 msleep(1);
3086         }
3087         if (!(tmp & EEPROM_ADDR_COMPLETE))
3088                 return -EBUSY;
3089
3090         tmp = tr32(GRC_EEPROM_DATA);
3091
3092         /*
3093          * The data always comes back in the opposite of the native
3094          * endian format.  Perform a blind byteswap to compensate.
3095          */
3096         *val = swab32(tmp);
3097
3098         return 0;
3099 }
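
/* Illustrative example of the blind byteswap above: because the
 * interface always presents data in the opposite of the host's
 * endianness, the same swab32() corrects it on big- and little-endian
 * hosts alike. If the caller should see 0x12345678, the raw
 * GRC_EEPROM_DATA read yields 0x78563412 on either host.
 */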
3100
3101 #define NVRAM_CMD_TIMEOUT 10000
3102
3103 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3104 {
3105         int i;
3106
3107         tw32(NVRAM_CMD, nvram_cmd);
3108         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3109                 udelay(10);
3110                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3111                         udelay(10);
3112                         break;
3113                 }
3114         }
3115
3116         if (i == NVRAM_CMD_TIMEOUT)
3117                 return -EBUSY;
3118
3119         return 0;
3120 }
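
/* Timing note (illustrative): the loop above polls NVRAM_CMD_DONE at
 * 10 usec intervals, so NVRAM_CMD_TIMEOUT (10000) bounds one command at
 * roughly 10000 * 10 usecs = 100 ms before -EBUSY. A typical read, as
 * issued by tg3_nvram_read() below, is:
 *
 *	tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
 *			   NVRAM_CMD_FIRST | NVRAM_CMD_LAST |
 *			   NVRAM_CMD_DONE);
 */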
3121
3122 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3123 {
3124         if (tg3_flag(tp, NVRAM) &&
3125             tg3_flag(tp, NVRAM_BUFFERED) &&
3126             tg3_flag(tp, FLASH) &&
3127             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3128             (tp->nvram_jedecnum == JEDEC_ATMEL))
3129
3130                 addr = ((addr / tp->nvram_pagesize) <<
3131                         ATMEL_AT45DB0X1B_PAGE_POS) +
3132                        (addr % tp->nvram_pagesize);
3133
3134         return addr;
3135 }
3136
3137 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3138 {
3139         if (tg3_flag(tp, NVRAM) &&
3140             tg3_flag(tp, NVRAM_BUFFERED) &&
3141             tg3_flag(tp, FLASH) &&
3142             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3143             (tp->nvram_jedecnum == JEDEC_ATMEL))
3144
3145                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3146                         tp->nvram_pagesize) +
3147                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3148
3149         return addr;
3150 }
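
/* Worked example for the two translations above (illustrative; assumes
 * the 264-byte page size used with AT45DB0X1B parts and
 * ATMEL_AT45DB0X1B_PAGE_POS == 9): for linear addr = 1000,
 * tg3_nvram_phys_addr() computes page 1000 / 264 = 3 and byte
 * 1000 % 264 = 208, giving (3 << 9) + 208 = 0x6d0.
 * tg3_nvram_logical_addr() inverts this:
 * (0x6d0 >> 9) * 264 + (0x6d0 & 511) = 3 * 264 + 208 = 1000.
 */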
3151
3152 /* NOTE: Data read in from NVRAM is byteswapped according to
3153  * the byteswapping settings for all other register accesses.
3154  * tg3 devices are BE devices, so on a BE machine, the data
3155  * returned will be exactly as it is seen in NVRAM.  On a LE
3156  * machine, the 32-bit value will be byteswapped.
3157  */
3158 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3159 {
3160         int ret;
3161
3162         if (!tg3_flag(tp, NVRAM))
3163                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3164
3165         offset = tg3_nvram_phys_addr(tp, offset);
3166
3167         if (offset > NVRAM_ADDR_MSK)
3168                 return -EINVAL;
3169
3170         ret = tg3_nvram_lock(tp);
3171         if (ret)
3172                 return ret;
3173
3174         tg3_enable_nvram_access(tp);
3175
3176         tw32(NVRAM_ADDR, offset);
3177         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3178                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3179
3180         if (ret == 0)
3181                 *val = tr32(NVRAM_RDDATA);
3182
3183         tg3_disable_nvram_access(tp);
3184
3185         tg3_nvram_unlock(tp);
3186
3187         return ret;
3188 }
3189
3190 /* Ensures NVRAM data is in bytestream format. */
3191 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3192 {
3193         u32 v;
3194         int res = tg3_nvram_read(tp, offset, &v);
3195         if (!res)
3196                 *val = cpu_to_be32(v);
3197         return res;
3198 }
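
/* Worked example for the two read flavors above (illustrative): if the
 * four NVRAM bytes at @offset are 12 34 56 78, tg3_nvram_read() stores
 * *val == 0x12345678 on any host, so the in-memory byte layout matches
 * NVRAM only on big-endian machines. tg3_nvram_read_be32() applies
 * cpu_to_be32(), so the __be32 memory bytes are 12 34 56 78 on every
 * host -- the bytestream format that callers such as
 * tg3_nvram_write_block_unbuffered() rely on when they memcpy().
 */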
3199
3200 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3201                                     u32 offset, u32 len, u8 *buf)
3202 {
3203         int i, j, rc = 0;
3204         u32 val;
3205
3206         for (i = 0; i < len; i += 4) {
3207                 u32 addr;
3208                 __be32 data;
3209
3210                 addr = offset + i;
3211
3212                 memcpy(&data, buf + i, 4);
3213
3214                 /*
3215                  * The SEEPROM interface expects the data to always be in the
3216                  * opposite of the native endian format.  We accomplish this
3217                  * by reversing all the operations that would have been
3218                  * performed on the data by a call to tg3_nvram_read_be32().
3219                  */
3220                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3221
3222                 val = tr32(GRC_EEPROM_ADDR);
3223                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3224
3225                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3226                         EEPROM_ADDR_READ);
3227                 tw32(GRC_EEPROM_ADDR, val |
3228                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3229                         (addr & EEPROM_ADDR_ADDR_MASK) |
3230                         EEPROM_ADDR_START |
3231                         EEPROM_ADDR_WRITE);
3232
3233                 for (j = 0; j < 1000; j++) {
3234                         val = tr32(GRC_EEPROM_ADDR);
3235
3236                         if (val & EEPROM_ADDR_COMPLETE)
3237                                 break;
3238                         msleep(1);
3239                 }
3240                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3241                         rc = -EBUSY;
3242                         break;
3243                 }
3244         }
3245
3246         return rc;
3247 }
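
/* Illustrative trace of the reversal described above: for bytestream
 * buffer bytes 12 34 56 78, be32_to_cpu(data) == 0x12345678 on any host
 * and swab32() hands the SEEPROM interface 0x78563412 -- the mirror
 * image of the swab32() in tg3_nvram_read_using_eeprom(), so writing
 * and then reading an offset round-trips the data unchanged.
 */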
3248
3249 /* offset and length are dword aligned */
3250 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3251                 u8 *buf)
3252 {
3253         int ret = 0;
3254         u32 pagesize = tp->nvram_pagesize;
3255         u32 pagemask = pagesize - 1;
3256         u32 nvram_cmd;
3257         u8 *tmp;
3258
3259         tmp = kmalloc(pagesize, GFP_KERNEL);
3260         if (tmp == NULL)
3261                 return -ENOMEM;
3262
3263         while (len) {
3264                 int j;
3265                 u32 phy_addr, page_off, size;
3266
3267                 phy_addr = offset & ~pagemask;
3268
3269                 for (j = 0; j < pagesize; j += 4) {
3270                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3271                                                   (__be32 *) (tmp + j));
3272                         if (ret)
3273                                 break;
3274                 }
3275                 if (ret)
3276                         break;
3277
3278                 page_off = offset & pagemask;
3279                 size = pagesize;
3280                 if (len < size)
3281                         size = len;
3282
3283                 len -= size;
3284
3285                 memcpy(tmp + page_off, buf, size);
3286
3287                 offset = offset + (pagesize - page_off);
3288
3289                 tg3_enable_nvram_access(tp);
3290
3291                 /*
3292                  * Before we can erase the flash page, we need
3293                  * to issue a special "write enable" command.
3294                  */
3295                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3296
3297                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3298                         break;
3299
3300                 /* Erase the target page */
3301                 tw32(NVRAM_ADDR, phy_addr);
3302
3303                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3304                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3305
3306                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3307                         break;
3308
3309                 /* Issue another write enable to start the write. */
3310                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3311
3312                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3313                         break;
3314
3315                 for (j = 0; j < pagesize; j += 4) {
3316                         __be32 data;
3317
3318                         data = *((__be32 *) (tmp + j));
3319
3320                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3321
3322                         tw32(NVRAM_ADDR, phy_addr + j);
3323
3324                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3325                                 NVRAM_CMD_WR;
3326
3327                         if (j == 0)
3328                                 nvram_cmd |= NVRAM_CMD_FIRST;
3329                         else if (j == (pagesize - 4))
3330                                 nvram_cmd |= NVRAM_CMD_LAST;
3331
3332                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3333                         if (ret)
3334                                 break;
3335                 }
3336                 if (ret)
3337                         break;
3338         }
3339
3340         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3341         tg3_nvram_exec_cmd(tp, nvram_cmd);
3342
3343         kfree(tmp);
3344
3345         return ret;
3346 }
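
/* Flow summary for the unbuffered path above (illustrative): for each
 * page touched, it (1) reads the whole page into a bounce buffer via
 * tg3_nvram_read_be32(), (2) merges the caller's bytes at the page
 * offset, (3) issues a write enable followed by a page erase,
 * (4) issues another write enable and programs the page back word by
 * word, tagging the first word NVRAM_CMD_FIRST and the last word
 * NVRAM_CMD_LAST, and (5) finally sends NVRAM_CMD_WRDI to drop write
 * enable.
 */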
3347
3348 /* offset and length are dword aligned */
3349 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3350                 u8 *buf)
3351 {
3352         int i, ret = 0;
3353
3354         for (i = 0; i < len; i += 4, offset += 4) {
3355                 u32 page_off, phy_addr, nvram_cmd;
3356                 __be32 data;
3357
3358                 memcpy(&data, buf + i, 4);
3359                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3360
3361                 page_off = offset % tp->nvram_pagesize;
3362
3363                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3364
3365                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3366
3367                 if (page_off == 0 || i == 0)
3368                         nvram_cmd |= NVRAM_CMD_FIRST;
3369                 if (page_off == (tp->nvram_pagesize - 4))
3370                         nvram_cmd |= NVRAM_CMD_LAST;
3371
3372                 if (i == (len - 4))
3373                         nvram_cmd |= NVRAM_CMD_LAST;
3374
3375                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3376                     !tg3_flag(tp, FLASH) ||
3377                     !tg3_flag(tp, 57765_PLUS))
3378                         tw32(NVRAM_ADDR, phy_addr);
3379
3380                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3381                     !tg3_flag(tp, 5755_PLUS) &&
3382                     (tp->nvram_jedecnum == JEDEC_ST) &&
3383                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3384                         u32 cmd;
3385
3386                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3387                         ret = tg3_nvram_exec_cmd(tp, cmd);
3388                         if (ret)
3389                                 break;
3390                 }
3391                 if (!tg3_flag(tp, FLASH)) {
3392                         /* We always do complete word writes to the EEPROM. */
3393                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3394                 }
3395
3396                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3397                 if (ret)
3398                         break;
3399         }
3400         return ret;
3401 }
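
/* Worked example of the FIRST/LAST tagging above (illustrative; assumes
 * a 256-byte nvram_pagesize): writing len = 8 at offset = 252 issues
 * two commands. Word 0 (page_off = 252) carries NVRAM_CMD_FIRST because
 * i == 0 and NVRAM_CMD_LAST because page_off == 256 - 4; word 1
 * (page_off = 0 on the next page) carries NVRAM_CMD_FIRST again and
 * NVRAM_CMD_LAST because i == len - 4.
 */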
3402
3403 /* offset and length are dword aligned */
3404 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3405 {
3406         int ret;
3407
3408         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3409                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3410                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3411                 udelay(40);
3412         }
3413
3414         if (!tg3_flag(tp, NVRAM)) {
3415                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3416         } else {
3417                 u32 grc_mode;
3418
3419                 ret = tg3_nvram_lock(tp);
3420                 if (ret)
3421                         return ret;
3422
3423                 tg3_enable_nvram_access(tp);
3424                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3425                         tw32(NVRAM_WRITE1, 0x406);
3426
3427                 grc_mode = tr32(GRC_MODE);
3428                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3429
3430                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3431                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3432                                 buf);
3433                 } else {
3434                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3435                                 buf);
3436                 }
3437
3438                 grc_mode = tr32(GRC_MODE);
3439                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3440
3441                 tg3_disable_nvram_access(tp);
3442                 tg3_nvram_unlock(tp);
3443         }
3444
3445         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3446                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3447                 udelay(40);
3448         }
3449
3450         return ret;
3451 }
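
/* Illustrative sketch, not part of the driver: a minimal caller of
 * tg3_nvram_write_block() for a single dword. The helper name is
 * hypothetical; it assumes the conventions of the surrounding paths
 * (dword-aligned offset, data in the bytestream format produced by
 * tg3_nvram_read_be32()).
 */
static int __maybe_unused tg3_nvram_write_u32_sketch(struct tg3 *tp,
						     u32 offset, u32 val)
{
	__be32 data = cpu_to_be32(val);	/* bytestream format */

	if (offset & 3)
		return -EINVAL;

	return tg3_nvram_write_block(tp, offset, sizeof(data), (u8 *)&data);
}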
3452
3453 #define RX_CPU_SCRATCH_BASE     0x30000
3454 #define RX_CPU_SCRATCH_SIZE     0x04000
3455 #define TX_CPU_SCRATCH_BASE     0x34000
3456 #define TX_CPU_SCRATCH_SIZE     0x04000
3457
3458 /* tp->lock is held. */
3459 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3460 {
3461         int i;
3462         const int iters = 10000;
3463
3464         for (i = 0; i < iters; i++) {
3465                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3466                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3467                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3468                         break;
3469         }
3470
3471         return (i == iters) ? -EBUSY : 0;
3472 }
3473
3474 /* tp->lock is held. */
3475 static int tg3_rxcpu_pause(struct tg3 *tp)
3476 {
3477         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3478
3479         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3480         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3481         udelay(10);
3482
3483         return rc;
3484 }
3485
3486 /* tp->lock is held. */
3487 static int tg3_txcpu_pause(struct tg3 *tp)
3488 {
3489         return tg3_pause_cpu(tp, TX_CPU_BASE);
3490 }
3491
3492 /* tp->lock is held. */
3493 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3494 {
3495         tw32(cpu_base + CPU_STATE, 0xffffffff);
3496         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3497 }
3498
3499 /* tp->lock is held. */
3500 static void tg3_rxcpu_resume(struct tg3 *tp)
3501 {
3502         tg3_resume_cpu(tp, RX_CPU_BASE);
3503 }
3504
3505 /* tp->lock is held. */
3506 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3507 {
3508         int rc;
3509
3510         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3511
3512         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3513                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3514
3515                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3516                 return 0;
3517         }
3518         if (cpu_base == RX_CPU_BASE) {
3519                 rc = tg3_rxcpu_pause(tp);
3520         } else {
3521                 /*
3522                  * There is only an Rx CPU for the 5750 derivative in the
3523                  * BCM4785.
3524                  */
3525                 if (tg3_flag(tp, IS_SSB_CORE))
3526                         return 0;
3527
3528                 rc = tg3_txcpu_pause(tp);
3529         }
3530
3531         if (rc) {
3532                 netdev_err(tp->dev, "%s timed out halting %s CPU\n",
3533                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3534                 return -ENODEV;
3535         }
3536
3537         /* Clear firmware's nvram arbitration. */
3538         if (tg3_flag(tp, NVRAM))
3539                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3540         return 0;
3541 }
3542
3543 static int tg3_fw_data_len(struct tg3 *tp,
3544                            const struct tg3_firmware_hdr *fw_hdr)
3545 {
3546         int fw_len;
3547
3548         /* Non-fragmented firmware has one firmware header followed by a
3549          * contiguous chunk of data to be written. The length field in that
3550          * header is not the length of the data to be written but the
3551          * complete length of the bss. The data length is therefore
3552          * determined from tp->fw->size minus the headers.
3553          *
3554          * Fragmented firmware has a main header followed by multiple
3555          * fragments. Each fragment is identical to non-fragmented firmware:
3556          * a firmware header followed by a contiguous chunk of data. In
3557          * the main header, the length field is unused and set to 0xffffffff.
3558          * In each fragment header the length is the entire size of that
3559          * fragment, i.e. fragment data + header length. The data length is
3560          * therefore the length field in the header minus TG3_FW_HDR_LEN.
3561          */
3562         if (tp->fw_len == 0xffffffff)
3563                 fw_len = be32_to_cpu(fw_hdr->len);
3564         else
3565                 fw_len = tp->fw->size;
3566
3567         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3568 }
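
/* Worked example (illustrative; the header is three be32 words --
 * version, base_addr and len -- so TG3_FW_HDR_LEN is 12 bytes): a
 * non-fragmented image with tp->fw->size = 2060 yields
 * (2060 - 12) / 4 = 512 data words, while a fragment whose header len
 * field reads 268 contributes (268 - 12) / 4 = 64 words.
 */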
3569
3570 /* tp->lock is held. */
3571 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3572                                  u32 cpu_scratch_base, int cpu_scratch_size,
3573                                  const struct tg3_firmware_hdr *fw_hdr)
3574 {
3575         int err, i;
3576         void (*write_op)(struct tg3 *, u32, u32);
3577         int total_len = tp->fw->size;
3578
3579         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3580                 netdev_err(tp->dev,
3581                            "%s: attempted to load TX cpu firmware on a 5705-class chip\n",
3582                            __func__);
3583                 return -EINVAL;
3584         }
3585
3586         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3587                 write_op = tg3_write_mem;
3588         else
3589                 write_op = tg3_write_indirect_reg32;
3590
3591         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3592                 /* It is possible that bootcode is still loading at this point.
3593                  * Get the nvram lock first before halting the cpu.
3594                  */
3595                 int lock_err = tg3_nvram_lock(tp);
3596                 err = tg3_halt_cpu(tp, cpu_base);
3597                 if (!lock_err)
3598                         tg3_nvram_unlock(tp);
3599                 if (err)
3600                         goto out;
3601
3602                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3603                         write_op(tp, cpu_scratch_base + i, 0);
3604                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3605                 tw32(cpu_base + CPU_MODE,
3606                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3607         } else {
3608                 /* Subtract the additional main header for fragmented firmware
3609                  * and advance to the first fragment.
3610                  */
3611                 total_len -= TG3_FW_HDR_LEN;
3612                 fw_hdr++;
3613         }
3614
3615         do {
3616                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3617                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3618                         write_op(tp, cpu_scratch_base +
3619                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3620                                      (i * sizeof(u32)),
3621                                  be32_to_cpu(fw_data[i]));
3622
3623                 total_len -= be32_to_cpu(fw_hdr->len);
3624
3625                 /* Advance to next fragment */
3626                 fw_hdr = (struct tg3_firmware_hdr *)
3627                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3628         } while (total_len > 0);
3629
3630         err = 0;
3631
3632 out:
3633         return err;
3634 }
3635
3636 /* tp->lock is held. */
3637 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3638 {
3639         int i;
3640         const int iters = 5;
3641
3642         tw32(cpu_base + CPU_STATE, 0xffffffff);
3643         tw32_f(cpu_base + CPU_PC, pc);
3644
3645         for (i = 0; i < iters; i++) {
3646                 if (tr32(cpu_base + CPU_PC) == pc)
3647                         break;
3648                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3649                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3650                 tw32_f(cpu_base + CPU_PC, pc);
3651                 udelay(1000);
3652         }
3653
3654         return (i == iters) ? -EBUSY : 0;
3655 }
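
/* Timing note (illustrative): with iters = 5 and a 1 ms delay per
 * retry, the PC is re-poked for at most ~5 ms before -EBUSY.
 */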
3656
3657 /* tp->lock is held. */
3658 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3659 {
3660         const struct tg3_firmware_hdr *fw_hdr;
3661         int err;
3662
3663         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3664
3665         /* Firmware blob starts with version numbers, followed by
3666          * start address and length. We are setting the complete length:
3667          * length = end_address_of_bss - start_address_of_text.
3668          * The remainder is the blob to be loaded contiguously
3669          * from the start address. */
3670
3671         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3672                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3673                                     fw_hdr);
3674         if (err)
3675                 return err;
3676
3677         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3678                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3679                                     fw_hdr);
3680         if (err)
3681                 return err;
3682
3683         /* Now start up only the RX CPU. */
3684         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3685                                        be32_to_cpu(fw_hdr->base_addr));
3686         if (err) {
3687                 netdev_err(tp->dev, "%s failed to set RX CPU PC: got %08x, "
3688                            "expected %08x\n", __func__,
3689                            tr32(RX_CPU_BASE + CPU_PC),
3690                            be32_to_cpu(fw_hdr->base_addr));
3691                 return -ENODEV;
3692         }
3693
3694         tg3_rxcpu_resume(tp);
3695
3696         return 0;
3697 }
3698
3699 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3700 {
3701         const int iters = 1000;
3702         int i;
3703         u32 val;
3704
3705         /* Wait for the boot code to complete initialization and enter the
3706          * service loop. It is then safe to download service patches.
3707          */
3708         for (i = 0; i < iters; i++) {
3709                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3710                         break;
3711
3712                 udelay(10);
3713         }
3714
3715         if (i == iters) {
3716                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3717                 return -EBUSY;
3718         }
3719
3720         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3721         if (val & 0xff) {
3722                 netdev_warn(tp->dev,
3723                             "Other patches exist. Not downloading EEE patch\n");
3724                 return -EEXIST;
3725         }
3726
3727         return 0;
3728 }
3729
3730 /* tp->lock is held. */
3731 static void tg3_load_57766_firmware(struct tg3 *tp)
3732 {
3733         struct tg3_firmware_hdr *fw_hdr;
3734
3735         if (!tg3_flag(tp, NO_NVRAM))
3736                 return;
3737
3738         if (tg3_validate_rxcpu_state(tp))
3739                 return;
3740
3741         if (!tp->fw)
3742                 return;
3743
3744         /* This firmware blob has a different format than older firmware
3745          * releases, as described below. The main difference is that the
3746          * data is fragmented and written to non-contiguous locations.
3747          *
3748          * At the beginning there is a firmware header identical to other
3749          * firmware, consisting of version, base addr and length. The
3750          * length here is unused and set to 0xffffffff.
3751          *
3752          * This is followed by a series of firmware fragments, each
3753          * individually identical to previous firmware, i.e. a firmware
3754          * header followed by the data for that fragment. The version
3755          * field of the individual fragment headers is unused.
3756          */
3757
3758         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3759         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3760                 return;
3761
3762         if (tg3_rxcpu_pause(tp))
3763                 return;
3764
3765         /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3766         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3767
3768         tg3_rxcpu_resume(tp);
3769 }
3770
3771 /* tp->lock is held. */
3772 static int tg3_load_tso_firmware(struct tg3 *tp)
3773 {
3774         const struct tg3_firmware_hdr *fw_hdr;
3775         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3776         int err;
3777
3778         if (!tg3_flag(tp, FW_TSO))
3779                 return 0;
3780
3781         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3782
3783         /* Firmware blob starts with version numbers, followed by
3784          * start address and length. We are setting the complete length:
3785          * length = end_address_of_bss - start_address_of_text.
3786          * The remainder is the blob to be loaded contiguously
3787          * from the start address. */
3788
3789         cpu_scratch_size = tp->fw_len;
3790
3791         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3792                 cpu_base = RX_CPU_BASE;
3793                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3794         } else {
3795                 cpu_base = TX_CPU_BASE;
3796                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3797                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3798         }
3799
3800         err = tg3_load_firmware_cpu(tp, cpu_base,
3801                                     cpu_scratch_base, cpu_scratch_size,
3802                                     fw_hdr);
3803         if (err)
3804                 return err;
3805
3806         /* Now start up the CPU. */
3807         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3808                                        be32_to_cpu(fw_hdr->base_addr));
3809         if (err) {
3810                 netdev_err(tp->dev,
3811                            "%s failed to set CPU PC: got %08x, expected %08x\n",
3812                            __func__, tr32(cpu_base + CPU_PC),
3813                            be32_to_cpu(fw_hdr->base_addr));
3814                 return -ENODEV;
3815         }
3816
3817         tg3_resume_cpu(tp, cpu_base);
3818         return 0;
3819 }
3820
3822 /* tp->lock is held. */
3823 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3824 {
3825         u32 addr_high, addr_low;
3826         int i;
3827
3828         addr_high = ((tp->dev->dev_addr[0] << 8) |
3829                      tp->dev->dev_addr[1]);
3830         addr_low = ((tp->dev->dev_addr[2] << 24) |
3831                     (tp->dev->dev_addr[3] << 16) |
3832                     (tp->dev->dev_addr[4] <<  8) |
3833                     (tp->dev->dev_addr[5] <<  0));
3834         for (i = 0; i < 4; i++) {
3835                 if (i == 1 && skip_mac_1)
3836                         continue;
3837                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3838                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3839         }
3840
3841         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3842             tg3_asic_rev(tp) == ASIC_REV_5704) {
3843                 for (i = 0; i < 12; i++) {
3844                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3845                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3846                 }
3847         }
3848
3849         addr_high = (tp->dev->dev_addr[0] +
3850                      tp->dev->dev_addr[1] +
3851                      tp->dev->dev_addr[2] +
3852                      tp->dev->dev_addr[3] +
3853                      tp->dev->dev_addr[4] +
3854                      tp->dev->dev_addr[5]) &
3855                 TX_BACKOFF_SEED_MASK;
3856         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3857 }
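
/* Worked example of the packing above (illustrative, hypothetical
 * station address 00:10:18:aa:bb:cc): addr_high becomes 0x00000010 and
 * addr_low 0x18aabbcc, while the backoff seed is the byte sum
 * (0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc) & TX_BACKOFF_SEED_MASK.
 */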
3858
3859 static void tg3_enable_register_access(struct tg3 *tp)
3860 {
3861         /*
3862          * Make sure register accesses (indirect or otherwise) will function
3863          * correctly.
3864          */
3865         pci_write_config_dword(tp->pdev,
3866                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3867 }
3868
3869 static int tg3_power_up(struct tg3 *tp)
3870 {
3871         int err;
3872
3873         tg3_enable_register_access(tp);
3874
3875         err = pci_set_power_state(tp->pdev, PCI_D0);
3876         if (!err) {
3877                 /* Switch out of Vaux if it is a NIC */
3878                 tg3_pwrsrc_switch_to_vmain(tp);
3879         } else {
3880                 netdev_err(tp->dev, "Transition to D0 failed\n");
3881         }
3882
3883         return err;
3884 }
3885
3886 static int tg3_setup_phy(struct tg3 *, int);
3887
3888 static int tg3_power_down_prepare(struct tg3 *tp)
3889 {
3890         u32 misc_host_ctrl;
3891         bool device_should_wake, do_low_power;
3892
3893         tg3_enable_register_access(tp);
3894
3895         /* Restore the CLKREQ setting. */
3896         if (tg3_flag(tp, CLKREQ_BUG))
3897                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3898                                          PCI_EXP_LNKCTL_CLKREQ_EN);
3899
3900         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3901         tw32(TG3PCI_MISC_HOST_CTRL,
3902              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3903
3904         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3905                              tg3_flag(tp, WOL_ENABLE);
3906
3907         if (tg3_flag(tp, USE_PHYLIB)) {
3908                 do_low_power = false;
3909                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3910                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3911                         struct phy_device *phydev;
3912                         u32 phyid, advertising;
3913
3914                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3915
3916                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3917
3918                         tp->link_config.speed = phydev->speed;
3919                         tp->link_config.duplex = phydev->duplex;
3920                         tp->link_config.autoneg = phydev->autoneg;
3921                         tp->link_config.advertising = phydev->advertising;
3922
3923                         advertising = ADVERTISED_TP |
3924                                       ADVERTISED_Pause |
3925                                       ADVERTISED_Autoneg |
3926                                       ADVERTISED_10baseT_Half;
3927
3928                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3929                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3930                                         advertising |=
3931                                                 ADVERTISED_100baseT_Half |
3932                                                 ADVERTISED_100baseT_Full |
3933                                                 ADVERTISED_10baseT_Full;
3934                                 else
3935                                         advertising |= ADVERTISED_10baseT_Full;
3936                         }
3937
3938                         phydev->advertising = advertising;
3939
3940                         phy_start_aneg(phydev);
3941
3942                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3943                         if (phyid != PHY_ID_BCMAC131) {
3944                                 phyid &= PHY_BCM_OUI_MASK;
3945                                 if (phyid == PHY_BCM_OUI_1 ||
3946                                     phyid == PHY_BCM_OUI_2 ||
3947                                     phyid == PHY_BCM_OUI_3)
3948                                         do_low_power = true;
3949                         }
3950                 }
3951         } else {
3952                 do_low_power = true;
3953
3954                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3955                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3956
3957                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3958                         tg3_setup_phy(tp, 0);
3959         }
3960
3961         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3962                 u32 val;
3963
3964                 val = tr32(GRC_VCPU_EXT_CTRL);
3965                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3966         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3967                 int i;
3968                 u32 val;
3969
3970                 for (i = 0; i < 200; i++) {
3971                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3972                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3973                                 break;
3974                         msleep(1);
3975                 }
3976         }
3977         if (tg3_flag(tp, WOL_CAP))
3978                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3979                                                      WOL_DRV_STATE_SHUTDOWN |
3980                                                      WOL_DRV_WOL |
3981                                                      WOL_SET_MAGIC_PKT);
3982
3983         if (device_should_wake) {
3984                 u32 mac_mode;
3985
3986                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3987                         if (do_low_power &&
3988                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3989                                 tg3_phy_auxctl_write(tp,
3990                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3991                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3992                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3993                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3994                                 udelay(40);
3995                         }
3996
3997                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3998                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3999                         else
4000                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4001
4002                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4003                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4004                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4005                                              SPEED_100 : SPEED_10;
4006                                 if (tg3_5700_link_polarity(tp, speed))
4007                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4008                                 else
4009                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4010                         }
4011                 } else {
4012                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4013                 }
4014
4015                 if (!tg3_flag(tp, 5750_PLUS))
4016                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4017
4018                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4019                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4020                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4021                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4022
4023                 if (tg3_flag(tp, ENABLE_APE))
4024                         mac_mode |= MAC_MODE_APE_TX_EN |
4025                                     MAC_MODE_APE_RX_EN |
4026                                     MAC_MODE_TDE_ENABLE;
4027
4028                 tw32_f(MAC_MODE, mac_mode);
4029                 udelay(100);
4030
4031                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4032                 udelay(10);
4033         }
4034
4035         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4036             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4037              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4038                 u32 base_val;
4039
4040                 base_val = tp->pci_clock_ctrl;
4041                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4042                              CLOCK_CTRL_TXCLK_DISABLE);
4043
4044                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4045                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4046         } else if (tg3_flag(tp, 5780_CLASS) ||
4047                    tg3_flag(tp, CPMU_PRESENT) ||
4048                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4049                 /* do nothing */
4050         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4051                 u32 newbits1, newbits2;
4052
4053                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4054                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4055                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4056                                     CLOCK_CTRL_TXCLK_DISABLE |
4057                                     CLOCK_CTRL_ALTCLK);
4058                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4059                 } else if (tg3_flag(tp, 5705_PLUS)) {
4060                         newbits1 = CLOCK_CTRL_625_CORE;
4061                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4062                 } else {
4063                         newbits1 = CLOCK_CTRL_ALTCLK;
4064                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4065                 }
4066
4067                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4068                             40);
4069
4070                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4071                             40);
4072
4073                 if (!tg3_flag(tp, 5705_PLUS)) {
4074                         u32 newbits3;
4075
4076                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4077                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4078                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4079                                             CLOCK_CTRL_TXCLK_DISABLE |
4080                                             CLOCK_CTRL_44MHZ_CORE);
4081                         } else {
4082                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4083                         }
4084
4085                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4086                                     tp->pci_clock_ctrl | newbits3, 40);
4087                 }
4088         }
4089
4090         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
4091                 tg3_power_down_phy(tp, do_low_power);
4092
4093         tg3_frob_aux_power(tp, true);
4094
4095         /* Workaround for unstable PLL clock */
4096         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4097             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4098              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4099                 u32 val = tr32(0x7d00);
4100
4101                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4102                 tw32(0x7d00, val);
4103                 if (!tg3_flag(tp, ENABLE_ASF)) {
4104                         int err;
4105
4106                         err = tg3_nvram_lock(tp);
4107                         tg3_halt_cpu(tp, RX_CPU_BASE);
4108                         if (!err)
4109                                 tg3_nvram_unlock(tp);
4110                 }
4111         }
4112
4113         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4114
4115         return 0;
4116 }
4117
4118 static void tg3_power_down(struct tg3 *tp)
4119 {
4120         tg3_power_down_prepare(tp);
4121
4122         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4123         pci_set_power_state(tp->pdev, PCI_D3hot);
4124 }
4125
4126 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4127 {
4128         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4129         case MII_TG3_AUX_STAT_10HALF:
4130                 *speed = SPEED_10;
4131                 *duplex = DUPLEX_HALF;
4132                 break;
4133
4134         case MII_TG3_AUX_STAT_10FULL:
4135                 *speed = SPEED_10;
4136                 *duplex = DUPLEX_FULL;
4137                 break;
4138
4139         case MII_TG3_AUX_STAT_100HALF:
4140                 *speed = SPEED_100;
4141                 *duplex = DUPLEX_HALF;
4142                 break;
4143
4144         case MII_TG3_AUX_STAT_100FULL:
4145                 *speed = SPEED_100;
4146                 *duplex = DUPLEX_FULL;
4147                 break;
4148
4149         case MII_TG3_AUX_STAT_1000HALF:
4150                 *speed = SPEED_1000;
4151                 *duplex = DUPLEX_HALF;
4152                 break;
4153
4154         case MII_TG3_AUX_STAT_1000FULL:
4155                 *speed = SPEED_1000;
4156                 *duplex = DUPLEX_FULL;
4157                 break;
4158
4159         default:
4160                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4161                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4162                                  SPEED_10;
4163                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4164                                   DUPLEX_HALF;
4165                         break;
4166                 }
4167                 *speed = SPEED_UNKNOWN;
4168                 *duplex = DUPLEX_UNKNOWN;
4169                 break;
4170         }
4171 }
4172
4173 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4174 {
4175         int err = 0;
4176         u32 val, new_adv;
4177
4178         new_adv = ADVERTISE_CSMA;
4179         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4180         new_adv |= mii_advertise_flowctrl(flowctrl);
4181
4182         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4183         if (err)
4184                 goto done;
4185
4186         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4187                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4188
4189                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4190                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4191                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4192
4193                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4194                 if (err)
4195                         goto done;
4196         }
4197
4198         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4199                 goto done;
4200
4201         tw32(TG3_CPMU_EEE_MODE,
4202              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4203
4204         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4205         if (!err) {
4206                 u32 err2;
4207
4208                 val = 0;
4209                 /* Advertise 100BASE-TX EEE ability */
4210                 if (advertise & ADVERTISED_100baseT_Full)
4211                         val |= MDIO_AN_EEE_ADV_100TX;
4212                 /* Advertise 1000BASE-T EEE ability */
4213                 if (advertise & ADVERTISED_1000baseT_Full)
4214                         val |= MDIO_AN_EEE_ADV_1000T;
4215                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4216                 if (err)
4217                         val = 0;
4218
4219                 switch (tg3_asic_rev(tp)) {
4220                 case ASIC_REV_5717:
4221                 case ASIC_REV_57765:
4222                 case ASIC_REV_57766:
4223                 case ASIC_REV_5719:
4224                         /* If we advertised any EEE abilities above... */
4225                         if (val)
4226                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4227                                       MII_TG3_DSP_TAP26_RMRXSTO |
4228                                       MII_TG3_DSP_TAP26_OPCSINPT;
4229                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4230                         /* Fall through */
4231                 case ASIC_REV_5720:
4232                 case ASIC_REV_5762:
4233                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4234                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4235                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4236                 }
4237
4238                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4239                 if (!err)
4240                         err = err2;
4241         }
4242
4243 done:
4244         return err;
4245 }
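
/* Worked example of the advertisement mapping above (illustrative):
 * for advertise = ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Full
 * and flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX, MII_ADVERTISE is written
 * with ADVERTISE_CSMA | ADVERTISE_10FULL | ADVERTISE_100FULL |
 * ADVERTISE_PAUSE_CAP (symmetric pause advertises PAUSE_CAP alone),
 * and MII_CTRL1000 gets no 1000BASE-T bits because neither
 * ADVERTISED_1000baseT_* flag is set.
 */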
4246
4247 static void tg3_phy_copper_begin(struct tg3 *tp)
4248 {
4249         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4250             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4251                 u32 adv, fc;
4252
4253                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4254                         adv = ADVERTISED_10baseT_Half |
4255                               ADVERTISED_10baseT_Full;
4256                         if (tg3_flag(tp, WOL_SPEED_100MB))
4257                                 adv |= ADVERTISED_100baseT_Half |
4258                                        ADVERTISED_100baseT_Full;
4259
4260                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4261                 } else {
4262                         adv = tp->link_config.advertising;
4263                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4264                                 adv &= ~(ADVERTISED_1000baseT_Half |
4265                                          ADVERTISED_1000baseT_Full);
4266
4267                         fc = tp->link_config.flowctrl;
4268                 }
4269
4270                 tg3_phy_autoneg_cfg(tp, adv, fc);
4271
4272                 tg3_writephy(tp, MII_BMCR,
4273                              BMCR_ANENABLE | BMCR_ANRESTART);
4274         } else {
4275                 int i;
4276                 u32 bmcr, orig_bmcr;
4277
4278                 tp->link_config.active_speed = tp->link_config.speed;
4279                 tp->link_config.active_duplex = tp->link_config.duplex;
4280
4281                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4282                         /* With autoneg disabled, 5715 only links up when the
4283                          * advertisement register has the configured speed
4284                          * enabled.
4285                          */
4286                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4287                 }
4288
4289                 bmcr = 0;
4290                 switch (tp->link_config.speed) {
4291                 default:
4292                 case SPEED_10:
4293                         break;
4294
4295                 case SPEED_100:
4296                         bmcr |= BMCR_SPEED100;
4297                         break;
4298
4299                 case SPEED_1000:
4300                         bmcr |= BMCR_SPEED1000;
4301                         break;
4302                 }
4303
4304                 if (tp->link_config.duplex == DUPLEX_FULL)
4305                         bmcr |= BMCR_FULLDPLX;
4306
4307                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4308                     (bmcr != orig_bmcr)) {
4309                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4310                         for (i = 0; i < 1500; i++) {
4311                                 u32 tmp;
4312
4313                                 udelay(10);
4314                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4315                                     tg3_readphy(tp, MII_BMSR, &tmp))
4316                                         continue;
4317                                 if (!(tmp & BMSR_LSTATUS)) {
4318                                         udelay(40);
4319                                         break;
4320                                 }
4321                         }
4322                         tg3_writephy(tp, MII_BMCR, bmcr);
4323                         udelay(40);
4324                 }
4325         }
4326 }
4327
4328 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4329 {
4330         int err;
4331
4332         /* Turn off tap power management. */
4333         /* Set Extended packet length bit */
4334         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4335
4336         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4337         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4338         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4339         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4340         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4341
4342         udelay(40);
4343
4344         return err;
4345 }
4346
4347 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4348 {
4349         u32 advmsk, tgtadv, advertising;
4350
4351         advertising = tp->link_config.advertising;
4352         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4353
4354         advmsk = ADVERTISE_ALL;
4355         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4356                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4357                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4358         }
4359
4360         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4361                 return false;
4362
4363         if ((*lcladv & advmsk) != tgtadv)
4364                 return false;
4365
4366         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4367                 u32 tg3_ctrl;
4368
4369                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4370
4371                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4372                         return false;
4373
4374                 if (tgtadv &&
4375                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4376                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4377                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4378                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4379                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4380                 } else {
4381                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4382                 }
4383
4384                 if (tg3_ctrl != tgtadv)
4385                         return false;
4386         }
4387
4388         return true;
4389 }
4390
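/* Fetch the link partner's abilities (MII_STAT1000 on gigabit-capable
 * PHYs, MII_LPA for the rest) and cache them in ethtool form in
 * tp->link_config.rmt_adv.  Returns false on a PHY read error.
 */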
4391 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4392 {
4393         u32 lpeth = 0;
4394
4395         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4396                 u32 val;
4397
4398                 if (tg3_readphy(tp, MII_STAT1000, &val))
4399                         return false;
4400
4401                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4402         }
4403
4404         if (tg3_readphy(tp, MII_LPA, rmtadv))
4405                 return false;
4406
4407         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4408         tp->link_config.rmt_adv = lpeth;
4409
4410         return true;
4411 }
4412
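/* Propagate a link state change to the networking core (carrier
 * on/off) and log it via tg3_link_report().  Returns true if the
 * state actually changed.
 */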
4413 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4414 {
4415         if (curr_link_up != tp->link_up) {
4416                 if (curr_link_up) {
4417                         netif_carrier_on(tp->dev);
4418                 } else {
4419                         netif_carrier_off(tp->dev);
4420                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4421                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4422                 }
4423
4424                 tg3_link_report(tp);
4425                 return true;
4426         }
4427
4428         return false;
4429 }
4430
4431 static void tg3_clear_mac_status(struct tg3 *tp)
4432 {
4433         tw32(MAC_EVENT, 0);
4434
4435         tw32_f(MAC_STATUS,
4436                MAC_STATUS_SYNC_CHANGED |
4437                MAC_STATUS_CFG_CHANGED |
4438                MAC_STATUS_MI_COMPLETION |
4439                MAC_STATUS_LNKSTATE_CHANGED);
4440         udelay(40);
4441 }
4442
4443 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4444 {
4445         int current_link_up;
4446         u32 bmsr, val;
4447         u32 lcl_adv, rmt_adv;
4448         u16 current_speed;
4449         u8 current_duplex;
4450         int i, err;
4451
4452         tg3_clear_mac_status(tp);
4453
4454         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4455                 tw32_f(MAC_MI_MODE,
4456                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4457                 udelay(80);
4458         }
4459
4460         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4461
4462         /* Some third-party PHYs need to be reset when the link
4463          * goes down.
4464          */
4465         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4466              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4467              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4468             tp->link_up) {
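                /* BMSR latches link-down events, so read it twice;
                 * the second read reflects the current link state.
                 */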
4469                 tg3_readphy(tp, MII_BMSR, &bmsr);
4470                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4471                     !(bmsr & BMSR_LSTATUS))
4472                         force_reset = 1;
4473         }
4474         if (force_reset)
4475                 tg3_phy_reset(tp);
4476
4477         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4478                 tg3_readphy(tp, MII_BMSR, &bmsr);
4479                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4480                     !tg3_flag(tp, INIT_COMPLETE))
4481                         bmsr = 0;
4482
4483                 if (!(bmsr & BMSR_LSTATUS)) {
4484                         err = tg3_init_5401phy_dsp(tp);
4485                         if (err)
4486                                 return err;
4487
4488                         tg3_readphy(tp, MII_BMSR, &bmsr);
4489                         for (i = 0; i < 1000; i++) {
4490                                 udelay(10);
4491                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4492                                     (bmsr & BMSR_LSTATUS)) {
4493                                         udelay(40);
4494                                         break;
4495                                 }
4496                         }
4497
4498                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4499                             TG3_PHY_REV_BCM5401_B0 &&
4500                             !(bmsr & BMSR_LSTATUS) &&
4501                             tp->link_config.active_speed == SPEED_1000) {
4502                                 err = tg3_phy_reset(tp);
4503                                 if (!err)
4504                                         err = tg3_init_5401phy_dsp(tp);
4505                                 if (err)
4506                                         return err;
4507                         }
4508                 }
4509         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4510                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4511                 /* 5701 {A0,B0} CRC bug workaround */
4512                 tg3_writephy(tp, 0x15, 0x0a75);
4513                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4514                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4515                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4516         }
4517
4518         /* Clear pending interrupts... */
4519         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4520         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4521
4522         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4523                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4524         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4525                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4526
4527         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4528             tg3_asic_rev(tp) == ASIC_REV_5701) {
4529                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4530                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4531                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4532                 else
4533                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4534         }
4535
4536         current_link_up = 0;
4537         current_speed = SPEED_UNKNOWN;
4538         current_duplex = DUPLEX_UNKNOWN;
4539         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4540         tp->link_config.rmt_adv = 0;
4541
4542         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4543                 err = tg3_phy_auxctl_read(tp,
4544                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4545                                           &val);
4546                 if (!err && !(val & (1 << 10))) {
4547                         tg3_phy_auxctl_write(tp,
4548                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4549                                              val | (1 << 10));
4550                         goto relink;
4551                 }
4552         }
4553
4554         bmsr = 0;
4555         for (i = 0; i < 100; i++) {
4556                 tg3_readphy(tp, MII_BMSR, &bmsr);
4557                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4558                     (bmsr & BMSR_LSTATUS))
4559                         break;
4560                 udelay(40);
4561         }
4562
4563         if (bmsr & BMSR_LSTATUS) {
4564                 u32 aux_stat, bmcr;
4565
4566                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4567                 for (i = 0; i < 2000; i++) {
4568                         udelay(10);
4569                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4570                             aux_stat)
4571                                 break;
4572                 }
4573
4574                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4575                                              &current_speed,
4576                                              &current_duplex);
4577
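                /* Poll until BMCR returns something other than 0 or
                 * 0x7fff, which are treated as invalid reads.
                 */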
4578                 bmcr = 0;
4579                 for (i = 0; i < 200; i++) {
4580                         tg3_readphy(tp, MII_BMCR, &bmcr);
4581                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4582                                 continue;
4583                         if (bmcr && bmcr != 0x7fff)
4584                                 break;
4585                         udelay(10);
4586                 }
4587
4588                 lcl_adv = 0;
4589                 rmt_adv = 0;
4590
4591                 tp->link_config.active_speed = current_speed;
4592                 tp->link_config.active_duplex = current_duplex;
4593
4594                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4595                         if ((bmcr & BMCR_ANENABLE) &&
4596                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4597                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4598                                 current_link_up = 1;
4599                 } else {
4600                         if (!(bmcr & BMCR_ANENABLE) &&
4601                             tp->link_config.speed == current_speed &&
4602                             tp->link_config.duplex == current_duplex) {
4603                                 current_link_up = 1;
4604                         }
4605                 }
4606
4607                 if (current_link_up == 1 &&
4608                     tp->link_config.active_duplex == DUPLEX_FULL) {
4609                         u32 reg, bit;
4610
4611                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4612                                 reg = MII_TG3_FET_GEN_STAT;
4613                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4614                         } else {
4615                                 reg = MII_TG3_EXT_STAT;
4616                                 bit = MII_TG3_EXT_STAT_MDIX;
4617                         }
4618
4619                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4620                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4621
4622                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4623                 }
4624         }
4625
4626 relink:
4627         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4628                 tg3_phy_copper_begin(tp);
4629
4630                 if (tg3_flag(tp, ROBOSWITCH)) {
4631                         current_link_up = 1;
4632                         /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4633                         current_speed = SPEED_1000;
4634                         current_duplex = DUPLEX_FULL;
4635                         tp->link_config.active_speed = current_speed;
4636                         tp->link_config.active_duplex = current_duplex;
4637                 }
4638
4639                 tg3_readphy(tp, MII_BMSR, &bmsr);
4640                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4641                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4642                         current_link_up = 1;
4643         }
4644
4645         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4646         if (current_link_up == 1) {
4647                 if (tp->link_config.active_speed == SPEED_100 ||
4648                     tp->link_config.active_speed == SPEED_10)
4649                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4650                 else
4651                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4652         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4653                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4654         else
4655                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4656
4657         /* For the 5750 core in the BCM4785 chip to work properly
4658          * in RGMII mode, the LED Control Register must be set up.
4659          */
4660         if (tg3_flag(tp, RGMII_MODE)) {
4661                 u32 led_ctrl = tr32(MAC_LED_CTRL);
4662                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4663
4664                 if (tp->link_config.active_speed == SPEED_10)
4665                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4666                 else if (tp->link_config.active_speed == SPEED_100)
4667                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4668                                      LED_CTRL_100MBPS_ON);
4669                 else if (tp->link_config.active_speed == SPEED_1000)
4670                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4671                                      LED_CTRL_1000MBPS_ON);
4672
4673                 tw32(MAC_LED_CTRL, led_ctrl);
4674                 udelay(40);
4675         }
4676
4677         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4678         if (tp->link_config.active_duplex == DUPLEX_HALF)
4679                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4680
4681         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4682                 if (current_link_up == 1 &&
4683                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4684                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4685                 else
4686                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4687         }
4688
4689         /* ??? Without this setting Netgear GA302T PHY does not
4690          * ??? send/receive packets...
4691          */
4692         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4693             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4694                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4695                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4696                 udelay(80);
4697         }
4698
4699         tw32_f(MAC_MODE, tp->mac_mode);
4700         udelay(40);
4701
4702         tg3_phy_eee_adjust(tp, current_link_up);
4703
4704         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4705                 /* Polled via timer. */
4706                 tw32_f(MAC_EVENT, 0);
4707         } else {
4708                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4709         }
4710         udelay(40);
4711
4712         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4713             current_link_up == 1 &&
4714             tp->link_config.active_speed == SPEED_1000 &&
4715             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4716                 udelay(120);
4717                 tw32_f(MAC_STATUS,
4718                      (MAC_STATUS_SYNC_CHANGED |
4719                       MAC_STATUS_CFG_CHANGED));
4720                 udelay(40);
4721                 tg3_write_mem(tp,
4722                               NIC_SRAM_FIRMWARE_MBOX,
4723                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4724         }
4725
4726         /* Prevent send BD corruption. */
4727         if (tg3_flag(tp, CLKREQ_BUG)) {
4728                 if (tp->link_config.active_speed == SPEED_100 ||
4729                     tp->link_config.active_speed == SPEED_10)
4730                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4731                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
4732                 else
4733                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4734                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
4735         }
4736
4737         tg3_test_and_report_link_chg(tp, current_link_up);
4738
4739         return 0;
4740 }
4741
4742 struct tg3_fiber_aneginfo {
4743         int state;
4744 #define ANEG_STATE_UNKNOWN              0
4745 #define ANEG_STATE_AN_ENABLE            1
4746 #define ANEG_STATE_RESTART_INIT         2
4747 #define ANEG_STATE_RESTART              3
4748 #define ANEG_STATE_DISABLE_LINK_OK      4
4749 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4750 #define ANEG_STATE_ABILITY_DETECT       6
4751 #define ANEG_STATE_ACK_DETECT_INIT      7
4752 #define ANEG_STATE_ACK_DETECT           8
4753 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4754 #define ANEG_STATE_COMPLETE_ACK         10
4755 #define ANEG_STATE_IDLE_DETECT_INIT     11
4756 #define ANEG_STATE_IDLE_DETECT          12
4757 #define ANEG_STATE_LINK_OK              13
4758 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4759 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4760
4761         u32 flags;
4762 #define MR_AN_ENABLE            0x00000001
4763 #define MR_RESTART_AN           0x00000002
4764 #define MR_AN_COMPLETE          0x00000004
4765 #define MR_PAGE_RX              0x00000008
4766 #define MR_NP_LOADED            0x00000010
4767 #define MR_TOGGLE_TX            0x00000020
4768 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4769 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4770 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4771 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4772 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4773 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4774 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4775 #define MR_TOGGLE_RX            0x00002000
4776 #define MR_NP_RX                0x00004000
4777
4778 #define MR_LINK_OK              0x80000000
4779
4780         unsigned long link_time, cur_time;
4781
4782         u32 ability_match_cfg;
4783         int ability_match_count;
4784
4785         char ability_match, idle_match, ack_match;
4786
4787         u32 txconfig, rxconfig;
4788 #define ANEG_CFG_NP             0x00000080
4789 #define ANEG_CFG_ACK            0x00000040
4790 #define ANEG_CFG_RF2            0x00000020
4791 #define ANEG_CFG_RF1            0x00000010
4792 #define ANEG_CFG_PS2            0x00000001
4793 #define ANEG_CFG_PS1            0x00008000
4794 #define ANEG_CFG_HD             0x00004000
4795 #define ANEG_CFG_FD             0x00002000
4796 #define ANEG_CFG_INVAL          0x00001f06
4797
4798 };
4799 #define ANEG_OK         0
4800 #define ANEG_DONE       1
4801 #define ANEG_TIMER_ENAB 2
4802 #define ANEG_FAILED     -1
4803
4804 #define ANEG_STATE_SETTLE_TIME  10000
4805
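/* Software 1000BASE-X autonegotiation state machine, modelled on the
 * IEEE 802.3 Clause 37 arbitration process (the MR_* flags mirror the
 * standard's management variables).  fiber_autoneg() below ticks it
 * roughly once per microsecond until it returns ANEG_DONE or
 * ANEG_FAILED.
 */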
4806 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4807                                    struct tg3_fiber_aneginfo *ap)
4808 {
4809         u16 flowctrl;
4810         unsigned long delta;
4811         u32 rx_cfg_reg;
4812         int ret;
4813
4814         if (ap->state == ANEG_STATE_UNKNOWN) {
4815                 ap->rxconfig = 0;
4816                 ap->link_time = 0;
4817                 ap->cur_time = 0;
4818                 ap->ability_match_cfg = 0;
4819                 ap->ability_match_count = 0;
4820                 ap->ability_match = 0;
4821                 ap->idle_match = 0;
4822                 ap->ack_match = 0;
4823         }
4824         ap->cur_time++;
4825
4826         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4827                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4828
4829                 if (rx_cfg_reg != ap->ability_match_cfg) {
4830                         ap->ability_match_cfg = rx_cfg_reg;
4831                         ap->ability_match = 0;
4832                         ap->ability_match_count = 0;
4833                 } else {
4834                         if (++ap->ability_match_count > 1) {
4835                                 ap->ability_match = 1;
4836                                 ap->ability_match_cfg = rx_cfg_reg;
4837                         }
4838                 }
4839                 if (rx_cfg_reg & ANEG_CFG_ACK)
4840                         ap->ack_match = 1;
4841                 else
4842                         ap->ack_match = 0;
4843
4844                 ap->idle_match = 0;
4845         } else {
4846                 ap->idle_match = 1;
4847                 ap->ability_match_cfg = 0;
4848                 ap->ability_match_count = 0;
4849                 ap->ability_match = 0;
4850                 ap->ack_match = 0;
4851
4852                 rx_cfg_reg = 0;
4853         }
4854
4855         ap->rxconfig = rx_cfg_reg;
4856         ret = ANEG_OK;
4857
4858         switch (ap->state) {
4859         case ANEG_STATE_UNKNOWN:
4860                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4861                         ap->state = ANEG_STATE_AN_ENABLE;
4862
4863                 /* fallthru */
4864         case ANEG_STATE_AN_ENABLE:
4865                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4866                 if (ap->flags & MR_AN_ENABLE) {
4867                         ap->link_time = 0;
4868                         ap->cur_time = 0;
4869                         ap->ability_match_cfg = 0;
4870                         ap->ability_match_count = 0;
4871                         ap->ability_match = 0;
4872                         ap->idle_match = 0;
4873                         ap->ack_match = 0;
4874
4875                         ap->state = ANEG_STATE_RESTART_INIT;
4876                 } else {
4877                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4878                 }
4879                 break;
4880
4881         case ANEG_STATE_RESTART_INIT:
4882                 ap->link_time = ap->cur_time;
4883                 ap->flags &= ~(MR_NP_LOADED);
4884                 ap->txconfig = 0;
4885                 tw32(MAC_TX_AUTO_NEG, 0);
4886                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4887                 tw32_f(MAC_MODE, tp->mac_mode);
4888                 udelay(40);
4889
4890                 ret = ANEG_TIMER_ENAB;
4891                 ap->state = ANEG_STATE_RESTART;
4892
4893                 /* fallthru */
4894         case ANEG_STATE_RESTART:
4895                 delta = ap->cur_time - ap->link_time;
4896                 if (delta > ANEG_STATE_SETTLE_TIME)
4897                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4898                 else
4899                         ret = ANEG_TIMER_ENAB;
4900                 break;
4901
4902         case ANEG_STATE_DISABLE_LINK_OK:
4903                 ret = ANEG_DONE;
4904                 break;
4905
4906         case ANEG_STATE_ABILITY_DETECT_INIT:
4907                 ap->flags &= ~(MR_TOGGLE_TX);
4908                 ap->txconfig = ANEG_CFG_FD;
4909                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4910                 if (flowctrl & ADVERTISE_1000XPAUSE)
4911                         ap->txconfig |= ANEG_CFG_PS1;
4912                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4913                         ap->txconfig |= ANEG_CFG_PS2;
4914                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4915                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4916                 tw32_f(MAC_MODE, tp->mac_mode);
4917                 udelay(40);
4918
4919                 ap->state = ANEG_STATE_ABILITY_DETECT;
4920                 break;
4921
4922         case ANEG_STATE_ABILITY_DETECT:
4923                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4924                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4925                 break;
4926
4927         case ANEG_STATE_ACK_DETECT_INIT:
4928                 ap->txconfig |= ANEG_CFG_ACK;
4929                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4930                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4931                 tw32_f(MAC_MODE, tp->mac_mode);
4932                 udelay(40);
4933
4934                 ap->state = ANEG_STATE_ACK_DETECT;
4935
4936                 /* fallthru */
4937         case ANEG_STATE_ACK_DETECT:
4938                 if (ap->ack_match != 0) {
4939                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4940                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4941                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4942                         } else {
4943                                 ap->state = ANEG_STATE_AN_ENABLE;
4944                         }
4945                 } else if (ap->ability_match != 0 &&
4946                            ap->rxconfig == 0) {
4947                         ap->state = ANEG_STATE_AN_ENABLE;
4948                 }
4949                 break;
4950
4951         case ANEG_STATE_COMPLETE_ACK_INIT:
4952                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4953                         ret = ANEG_FAILED;
4954                         break;
4955                 }
4956                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4957                                MR_LP_ADV_HALF_DUPLEX |
4958                                MR_LP_ADV_SYM_PAUSE |
4959                                MR_LP_ADV_ASYM_PAUSE |
4960                                MR_LP_ADV_REMOTE_FAULT1 |
4961                                MR_LP_ADV_REMOTE_FAULT2 |
4962                                MR_LP_ADV_NEXT_PAGE |
4963                                MR_TOGGLE_RX |
4964                                MR_NP_RX);
4965                 if (ap->rxconfig & ANEG_CFG_FD)
4966                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4967                 if (ap->rxconfig & ANEG_CFG_HD)
4968                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4969                 if (ap->rxconfig & ANEG_CFG_PS1)
4970                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4971                 if (ap->rxconfig & ANEG_CFG_PS2)
4972                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4973                 if (ap->rxconfig & ANEG_CFG_RF1)
4974                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4975                 if (ap->rxconfig & ANEG_CFG_RF2)
4976                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4977                 if (ap->rxconfig & ANEG_CFG_NP)
4978                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4979
4980                 ap->link_time = ap->cur_time;
4981
4982                 ap->flags ^= (MR_TOGGLE_TX);
4983                 if (ap->rxconfig & 0x0008)
4984                         ap->flags |= MR_TOGGLE_RX;
4985                 if (ap->rxconfig & ANEG_CFG_NP)
4986                         ap->flags |= MR_NP_RX;
4987                 ap->flags |= MR_PAGE_RX;
4988
4989                 ap->state = ANEG_STATE_COMPLETE_ACK;
4990                 ret = ANEG_TIMER_ENAB;
4991                 break;
4992
4993         case ANEG_STATE_COMPLETE_ACK:
4994                 if (ap->ability_match != 0 &&
4995                     ap->rxconfig == 0) {
4996                         ap->state = ANEG_STATE_AN_ENABLE;
4997                         break;
4998                 }
4999                 delta = ap->cur_time - ap->link_time;
5000                 if (delta > ANEG_STATE_SETTLE_TIME) {
5001                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5002                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5003                         } else {
5004                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5005                                     !(ap->flags & MR_NP_RX)) {
5006                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5007                                 } else {
5008                                         ret = ANEG_FAILED;
5009                                 }
5010                         }
5011                 }
5012                 break;
5013
5014         case ANEG_STATE_IDLE_DETECT_INIT:
5015                 ap->link_time = ap->cur_time;
5016                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5017                 tw32_f(MAC_MODE, tp->mac_mode);
5018                 udelay(40);
5019
5020                 ap->state = ANEG_STATE_IDLE_DETECT;
5021                 ret = ANEG_TIMER_ENAB;
5022                 break;
5023
5024         case ANEG_STATE_IDLE_DETECT:
5025                 if (ap->ability_match != 0 &&
5026                     ap->rxconfig == 0) {
5027                         ap->state = ANEG_STATE_AN_ENABLE;
5028                         break;
5029                 }
5030                 delta = ap->cur_time - ap->link_time;
5031                 if (delta > ANEG_STATE_SETTLE_TIME) {
5032                         /* XXX another gem from the Broadcom driver :( */
5033                         ap->state = ANEG_STATE_LINK_OK;
5034                 }
5035                 break;
5036
5037         case ANEG_STATE_LINK_OK:
5038                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5039                 ret = ANEG_DONE;
5040                 break;
5041
5042         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5043                 /* ??? unimplemented */
5044                 break;
5045
5046         case ANEG_STATE_NEXT_PAGE_WAIT:
5047                 /* ??? unimplemented */
5048                 break;
5049
5050         default:
5051                 ret = ANEG_FAILED;
5052                 break;
5053         }
5054
5055         return ret;
5056 }
5057
5058 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5059 {
5060         int res = 0;
5061         struct tg3_fiber_aneginfo aninfo;
5062         int status = ANEG_FAILED;
5063         unsigned int tick;
5064         u32 tmp;
5065
5066         tw32_f(MAC_TX_AUTO_NEG, 0);
5067
5068         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5069         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5070         udelay(40);
5071
5072         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5073         udelay(40);
5074
5075         memset(&aninfo, 0, sizeof(aninfo));
5076         aninfo.flags |= MR_AN_ENABLE;
5077         aninfo.state = ANEG_STATE_UNKNOWN;
5078         aninfo.cur_time = 0;
5079         tick = 0;
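        /* Each tick is about 1 usec (udelay(1) below), so the state
         * machine gets on the order of 195 ms to converge.
         */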
5080         while (++tick < 195000) {
5081                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5082                 if (status == ANEG_DONE || status == ANEG_FAILED)
5083                         break;
5084
5085                 udelay(1);
5086         }
5087
5088         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5089         tw32_f(MAC_MODE, tp->mac_mode);
5090         udelay(40);
5091
5092         *txflags = aninfo.txconfig;
5093         *rxflags = aninfo.flags;
5094
5095         if (status == ANEG_DONE &&
5096             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5097                              MR_LP_ADV_FULL_DUPLEX)))
5098                 res = 1;
5099
5100         return res;
5101 }
5102
5103 static void tg3_init_bcm8002(struct tg3 *tp)
5104 {
5105         u32 mac_status = tr32(MAC_STATUS);
5106         int i;
5107
5108         /* Reset when initializing for the first time or when we have a link. */
5109         if (tg3_flag(tp, INIT_COMPLETE) &&
5110             !(mac_status & MAC_STATUS_PCS_SYNCED))
5111                 return;
5112
5113         /* Set PLL lock range. */
5114         tg3_writephy(tp, 0x16, 0x8007);
5115
5116         /* SW reset */
5117         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5118
5119         /* Wait for reset to complete. */
5120         /* XXX schedule_timeout() ... */
5121         for (i = 0; i < 500; i++)
5122                 udelay(10);
5123
5124         /* Config mode; select PMA/Ch 1 regs. */
5125         tg3_writephy(tp, 0x10, 0x8411);
5126
5127         /* Enable auto-lock and comdet, select txclk for tx. */
5128         tg3_writephy(tp, 0x11, 0x0a10);
5129
5130         tg3_writephy(tp, 0x18, 0x00a0);
5131         tg3_writephy(tp, 0x16, 0x41ff);
5132
5133         /* Assert and deassert POR. */
5134         tg3_writephy(tp, 0x13, 0x0400);
5135         udelay(40);
5136         tg3_writephy(tp, 0x13, 0x0000);
5137
5138         tg3_writephy(tp, 0x11, 0x0a50);
5139         udelay(40);
5140         tg3_writephy(tp, 0x11, 0x0a10);
5141
5142         /* Wait for signal to stabilize */
5143         /* XXX schedule_timeout() ... */
5144         for (i = 0; i < 15000; i++)
5145                 udelay(10);
5146
5147         /* Deselect the channel register so we can read the PHYID
5148          * later.
5149          */
5150         tg3_writephy(tp, 0x10, 0x8011);
5151 }
5152
5153 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5154 {
5155         u16 flowctrl;
5156         u32 sg_dig_ctrl, sg_dig_status;
5157         u32 serdes_cfg, expected_sg_dig_ctrl;
5158         int workaround, port_a;
5159         int current_link_up;
5160
5161         serdes_cfg = 0;
5162         expected_sg_dig_ctrl = 0;
5163         workaround = 0;
5164         port_a = 1;
5165         current_link_up = 0;
5166
5167         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5168             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5169                 workaround = 1;
5170                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5171                         port_a = 0;
5172
5173                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5174                 /* preserve bits 20-23 for voltage regulator */
5175                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5176         }
5177
5178         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5179
5180         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5181                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5182                         if (workaround) {
5183                                 u32 val = serdes_cfg;
5184
5185                                 if (port_a)
5186                                         val |= 0xc010000;
5187                                 else
5188                                         val |= 0x4010000;
5189                                 tw32_f(MAC_SERDES_CFG, val);
5190                         }
5191
5192                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5193                 }
5194                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5195                         tg3_setup_flow_control(tp, 0, 0);
5196                         current_link_up = 1;
5197                 }
5198                 goto out;
5199         }
5200
5201         /* Want auto-negotiation.  */
5202         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5203
5204         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5205         if (flowctrl & ADVERTISE_1000XPAUSE)
5206                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5207         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5208                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5209
5210         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5211                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5212                     tp->serdes_counter &&
5213                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5214                                     MAC_STATUS_RCVD_CFG)) ==
5215                      MAC_STATUS_PCS_SYNCED)) {
5216                         tp->serdes_counter--;
5217                         current_link_up = 1;
5218                         goto out;
5219                 }
5220 restart_autoneg:
5221                 if (workaround)
5222                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5223                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5224                 udelay(5);
5225                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5226
5227                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5228                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5229         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5230                                  MAC_STATUS_SIGNAL_DET)) {
5231                 sg_dig_status = tr32(SG_DIG_STATUS);
5232                 mac_status = tr32(MAC_STATUS);
5233
5234                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5235                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5236                         u32 local_adv = 0, remote_adv = 0;
5237
5238                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5239                                 local_adv |= ADVERTISE_1000XPAUSE;
5240                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5241                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5242
5243                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5244                                 remote_adv |= LPA_1000XPAUSE;
5245                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5246                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5247
5248                         tp->link_config.rmt_adv =
5249                                            mii_adv_to_ethtool_adv_x(remote_adv);
5250
5251                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5252                         current_link_up = 1;
5253                         tp->serdes_counter = 0;
5254                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5255                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5256                         if (tp->serdes_counter)
5257                                 tp->serdes_counter--;
5258                         else {
5259                                 if (workaround) {
5260                                         u32 val = serdes_cfg;
5261
5262                                         if (port_a)
5263                                                 val |= 0xc010000;
5264                                         else
5265                                                 val |= 0x4010000;
5266
5267                                         tw32_f(MAC_SERDES_CFG, val);
5268                                 }
5269
5270                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5271                                 udelay(40);
5272
5273                                 /* Link parallel detection: link is up
5274                                  * only if we have PCS_SYNC and are
5275                                  * not receiving config code words. */
5276                                 mac_status = tr32(MAC_STATUS);
5277                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5278                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5279                                         tg3_setup_flow_control(tp, 0, 0);
5280                                         current_link_up = 1;
5281                                         tp->phy_flags |=
5282                                                 TG3_PHYFLG_PARALLEL_DETECT;
5283                                         tp->serdes_counter =
5284                                                 SERDES_PARALLEL_DET_TIMEOUT;
5285                                 } else
5286                                         goto restart_autoneg;
5287                         }
5288                 }
5289         } else {
5290                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5291                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5292         }
5293
5294 out:
5295         return current_link_up;
5296 }
5297
5298 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5299 {
5300         int current_link_up = 0;
5301
5302         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5303                 goto out;
5304
5305         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5306                 u32 txflags, rxflags;
5307                 int i;
5308
5309                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5310                         u32 local_adv = 0, remote_adv = 0;
5311
5312                         if (txflags & ANEG_CFG_PS1)
5313                                 local_adv |= ADVERTISE_1000XPAUSE;
5314                         if (txflags & ANEG_CFG_PS2)
5315                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5316
5317                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5318                                 remote_adv |= LPA_1000XPAUSE;
5319                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5320                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5321
5322                         tp->link_config.rmt_adv =
5323                                            mii_adv_to_ethtool_adv_x(remote_adv);
5324
5325                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5326
5327                         current_link_up = 1;
5328                 }
5329                 for (i = 0; i < 30; i++) {
5330                         udelay(20);
5331                         tw32_f(MAC_STATUS,
5332                                (MAC_STATUS_SYNC_CHANGED |
5333                                 MAC_STATUS_CFG_CHANGED));
5334                         udelay(40);
5335                         if ((tr32(MAC_STATUS) &
5336                              (MAC_STATUS_SYNC_CHANGED |
5337                               MAC_STATUS_CFG_CHANGED)) == 0)
5338                                 break;
5339                 }
5340
5341                 mac_status = tr32(MAC_STATUS);
5342                 if (current_link_up == 0 &&
5343                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5344                     !(mac_status & MAC_STATUS_RCVD_CFG))
5345                         current_link_up = 1;
5346         } else {
5347                 tg3_setup_flow_control(tp, 0, 0);
5348
5349                 /* Forcing 1000FD link up. */
5350                 current_link_up = 1;
5351
5352                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5353                 udelay(40);
5354
5355                 tw32_f(MAC_MODE, tp->mac_mode);
5356                 udelay(40);
5357         }
5358
5359 out:
5360         return current_link_up;
5361 }
5362
5363 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5364 {
5365         u32 orig_pause_cfg;
5366         u16 orig_active_speed;
5367         u8 orig_active_duplex;
5368         u32 mac_status;
5369         int current_link_up;
5370         int i;
5371
5372         orig_pause_cfg = tp->link_config.active_flowctrl;
5373         orig_active_speed = tp->link_config.active_speed;
5374         orig_active_duplex = tp->link_config.active_duplex;
5375
5376         if (!tg3_flag(tp, HW_AUTONEG) &&
5377             tp->link_up &&
5378             tg3_flag(tp, INIT_COMPLETE)) {
5379                 mac_status = tr32(MAC_STATUS);
5380                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5381                                MAC_STATUS_SIGNAL_DET |
5382                                MAC_STATUS_CFG_CHANGED |
5383                                MAC_STATUS_RCVD_CFG);
5384                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5385                                    MAC_STATUS_SIGNAL_DET)) {
5386                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5387                                             MAC_STATUS_CFG_CHANGED));
5388                         return 0;
5389                 }
5390         }
5391
5392         tw32_f(MAC_TX_AUTO_NEG, 0);
5393
5394         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5395         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5396         tw32_f(MAC_MODE, tp->mac_mode);
5397         udelay(40);
5398
5399         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5400                 tg3_init_bcm8002(tp);
5401
5402         /* Enable the link change event even when polling the serdes. */
5403         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5404         udelay(40);
5405
5406         current_link_up = 0;
5407         tp->link_config.rmt_adv = 0;
5408         mac_status = tr32(MAC_STATUS);
5409
5410         if (tg3_flag(tp, HW_AUTONEG))
5411                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5412         else
5413                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5414
5415         tp->napi[0].hw_status->status =
5416                 (SD_STATUS_UPDATED |
5417                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5418
5419         for (i = 0; i < 100; i++) {
5420                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5421                                     MAC_STATUS_CFG_CHANGED));
5422                 udelay(5);
5423                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5424                                          MAC_STATUS_CFG_CHANGED |
5425                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5426                         break;
5427         }
5428
5429         mac_status = tr32(MAC_STATUS);
5430         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5431                 current_link_up = 0;
5432                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5433                     tp->serdes_counter == 0) {
5434                         tw32_f(MAC_MODE, (tp->mac_mode |
5435                                           MAC_MODE_SEND_CONFIGS));
5436                         udelay(1);
5437                         tw32_f(MAC_MODE, tp->mac_mode);
5438                 }
5439         }
5440
5441         if (current_link_up == 1) {
5442                 tp->link_config.active_speed = SPEED_1000;
5443                 tp->link_config.active_duplex = DUPLEX_FULL;
5444                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5445                                     LED_CTRL_LNKLED_OVERRIDE |
5446                                     LED_CTRL_1000MBPS_ON));
5447         } else {
5448                 tp->link_config.active_speed = SPEED_UNKNOWN;
5449                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5450                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5451                                     LED_CTRL_LNKLED_OVERRIDE |
5452                                     LED_CTRL_TRAFFIC_OVERRIDE));
5453         }
5454
5455         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5456                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5457                 if (orig_pause_cfg != now_pause_cfg ||
5458                     orig_active_speed != tp->link_config.active_speed ||
5459                     orig_active_duplex != tp->link_config.active_duplex)
5460                         tg3_link_report(tp);
5461         }
5462
5463         return 0;
5464 }
5465
5466 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5467 {
5468         int current_link_up, err = 0;
5469         u32 bmsr, bmcr;
5470         u16 current_speed;
5471         u8 current_duplex;
5472         u32 local_adv, remote_adv;
5473
5474         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5475         tw32_f(MAC_MODE, tp->mac_mode);
5476         udelay(40);
5477
5478         tg3_clear_mac_status(tp);
5479
5480         if (force_reset)
5481                 tg3_phy_reset(tp);
5482
5483         current_link_up = 0;
5484         current_speed = SPEED_UNKNOWN;
5485         current_duplex = DUPLEX_UNKNOWN;
5486         tp->link_config.rmt_adv = 0;
5487
5488         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5489         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5490         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5491                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5492                         bmsr |= BMSR_LSTATUS;
5493                 else
5494                         bmsr &= ~BMSR_LSTATUS;
5495         }
5496
5497         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5498
5499         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5500             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5501                 /* do nothing, just check for link up at the end */
5502         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5503                 u32 adv, newadv;
5504
5505                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5506                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5507                                  ADVERTISE_1000XPAUSE |
5508                                  ADVERTISE_1000XPSE_ASYM |
5509                                  ADVERTISE_SLCT);
5510
5511                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5512                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5513
5514                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5515                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5516                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5517                         tg3_writephy(tp, MII_BMCR, bmcr);
5518
5519                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5520                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5521                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5522
5523                         return err;
5524                 }
5525         } else {
5526                 u32 new_bmcr;
5527
5528                 bmcr &= ~BMCR_SPEED1000;
5529                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5530
5531                 if (tp->link_config.duplex == DUPLEX_FULL)
5532                         new_bmcr |= BMCR_FULLDPLX;
5533
5534                 if (new_bmcr != bmcr) {
5535                         /* BMCR_SPEED1000 is a reserved bit that needs
5536                          * to be set on write.
5537                          */
5538                         new_bmcr |= BMCR_SPEED1000;
5539
5540                         /* Force a linkdown */
5541                         if (tp->link_up) {
5542                                 u32 adv;
5543
5544                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5545                                 adv &= ~(ADVERTISE_1000XFULL |
5546                                          ADVERTISE_1000XHALF |
5547                                          ADVERTISE_SLCT);
5548                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5549                                 tg3_writephy(tp, MII_BMCR, bmcr |
5550                                                            BMCR_ANRESTART |
5551                                                            BMCR_ANENABLE);
5552                                 udelay(10);
5553                                 tg3_carrier_off(tp);
5554                         }
5555                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5556                         bmcr = new_bmcr;
5557                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5558                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5559                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5560                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5561                                         bmsr |= BMSR_LSTATUS;
5562                                 else
5563                                         bmsr &= ~BMSR_LSTATUS;
5564                         }
5565                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5566                 }
5567         }
5568
5569         if (bmsr & BMSR_LSTATUS) {
5570                 current_speed = SPEED_1000;
5571                 current_link_up = 1;
5572                 if (bmcr & BMCR_FULLDPLX)
5573                         current_duplex = DUPLEX_FULL;
5574                 else
5575                         current_duplex = DUPLEX_HALF;
5576
5577                 local_adv = 0;
5578                 remote_adv = 0;
5579
5580                 if (bmcr & BMCR_ANENABLE) {
5581                         u32 common;
5582
5583                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5584                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5585                         common = local_adv & remote_adv;
5586                         if (common & (ADVERTISE_1000XHALF |
5587                                       ADVERTISE_1000XFULL)) {
5588                                 if (common & ADVERTISE_1000XFULL)
5589                                         current_duplex = DUPLEX_FULL;
5590                                 else
5591                                         current_duplex = DUPLEX_HALF;
5592
5593                                 tp->link_config.rmt_adv =
5594                                            mii_adv_to_ethtool_adv_x(remote_adv);
5595                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5596                                 /* Link is up via parallel detect */
5597                         } else {
5598                                 current_link_up = 0;
5599                         }
5600                 }
5601         }
5602
5603         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5604                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5605
5606         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5607         if (tp->link_config.active_duplex == DUPLEX_HALF)
5608                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5609
5610         tw32_f(MAC_MODE, tp->mac_mode);
5611         udelay(40);
5612
5613         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5614
5615         tp->link_config.active_speed = current_speed;
5616         tp->link_config.active_duplex = current_duplex;
5617
5618         tg3_test_and_report_link_chg(tp, current_link_up);
5619         return err;
5620 }
5621
5622 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5623 {
5624         if (tp->serdes_counter) {
5625                 /* Give autoneg time to complete. */
5626                 tp->serdes_counter--;
5627                 return;
5628         }
5629
5630         if (!tp->link_up &&
5631             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5632                 u32 bmcr;
5633
5634                 tg3_readphy(tp, MII_BMCR, &bmcr);
5635                 if (bmcr & BMCR_ANENABLE) {
5636                         u32 phy1, phy2;
5637
5638                         /* Select shadow register 0x1f */
5639                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5640                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5641
5642                         /* Select expansion interrupt status register */
5643                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5644                                          MII_TG3_DSP_EXP1_INT_STAT);
5645                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5646                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5647
5648                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5649                                 /* We have signal detect and not receiving
5650                                  * config code words, link is up by parallel
5651                                  * detection.
5652                                  */
5653
5654                                 bmcr &= ~BMCR_ANENABLE;
5655                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5656                                 tg3_writephy(tp, MII_BMCR, bmcr);
5657                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5658                         }
5659                 }
5660         } else if (tp->link_up &&
5661                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5662                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5663                 u32 phy2;
5664
5665                 /* Select expansion interrupt status register */
5666                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5667                                  MII_TG3_DSP_EXP1_INT_STAT);
5668                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5669                 if (phy2 & 0x20) {
5670                         u32 bmcr;
5671
5672                         /* Config code words received, turn on autoneg. */
5673                         tg3_readphy(tp, MII_BMCR, &bmcr);
5674                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5675
5676                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5678                 }
5679         }
5680 }
5681
5682 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5683 {
5684         u32 val;
5685         int err;
5686
5687         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5688                 err = tg3_setup_fiber_phy(tp, force_reset);
5689         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5690                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5691         else
5692                 err = tg3_setup_copper_phy(tp, force_reset);
5693
5694         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5695                 u32 scale;
5696
5697                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5698                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5699                         scale = 65;
5700                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5701                         scale = 6;
5702                 else
5703                         scale = 12;
5704
5705                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5706                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5707                 tw32(GRC_MISC_CFG, val);
5708         }
5709
5710         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5711               (6 << TX_LENGTHS_IPG_SHIFT);
5712         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5713             tg3_asic_rev(tp) == ASIC_REV_5762)
5714                 val |= tr32(MAC_TX_LENGTHS) &
5715                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5716                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5717
5718         if (tp->link_config.active_speed == SPEED_1000 &&
5719             tp->link_config.active_duplex == DUPLEX_HALF)
5720                 tw32(MAC_TX_LENGTHS, val |
5721                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5722         else
5723                 tw32(MAC_TX_LENGTHS, val |
5724                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5725
5726         if (!tg3_flag(tp, 5705_PLUS)) {
5727                 if (tp->link_up) {
5728                         tw32(HOSTCC_STAT_COAL_TICKS,
5729                              tp->coal.stats_block_coalesce_usecs);
5730                 } else {
5731                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5732                 }
5733         }
5734
5735         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5736                 val = tr32(PCIE_PWR_MGMT_THRESH);
5737                 if (!tp->link_up)
5738                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5739                               tp->pwrmgmt_thresh;
5740                 else
5741                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5742                 tw32(PCIE_PWR_MGMT_THRESH, val);
5743         }
5744
5745         return err;
5746 }
5747
5748 /* tp->lock must be held */
5749 static u64 tg3_refclk_read(struct tg3 *tp)
5750 {
5751         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5752         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5753 }
5754
5755 /* tp->lock must be held */
5756 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5757 {
5758         tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5759         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5760         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5761         tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5762 }
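
/* The stop/load/resume sequence above prevents the counter from ticking
 * between the two 32-bit halves of the update, so the new value takes
 * effect atomically.  Setting the hardware clock to the current system
 * time thus looks like:
 *
 *	tg3_full_lock(tp, 0);
 *	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
 *	tg3_full_unlock(tp);
 *
 * which mirrors what tg3_ptp_settime() below does.
 */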
5763
5764 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5765 static inline void tg3_full_unlock(struct tg3 *tp);
5766 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5767 {
5768         struct tg3 *tp = netdev_priv(dev);
5769
5770         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5771                                 SOF_TIMESTAMPING_RX_SOFTWARE |
5772                                 SOF_TIMESTAMPING_SOFTWARE    |
5773                                 SOF_TIMESTAMPING_TX_HARDWARE |
5774                                 SOF_TIMESTAMPING_RX_HARDWARE |
5775                                 SOF_TIMESTAMPING_RAW_HARDWARE;
5776
5777         if (tp->ptp_clock)
5778                 info->phc_index = ptp_clock_index(tp->ptp_clock);
5779         else
5780                 info->phc_index = -1;
5781
5782         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5783
5784         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5785                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5786                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5787                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5788         return 0;
5789 }
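
/* What is reported here is what userspace sees when querying the
 * device's timestamping capabilities (e.g. "ethtool -T ethX", which uses
 * the ETHTOOL_GET_TS_INFO command): software timestamping is always
 * available, hardware timestamping offers TX on/off plus the PTP v1/v2
 * RX event filters listed, and phc_index identifies the PTP clock
 * registered for this device (-1 if there is none).
 */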
5790
5791 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5792 {
5793         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5794         bool neg_adj = false;
5795         u32 correction = 0;
5796
5797         if (ppb < 0) {
5798                 neg_adj = true;
5799                 ppb = -ppb;
5800         }
5801
5802         /* Frequency adjustment is performed in hardware with a 24-bit
5803          * accumulator and a programmable correction value. On each clock
5804          * cycle, the correction value is added to the accumulator; when it
5805          * overflows, the time counter is incremented/decremented.
5806          *
5807          * So conversion from ppb to correction value is
5808          *              ppb * (1 << 24) / 1000000000
5809          */
5810         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5811                      TG3_EAV_REF_CLK_CORRECT_MASK;
5812
5813         tg3_full_lock(tp, 0);
5814
5815         if (correction)
5816                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5817                      TG3_EAV_REF_CLK_CORRECT_EN |
5818                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5819         else
5820                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5821
5822         tg3_full_unlock(tp);
5823
5824         return 0;
5825 }
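
/* A worked example of the conversion above: a requested offset of
 * +100000 ppb (100 ppm) yields div_u64(100000ULL * (1 << 24), 1000000000)
 * = 1677, while the advertised max_adj of 250000000 ppb maps to
 * 0.25 * (1 << 24) = 4194304, both comfortably inside the 24-bit
 * correction field.
 */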
5826
5827 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5828 {
5829         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5830
5831         tg3_full_lock(tp, 0);
5832         tp->ptp_adjust += delta;
5833         tg3_full_unlock(tp);
5834
5835         return 0;
5836 }
5837
5838 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5839 {
5840         u64 ns;
5841         u32 remainder;
5842         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5843
5844         tg3_full_lock(tp, 0);
5845         ns = tg3_refclk_read(tp);
5846         ns += tp->ptp_adjust;
5847         tg3_full_unlock(tp);
5848
5849         ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5850         ts->tv_nsec = remainder;
5851
5852         return 0;
5853 }
5854
5855 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5856                            const struct timespec *ts)
5857 {
5858         u64 ns;
5859         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5860
5861         ns = timespec_to_ns(ts);
5862
5863         tg3_full_lock(tp, 0);
5864         tg3_refclk_write(tp, ns);
5865         tp->ptp_adjust = 0;
5866         tg3_full_unlock(tp);
5867
5868         return 0;
5869 }
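
/* Taken together, tp->ptp_adjust implements a software phase offset on
 * top of the free-running hardware counter: tg3_ptp_adjtime() folds
 * deltas into it, tg3_ptp_gettime() (and tg3_hwclock_to_timestamp()
 * below) adds it to the raw counter value, and tg3_ptp_settime()
 * rewrites the counter itself and zeroes the offset.
 */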
5870
5871 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5872                           struct ptp_clock_request *rq, int on)
5873 {
5874         return -EOPNOTSUPP;
5875 }
5876
5877 static const struct ptp_clock_info tg3_ptp_caps = {
5878         .owner          = THIS_MODULE,
5879         .name           = "tg3 clock",
5880         .max_adj        = 250000000,
5881         .n_alarm        = 0,
5882         .n_ext_ts       = 0,
5883         .n_per_out      = 0,
5884         .pps            = 0,
5885         .adjfreq        = tg3_ptp_adjfreq,
5886         .adjtime        = tg3_ptp_adjtime,
5887         .gettime        = tg3_ptp_gettime,
5888         .settime        = tg3_ptp_settime,
5889         .enable         = tg3_ptp_enable,
5890 };
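
/* These capabilities are handed to the PTP core elsewhere in the driver,
 * roughly as
 *
 *	tp->ptp_clock = ptp_clock_register(&tp->ptp_info, &tp->pdev->dev);
 *
 * and the handle obtained there is what tg3_ptp_fini() below
 * unregisters.
 */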
5891
5892 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5893                                      struct skb_shared_hwtstamps *timestamp)
5894 {
5895         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5896         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5897                                            tp->ptp_adjust);
5898 }
5899
5900 /* tp->lock must be held */
5901 static void tg3_ptp_init(struct tg3 *tp)
5902 {
5903         if (!tg3_flag(tp, PTP_CAPABLE))
5904                 return;
5905
5906         /* Initialize the hardware clock to the system time. */
5907         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5908         tp->ptp_adjust = 0;
5909         tp->ptp_info = tg3_ptp_caps;
5910 }
5911
5912 /* tp->lock must be held */
5913 static void tg3_ptp_resume(struct tg3 *tp)
5914 {
5915         if (!tg3_flag(tp, PTP_CAPABLE))
5916                 return;
5917
5918         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5919         tp->ptp_adjust = 0;
5920 }
5921
5922 static void tg3_ptp_fini(struct tg3 *tp)
5923 {
5924         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5925                 return;
5926
5927         ptp_clock_unregister(tp->ptp_clock);
5928         tp->ptp_clock = NULL;
5929         tp->ptp_adjust = 0;
5930 }
5931
5932 static inline int tg3_irq_sync(struct tg3 *tp)
5933 {
5934         return tp->irq_sync;
5935 }
5936
5937 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5938 {
5939         int i;
5940
5941         dst = (u32 *)((u8 *)dst + off);
5942         for (i = 0; i < len; i += sizeof(u32))
5943                 *dst++ = tr32(off + i);
5944 }
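
/* Each call below copies one block of register space into the dump
 * buffer at its native byte offset: e.g. tg3_rd32_loop(tp, regs,
 * MAC_MODE, 0x4f0) reads 0x4f0 bytes of registers starting at MAC_MODE
 * into regs[MAC_MODE / sizeof(u32)] onward, which lets tg3_dump_state()
 * print "address: value" rows straight from the buffer.
 */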
5945
5946 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5947 {
5948         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5949         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5950         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5951         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5952         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5953         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5954         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5955         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5956         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5957         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5958         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5959         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5960         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5961         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5962         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5963         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5964         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5965         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5966         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5967
5968         if (tg3_flag(tp, SUPPORT_MSIX))
5969                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5970
5971         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5972         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5973         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5974         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5975         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5976         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5977         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5978         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5979
5980         if (!tg3_flag(tp, 5705_PLUS)) {
5981                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5982                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5983                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5984         }
5985
5986         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5987         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5988         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5989         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5990         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5991
5992         if (tg3_flag(tp, NVRAM))
5993                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5994 }
5995
5996 static void tg3_dump_state(struct tg3 *tp)
5997 {
5998         int i;
5999         u32 *regs;
6000
6001         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6002         if (!regs)
6003                 return;
6004
6005         if (tg3_flag(tp, PCI_EXPRESS)) {
6006                 /* Read up to but not including private PCI registers */
6007                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6008                         regs[i / sizeof(u32)] = tr32(i);
6009         } else
6010                 tg3_dump_legacy_regs(tp, regs);
6011
6012         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6013                 if (!regs[i + 0] && !regs[i + 1] &&
6014                     !regs[i + 2] && !regs[i + 3])
6015                         continue;
6016
6017                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6018                            i * 4,
6019                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6020         }
6021
6022         kfree(regs);
6023
6024         for (i = 0; i < tp->irq_cnt; i++) {
6025                 struct tg3_napi *tnapi = &tp->napi[i];
6026
6027                 /* SW status block */
6028                 netdev_err(tp->dev,
6029                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6030                            i,
6031                            tnapi->hw_status->status,
6032                            tnapi->hw_status->status_tag,
6033                            tnapi->hw_status->rx_jumbo_consumer,
6034                            tnapi->hw_status->rx_consumer,
6035                            tnapi->hw_status->rx_mini_consumer,
6036                            tnapi->hw_status->idx[0].rx_producer,
6037                            tnapi->hw_status->idx[0].tx_consumer);
6038
6039                 netdev_err(tp->dev,
6040                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6041                            i,
6042                            tnapi->last_tag, tnapi->last_irq_tag,
6043                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6044                            tnapi->rx_rcb_ptr,
6045                            tnapi->prodring.rx_std_prod_idx,
6046                            tnapi->prodring.rx_std_cons_idx,
6047                            tnapi->prodring.rx_jmb_prod_idx,
6048                            tnapi->prodring.rx_jmb_cons_idx);
6049         }
6050 }
6051
6052 /* This is called whenever we suspect that the system chipset is re-
6053  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6054  * is bogus tx completions. We try to recover by setting the
6055  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6056  * in the workqueue.
6057  */
6058 static void tg3_tx_recover(struct tg3 *tp)
6059 {
6060         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6061                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6062
6063         netdev_warn(tp->dev,
6064                     "The system may be re-ordering memory-mapped I/O "
6065                     "cycles to the network device, attempting to recover. "
6066                     "Please report the problem to the driver maintainer "
6067                     "and include system chipset information.\n");
6068
6069         spin_lock(&tp->lock);
6070         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6071         spin_unlock(&tp->lock);
6072 }
6073
6074 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6075 {
6076         /* Tell compiler to fetch tx indices from memory. */
6077         barrier();
6078         return tnapi->tx_pending -
6079                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6080 }
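
/* Standard power-of-two ring arithmetic: the number of descriptors in
 * flight is (tx_prod - tx_cons) masked to the ring size, which remains
 * correct across index wraparound.  E.g. with the 512-entry TX ring,
 * tx_prod = 2 and tx_cons = 510 gives (2 - 510) & 511 = 4 in flight,
 * so tg3_tx_avail() returns tx_pending - 4.
 */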
6081
6082 /* Tigon3 never reports partial packet sends.  So we do not
6083  * need special logic to handle SKBs that have not had all
6084  * of their frags sent yet, like SunGEM does.
6085  */
6086 static void tg3_tx(struct tg3_napi *tnapi)
6087 {
6088         struct tg3 *tp = tnapi->tp;
6089         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6090         u32 sw_idx = tnapi->tx_cons;
6091         struct netdev_queue *txq;
6092         int index = tnapi - tp->napi;
6093         unsigned int pkts_compl = 0, bytes_compl = 0;
6094
6095         if (tg3_flag(tp, ENABLE_TSS))
6096                 index--;
6097
6098         txq = netdev_get_tx_queue(tp->dev, index);
6099
6100         while (sw_idx != hw_idx) {
6101                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6102                 struct sk_buff *skb = ri->skb;
6103                 int i, tx_bug = 0;
6104
6105                 if (unlikely(skb == NULL)) {
6106                         tg3_tx_recover(tp);
6107                         return;
6108                 }
6109
6110                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6111                         struct skb_shared_hwtstamps timestamp;
6112                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6113                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6114
6115                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6116
6117                         skb_tstamp_tx(skb, &timestamp);
6118                 }
6119
6120                 pci_unmap_single(tp->pdev,
6121                                  dma_unmap_addr(ri, mapping),
6122                                  skb_headlen(skb),
6123                                  PCI_DMA_TODEVICE);
6124
6125                 ri->skb = NULL;
6126
6127                 while (ri->fragmented) {
6128                         ri->fragmented = false;
6129                         sw_idx = NEXT_TX(sw_idx);
6130                         ri = &tnapi->tx_buffers[sw_idx];
6131                 }
6132
6133                 sw_idx = NEXT_TX(sw_idx);
6134
6135                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6136                         ri = &tnapi->tx_buffers[sw_idx];
6137                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6138                                 tx_bug = 1;
6139
6140                         pci_unmap_page(tp->pdev,
6141                                        dma_unmap_addr(ri, mapping),
6142                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6143                                        PCI_DMA_TODEVICE);
6144
6145                         while (ri->fragmented) {
6146                                 ri->fragmented = false;
6147                                 sw_idx = NEXT_TX(sw_idx);
6148                                 ri = &tnapi->tx_buffers[sw_idx];
6149                         }
6150
6151                         sw_idx = NEXT_TX(sw_idx);
6152                 }
6153
6154                 pkts_compl++;
6155                 bytes_compl += skb->len;
6156
6157                 dev_kfree_skb(skb);
6158
6159                 if (unlikely(tx_bug)) {
6160                         tg3_tx_recover(tp);
6161                         return;
6162                 }
6163         }
6164
6165         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6166
6167         tnapi->tx_cons = sw_idx;
6168
6169         /* Need to make the tx_cons update visible to tg3_start_xmit()
6170          * before checking for netif_queue_stopped().  Without the
6171          * memory barrier, there is a small possibility that tg3_start_xmit()
6172          * will miss it and cause the queue to be stopped forever.
6173          */
6174         smp_mb();
6175
6176         if (unlikely(netif_tx_queue_stopped(txq) &&
6177                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6178                 __netif_tx_lock(txq, smp_processor_id());
6179                 if (netif_tx_queue_stopped(txq) &&
6180                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6181                         netif_tx_wake_queue(txq);
6182                 __netif_tx_unlock(txq);
6183         }
6184 }
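
/* The smp_mb() above pairs with a matching barrier on the transmit side:
 * tg3_start_xmit() stops the queue when descriptors run low and then
 * re-checks tg3_tx_avail(), so between the two barriers either the
 * producer observes the updated tx_cons and keeps going, or the wake-up
 * check here observes the stopped queue and restarts it.  This is the
 * usual lock-free stop/wake protocol for NIC TX rings.
 */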
6185
6186 static void tg3_frag_free(bool is_frag, void *data)
6187 {
6188         if (is_frag)
6189                 put_page(virt_to_head_page(data));
6190         else
6191                 kfree(data);
6192 }
6193
6194 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6195 {
6196         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6197                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6198
6199         if (!ri->data)
6200                 return;
6201
6202         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6203                          map_sz, PCI_DMA_FROMDEVICE);
6204         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6205         ri->data = NULL;
6206 }
6207
6208
6209 /* Returns size of skb allocated or < 0 on error.
6210  *
6211  * We only need to fill in the address because the other members
6212  * of the RX descriptor are invariant, see tg3_init_rings.
6213  *
6214  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6215  * posting buffers we only dirty the first cache line of the RX
6216  * descriptor (containing the address).  Whereas for the RX status
6217  * buffers the cpu only reads the last cacheline of the RX descriptor
6218  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6219  */
6220 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6221                              u32 opaque_key, u32 dest_idx_unmasked,
6222                              unsigned int *frag_size)
6223 {
6224         struct tg3_rx_buffer_desc *desc;
6225         struct ring_info *map;
6226         u8 *data;
6227         dma_addr_t mapping;
6228         int skb_size, data_size, dest_idx;
6229
6230         switch (opaque_key) {
6231         case RXD_OPAQUE_RING_STD:
6232                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6233                 desc = &tpr->rx_std[dest_idx];
6234                 map = &tpr->rx_std_buffers[dest_idx];
6235                 data_size = tp->rx_pkt_map_sz;
6236                 break;
6237
6238         case RXD_OPAQUE_RING_JUMBO:
6239                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6240                 desc = &tpr->rx_jmb[dest_idx].std;
6241                 map = &tpr->rx_jmb_buffers[dest_idx];
6242                 data_size = TG3_RX_JMB_MAP_SZ;
6243                 break;
6244
6245         default:
6246                 return -EINVAL;
6247         }
6248
6249         /* Do not overwrite any of the map or rp information
6250          * until we are sure we can commit to a new buffer.
6251          *
6252          * Callers depend upon this behavior and assume that
6253          * we leave everything unchanged if we fail.
6254          */
6255         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6256                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6257         if (skb_size <= PAGE_SIZE) {
6258                 data = netdev_alloc_frag(skb_size);
6259                 *frag_size = skb_size;
6260         } else {
6261                 data = kmalloc(skb_size, GFP_ATOMIC);
6262                 *frag_size = 0;
6263         }
6264         if (!data)
6265                 return -ENOMEM;
6266
6267         mapping = pci_map_single(tp->pdev,
6268                                  data + TG3_RX_OFFSET(tp),
6269                                  data_size,
6270                                  PCI_DMA_FROMDEVICE);
6271         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6272                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6273                 return -EIO;
6274         }
6275
6276         map->data = data;
6277         dma_unmap_addr_set(map, mapping, mapping);
6278
6279         desc->addr_hi = ((u64)mapping >> 32);
6280         desc->addr_lo = ((u64)mapping & 0xffffffff);
6281
6282         return data_size;
6283 }
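
/* The PAGE_SIZE test above picks the allocator: buffers whose total
 * footprint (data + offset, plus struct skb_shared_info) fits in a page
 * come from netdev_alloc_frag(), so the RX path can later wrap them with
 * build_skb() without copying; larger (jumbo) buffers fall back to
 * kmalloc().  *frag_size records the choice (zero means kmalloc), and
 * callers pass the same test to tg3_frag_free() to select put_page()
 * vs kfree().
 */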
6284
6285 /* We only need to copy the address over because the other
6286  * members of the RX descriptor are invariant.  See notes above
6287  * tg3_alloc_rx_data for full details.
6288  */
6289 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6290                            struct tg3_rx_prodring_set *dpr,
6291                            u32 opaque_key, int src_idx,
6292                            u32 dest_idx_unmasked)
6293 {
6294         struct tg3 *tp = tnapi->tp;
6295         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6296         struct ring_info *src_map, *dest_map;
6297         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6298         int dest_idx;
6299
6300         switch (opaque_key) {
6301         case RXD_OPAQUE_RING_STD:
6302                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6303                 dest_desc = &dpr->rx_std[dest_idx];
6304                 dest_map = &dpr->rx_std_buffers[dest_idx];
6305                 src_desc = &spr->rx_std[src_idx];
6306                 src_map = &spr->rx_std_buffers[src_idx];
6307                 break;
6308
6309         case RXD_OPAQUE_RING_JUMBO:
6310                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6311                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6312                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6313                 src_desc = &spr->rx_jmb[src_idx].std;
6314                 src_map = &spr->rx_jmb_buffers[src_idx];
6315                 break;
6316
6317         default:
6318                 return;
6319         }
6320
6321         dest_map->data = src_map->data;
6322         dma_unmap_addr_set(dest_map, mapping,
6323                            dma_unmap_addr(src_map, mapping));
6324         dest_desc->addr_hi = src_desc->addr_hi;
6325         dest_desc->addr_lo = src_desc->addr_lo;
6326
6327         /* Ensure that the update to the skb happens after the physical
6328          * addresses have been transferred to the new BD location.
6329          */
6330         smp_wmb();
6331
6332         src_map->data = NULL;
6333 }
6334
6335 /* The RX ring scheme is composed of multiple rings which post fresh
6336  * buffers to the chip, and one special ring the chip uses to report
6337  * status back to the host.
6338  *
6339  * The special ring reports the status of received packets to the
6340  * host.  The chip does not write into the original descriptor the
6341  * RX buffer was obtained from.  The chip simply takes the original
6342  * descriptor as provided by the host, updates the status and length
6343  * field, then writes this into the next status ring entry.
6344  *
6345  * Each ring the host uses to post buffers to the chip is described
6346  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6347  * it is first placed into the on-chip ram.  When the packet's length
6348  * is known, it walks down the TG3_BDINFO entries to select the ring.
6349  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
6350  * whose MAXLEN covers the new packet's length is chosen.
6351  *
6352  * The "separate ring for rx status" scheme may sound odd, but it makes
6353  * sense from a cache coherency perspective.  If only the host writes
6354  * to the buffer post rings, and only the chip writes to the rx status
6355  * rings, then cache lines never move beyond shared-modified state.
6356  * If both the host and chip were to write into the same ring, cache line
6357  * eviction could occur since both entities want it in an exclusive state.
6358  */
6359 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6360 {
6361         struct tg3 *tp = tnapi->tp;
6362         u32 work_mask, rx_std_posted = 0;
6363         u32 std_prod_idx, jmb_prod_idx;
6364         u32 sw_idx = tnapi->rx_rcb_ptr;
6365         u16 hw_idx;
6366         int received;
6367         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6368
6369         hw_idx = *(tnapi->rx_rcb_prod_idx);
6370         /*
6371          * We need to order the read of hw_idx and the read of
6372          * the opaque cookie.
6373          */
6374         rmb();
6375         work_mask = 0;
6376         received = 0;
6377         std_prod_idx = tpr->rx_std_prod_idx;
6378         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6379         while (sw_idx != hw_idx && budget > 0) {
6380                 struct ring_info *ri;
6381                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6382                 unsigned int len;
6383                 struct sk_buff *skb;
6384                 dma_addr_t dma_addr;
6385                 u32 opaque_key, desc_idx, *post_ptr;
6386                 u8 *data;
6387                 u64 tstamp = 0;
6388
6389                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6390                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6391                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6392                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6393                         dma_addr = dma_unmap_addr(ri, mapping);
6394                         data = ri->data;
6395                         post_ptr = &std_prod_idx;
6396                         rx_std_posted++;
6397                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6398                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6399                         dma_addr = dma_unmap_addr(ri, mapping);
6400                         data = ri->data;
6401                         post_ptr = &jmb_prod_idx;
6402                 } else
6403                         goto next_pkt_nopost;
6404
6405                 work_mask |= opaque_key;
6406
6407                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6408                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6409                 drop_it:
6410                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6411                                        desc_idx, *post_ptr);
6412                 drop_it_no_recycle:
6413                         /* Other statistics are kept track of by the card. */
6414                         tp->rx_dropped++;
6415                         goto next_pkt;
6416                 }
6417
6418                 prefetch(data + TG3_RX_OFFSET(tp));
6419                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6420                       ETH_FCS_LEN;
6421
6422                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6423                      RXD_FLAG_PTPSTAT_PTPV1 ||
6424                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6425                      RXD_FLAG_PTPSTAT_PTPV2) {
6426                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6427                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6428                 }
6429
6430                 if (len > TG3_RX_COPY_THRESH(tp)) {
6431                         int skb_size;
6432                         unsigned int frag_size;
6433
6434                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6435                                                     *post_ptr, &frag_size);
6436                         if (skb_size < 0)
6437                                 goto drop_it;
6438
6439                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6440                                          PCI_DMA_FROMDEVICE);
6441
6442                         skb = build_skb(data, frag_size);
6443                         if (!skb) {
6444                                 tg3_frag_free(frag_size != 0, data);
6445                                 goto drop_it_no_recycle;
6446                         }
6447                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6448                         /* Ensure that the update to the data happens
6449                          * after the usage of the old DMA mapping.
6450                          */
6451                         smp_wmb();
6452
6453                         ri->data = NULL;
6454
6455                 } else {
6456                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6457                                        desc_idx, *post_ptr);
6458
6459                         skb = netdev_alloc_skb(tp->dev,
6460                                                len + TG3_RAW_IP_ALIGN);
6461                         if (skb == NULL)
6462                                 goto drop_it_no_recycle;
6463
6464                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6465                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len,
                                                         PCI_DMA_FROMDEVICE);
6466                         memcpy(skb->data,
6467                                data + TG3_RX_OFFSET(tp),
6468                                len);
6469                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len,
                                                            PCI_DMA_FROMDEVICE);
6470                 }
6471
6472                 skb_put(skb, len);
6473                 if (tstamp)
6474                         tg3_hwclock_to_timestamp(tp, tstamp,
6475                                                  skb_hwtstamps(skb));
6476
6477                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6478                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6479                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6480                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6481                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6482                 else
6483                         skb_checksum_none_assert(skb);
6484
6485                 skb->protocol = eth_type_trans(skb, tp->dev);
6486
6487                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6488                     skb->protocol != htons(ETH_P_8021Q)) {
6489                         dev_kfree_skb(skb);
6490                         goto drop_it_no_recycle;
6491                 }
6492
6493                 if (desc->type_flags & RXD_FLAG_VLAN &&
6494                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6495                         __vlan_hwaccel_put_tag(skb,
6496                                                desc->err_vlan & RXD_VLAN_MASK);
6497
6498                 napi_gro_receive(&tnapi->napi, skb);
6499
6500                 received++;
6501                 budget--;
6502
6503 next_pkt:
6504                 (*post_ptr)++;
6505
6506                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6507                         tpr->rx_std_prod_idx = std_prod_idx &
6508                                                tp->rx_std_ring_mask;
6509                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6510                                      tpr->rx_std_prod_idx);
6511                         work_mask &= ~RXD_OPAQUE_RING_STD;
6512                         rx_std_posted = 0;
6513                 }
6514 next_pkt_nopost:
6515                 sw_idx++;
6516                 sw_idx &= tp->rx_ret_ring_mask;
6517
6518                 /* Refresh hw_idx to see if there is new work */
6519                 if (sw_idx == hw_idx) {
6520                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6521                         rmb();
6522                 }
6523         }
6524
6525         /* ACK the status ring. */
6526         tnapi->rx_rcb_ptr = sw_idx;
6527         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6528
6529         /* Refill RX ring(s). */
6530         if (!tg3_flag(tp, ENABLE_RSS)) {
6531                 /* Sync BD data before updating mailbox */
6532                 wmb();
6533
6534                 if (work_mask & RXD_OPAQUE_RING_STD) {
6535                         tpr->rx_std_prod_idx = std_prod_idx &
6536                                                tp->rx_std_ring_mask;
6537                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6538                                      tpr->rx_std_prod_idx);
6539                 }
6540                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6541                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6542                                                tp->rx_jmb_ring_mask;
6543                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6544                                      tpr->rx_jmb_prod_idx);
6545                 }
6546                 mmiowb();
6547         } else if (work_mask) {
6548                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6549                  * updated before the producer indices can be updated.
6550                  */
6551                 smp_wmb();
6552
6553                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6554                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6555
6556                 if (tnapi != &tp->napi[1]) {
6557                         tp->rx_refill = true;
6558                         napi_schedule(&tp->napi[1].napi);
6559                 }
6560         }
6561
6562         return received;
6563 }
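
/* Note the copy-break strategy in the receive loop above: packets longer
 * than TG3_RX_COPY_THRESH(tp) keep their DMA buffer, which build_skb()
 * wraps zero-copy while a replacement buffer is posted via
 * tg3_alloc_rx_data().  Shorter packets are cheaper to memcpy() into a
 * small freshly allocated skb, so the original buffer is recycled in
 * place with tg3_recycle_rx() instead.
 */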
6564
6565 static void tg3_poll_link(struct tg3 *tp)
6566 {
6567         /* handle link change and other phy events */
6568         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6569                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6570
6571                 if (sblk->status & SD_STATUS_LINK_CHG) {
6572                         sblk->status = SD_STATUS_UPDATED |
6573                                        (sblk->status & ~SD_STATUS_LINK_CHG);
6574                         spin_lock(&tp->lock);
6575                         if (tg3_flag(tp, USE_PHYLIB)) {
6576                                 tw32_f(MAC_STATUS,
6577                                      (MAC_STATUS_SYNC_CHANGED |
6578                                       MAC_STATUS_CFG_CHANGED |
6579                                       MAC_STATUS_MI_COMPLETION |
6580                                       MAC_STATUS_LNKSTATE_CHANGED));
6581                                 udelay(40);
6582                         } else
6583                                 tg3_setup_phy(tp, 0);
6584                         spin_unlock(&tp->lock);
6585                 }
6586         }
6587 }
6588
6589 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6590                                 struct tg3_rx_prodring_set *dpr,
6591                                 struct tg3_rx_prodring_set *spr)
6592 {
6593         u32 si, di, cpycnt, src_prod_idx;
6594         int i, err = 0;
6595
6596         while (1) {
6597                 src_prod_idx = spr->rx_std_prod_idx;
6598
6599                 /* Make sure updates to the rx_std_buffers[] entries and the
6600                  * standard producer index are seen in the correct order.
6601                  */
6602                 smp_rmb();
6603
6604                 if (spr->rx_std_cons_idx == src_prod_idx)
6605                         break;
6606
6607                 if (spr->rx_std_cons_idx < src_prod_idx)
6608                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6609                 else
6610                         cpycnt = tp->rx_std_ring_mask + 1 -
6611                                  spr->rx_std_cons_idx;
6612
6613                 cpycnt = min(cpycnt,
6614                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6615
6616                 si = spr->rx_std_cons_idx;
6617                 di = dpr->rx_std_prod_idx;
6618
6619                 for (i = di; i < di + cpycnt; i++) {
6620                         if (dpr->rx_std_buffers[i].data) {
6621                                 cpycnt = i - di;
6622                                 err = -ENOSPC;
6623                                 break;
6624                         }
6625                 }
6626
6627                 if (!cpycnt)
6628                         break;
6629
6630                 /* Ensure that updates to the rx_std_buffers ring and the
6631                  * shadowed hardware producer ring from tg3_recycle_rx() are
6632                  * ordered correctly WRT the skb check above.
6633                  */
6634                 smp_rmb();
6635
6636                 memcpy(&dpr->rx_std_buffers[di],
6637                        &spr->rx_std_buffers[si],
6638                        cpycnt * sizeof(struct ring_info));
6639
6640                 for (i = 0; i < cpycnt; i++, di++, si++) {
6641                         struct tg3_rx_buffer_desc *sbd, *dbd;
6642                         sbd = &spr->rx_std[si];
6643                         dbd = &dpr->rx_std[di];
6644                         dbd->addr_hi = sbd->addr_hi;
6645                         dbd->addr_lo = sbd->addr_lo;
6646                 }
6647
6648                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6649                                        tp->rx_std_ring_mask;
6650                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6651                                        tp->rx_std_ring_mask;
6652         }
6653
6654         while (1) {
6655                 src_prod_idx = spr->rx_jmb_prod_idx;
6656
6657                 /* Make sure updates to the rx_jmb_buffers[] entries and
6658                  * the jumbo producer index are seen in the correct order.
6659                  */
6660                 smp_rmb();
6661
6662                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6663                         break;
6664
6665                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6666                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6667                 else
6668                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6669                                  spr->rx_jmb_cons_idx;
6670
6671                 cpycnt = min(cpycnt,
6672                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6673
6674                 si = spr->rx_jmb_cons_idx;
6675                 di = dpr->rx_jmb_prod_idx;
6676
6677                 for (i = di; i < di + cpycnt; i++) {
6678                         if (dpr->rx_jmb_buffers[i].data) {
6679                                 cpycnt = i - di;
6680                                 err = -ENOSPC;
6681                                 break;
6682                         }
6683                 }
6684
6685                 if (!cpycnt)
6686                         break;
6687
6688                 /* Ensure that updates to the rx_jmb_buffers ring and the
6689                  * shadowed hardware producer ring from tg3_recycle_rx() are
6690                  * ordered correctly WRT the skb check above.
6691                  */
6692                 smp_rmb();
6693
6694                 memcpy(&dpr->rx_jmb_buffers[di],
6695                        &spr->rx_jmb_buffers[si],
6696                        cpycnt * sizeof(struct ring_info));
6697
6698                 for (i = 0; i < cpycnt; i++, di++, si++) {
6699                         struct tg3_rx_buffer_desc *sbd, *dbd;
6700                         sbd = &spr->rx_jmb[si].std;
6701                         dbd = &dpr->rx_jmb[di].std;
6702                         dbd->addr_hi = sbd->addr_hi;
6703                         dbd->addr_lo = sbd->addr_lo;
6704                 }
6705
6706                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6707                                        tp->rx_jmb_ring_mask;
6708                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6709                                        tp->rx_jmb_ring_mask;
6710         }
6711
6712         return err;
6713 }
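
/* The transfer above always moves buffers in contiguous runs, so a plain
 * memcpy() of the ring_info entries suffices.  E.g. with a 512-entry
 * standard ring (mask 511), cons_idx = 500 and prod_idx = 10: the first
 * pass copies the 12 entries up to the wrap point and the next pass the
 * remaining 10.  Each run is also clamped to the contiguous space at the
 * destination producer index, and hitting an already-occupied
 * destination slot ends the transfer early with -ENOSPC.
 */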
6714
6715 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6716 {
6717         struct tg3 *tp = tnapi->tp;
6718
6719         /* run TX completion thread */
6720         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6721                 tg3_tx(tnapi);
6722                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6723                         return work_done;
6724         }
6725
6726         if (!tnapi->rx_rcb_prod_idx)
6727                 return work_done;
6728
6729         /* run RX thread, within the bounds set by NAPI.
6730          * All RX "locking" is done by ensuring outside
6731          * code synchronizes with tg3->napi.poll()
6732          */
6733         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6734                 work_done += tg3_rx(tnapi, budget - work_done);
6735
6736         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6737                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6738                 int i, err = 0;
6739                 u32 std_prod_idx = dpr->rx_std_prod_idx;
6740                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6741
6742                 tp->rx_refill = false;
6743                 for (i = 1; i <= tp->rxq_cnt; i++)
6744                         err |= tg3_rx_prodring_xfer(tp, dpr,
6745                                                     &tp->napi[i].prodring);
6746
6747                 wmb();
6748
6749                 if (std_prod_idx != dpr->rx_std_prod_idx)
6750                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6751                                      dpr->rx_std_prod_idx);
6752
6753                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6754                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6755                                      dpr->rx_jmb_prod_idx);
6756
6757                 mmiowb();
6758
6759                 if (err)
6760                         tw32_f(HOSTCC_MODE, tp->coal_now);
6761         }
6762
6763         return work_done;
6764 }
6765
6766 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6767 {
6768         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6769                 schedule_work(&tp->reset_task);
6770 }
6771
6772 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6773 {
6774         cancel_work_sync(&tp->reset_task);
6775         tg3_flag_clear(tp, RESET_TASK_PENDING);
6776         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6777 }
6778
6779 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6780 {
6781         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6782         struct tg3 *tp = tnapi->tp;
6783         int work_done = 0;
6784         struct tg3_hw_status *sblk = tnapi->hw_status;
6785
6786         while (1) {
6787                 work_done = tg3_poll_work(tnapi, work_done, budget);
6788
6789                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6790                         goto tx_recovery;
6791
6792                 if (unlikely(work_done >= budget))
6793                         break;
6794
6795                 /* tnapi->last_tag is written to the interrupt mailbox
6796                  * below to tell the hw how much work has been processed,
6797                  * so we must read it before checking for more work.
6798                  */
6799                 tnapi->last_tag = sblk->status_tag;
6800                 tnapi->last_irq_tag = tnapi->last_tag;
6801                 rmb();
6802
6803                 /* check for RX/TX work to do */
6804                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6805                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6806
6807                         /* This test is not race-free, but looping
6808                          * again reduces the number of interrupts.
6809                          */
6810                         if (tnapi == &tp->napi[1] && tp->rx_refill)
6811                                 continue;
6812
6813                         napi_complete(napi);
6814                         /* Reenable interrupts. */
6815                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6816
6817                         /* This test is synchronized by napi_schedule()
6818                          * and napi_complete() to close the race condition.
6819                          */
6820                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6821                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
6822                                                   HOSTCC_MODE_ENABLE |
6823                                                   tnapi->coal_now);
6824                         }
6825                         mmiowb();
6826                         break;
6827                 }
6828         }
6829
6830         return work_done;
6831
6832 tx_recovery:
6833         /* work_done is guaranteed to be less than budget. */
6834         napi_complete(napi);
6835         tg3_reset_task_schedule(tp);
6836         return work_done;
6837 }
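
/* With tagged status blocks, the mailbox write above (last_tag << 24)
 * does double duty: it re-enables the interrupt and reports which status
 * tag the driver has consumed, so if the chip has already produced a
 * newer tag it raises a fresh interrupt instead of letting that work go
 * unnoticed.
 */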
6838
6839 static void tg3_process_error(struct tg3 *tp)
6840 {
6841         u32 val;
6842         bool real_error = false;
6843
6844         if (tg3_flag(tp, ERROR_PROCESSED))
6845                 return;
6846
6847         /* Check Flow Attention register */
6848         val = tr32(HOSTCC_FLOW_ATTN);
6849         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6850                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6851                 real_error = true;
6852         }
6853
6854         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6855                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6856                 real_error = true;
6857         }
6858
6859         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6860                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6861                 real_error = true;
6862         }
6863
6864         if (!real_error)
6865                 return;
6866
6867         tg3_dump_state(tp);
6868
6869         tg3_flag_set(tp, ERROR_PROCESSED);
6870         tg3_reset_task_schedule(tp);
6871 }
6872
6873 static int tg3_poll(struct napi_struct *napi, int budget)
6874 {
6875         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6876         struct tg3 *tp = tnapi->tp;
6877         int work_done = 0;
6878         struct tg3_hw_status *sblk = tnapi->hw_status;
6879
6880         while (1) {
6881                 if (sblk->status & SD_STATUS_ERROR)
6882                         tg3_process_error(tp);
6883
6884                 tg3_poll_link(tp);
6885
6886                 work_done = tg3_poll_work(tnapi, work_done, budget);
6887
6888                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6889                         goto tx_recovery;
6890
6891                 if (unlikely(work_done >= budget))
6892                         break;
6893
6894                 if (tg3_flag(tp, TAGGED_STATUS)) {
6895                         /* tnapi->last_tag is used in tg3_int_reenable() below
6896                          * to tell the hw how much work has been processed,
6897                          * so we must read it before checking for more work.
6898                          */
6899                         tnapi->last_tag = sblk->status_tag;
6900                         tnapi->last_irq_tag = tnapi->last_tag;
6901                         rmb();
6902                 } else
6903                         sblk->status &= ~SD_STATUS_UPDATED;
6904
6905                 if (likely(!tg3_has_work(tnapi))) {
6906                         napi_complete(napi);
6907                         tg3_int_reenable(tnapi);
6908                         break;
6909                 }
6910         }
6911
6912         return work_done;
6913
6914 tx_recovery:
6915         /* work_done is guaranteed to be less than budget. */
6916         napi_complete(napi);
6917         tg3_reset_task_schedule(tp);
6918         return work_done;
6919 }
6920
6921 static void tg3_napi_disable(struct tg3 *tp)
6922 {
6923         int i;
6924
6925         for (i = tp->irq_cnt - 1; i >= 0; i--)
6926                 napi_disable(&tp->napi[i].napi);
6927 }
6928
6929 static void tg3_napi_enable(struct tg3 *tp)
6930 {
6931         int i;
6932
6933         for (i = 0; i < tp->irq_cnt; i++)
6934                 napi_enable(&tp->napi[i].napi);
6935 }
6936
6937 static void tg3_napi_init(struct tg3 *tp)
6938 {
6939         int i;
6940
6941         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6942         for (i = 1; i < tp->irq_cnt; i++)
6943                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6944 }
6945
6946 static void tg3_napi_fini(struct tg3 *tp)
6947 {
6948         int i;
6949
6950         for (i = 0; i < tp->irq_cnt; i++)
6951                 netif_napi_del(&tp->napi[i].napi);
6952 }
6953
6954 static inline void tg3_netif_stop(struct tg3 *tp)
6955 {
6956         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6957         tg3_napi_disable(tp);
6958         netif_carrier_off(tp->dev);
6959         netif_tx_disable(tp->dev);
6960 }
6961
6962 /* tp->lock must be held */
6963 static inline void tg3_netif_start(struct tg3 *tp)
6964 {
6965         tg3_ptp_resume(tp);
6966
6967         /* NOTE: unconditional netif_tx_wake_all_queues is only
6968          * appropriate so long as all callers are assured to
6969          * have free tx slots (such as after tg3_init_hw)
6970          */
6971         netif_tx_wake_all_queues(tp->dev);
6972
6973         if (tp->link_up)
6974                 netif_carrier_on(tp->dev);
6975
6976         tg3_napi_enable(tp);
6977         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6978         tg3_enable_ints(tp);
6979 }
6980
6981 static void tg3_irq_quiesce(struct tg3 *tp)
6982 {
6983         int i;
6984
6985         BUG_ON(tp->irq_sync);
6986
6987         tp->irq_sync = 1;
6988         smp_mb();
6989
6990         for (i = 0; i < tp->irq_cnt; i++)
6991                 synchronize_irq(tp->napi[i].irq_vec);
6992 }
6993
6994 /* Fully shut down all tg3 driver activity elsewhere in the system.
6995  * If irq_sync is non-zero, then the IRQ handler must be synchronized
6996  * with as well.  Most of the time this is not necessary, except when
6997  * shutting down the device.
6998  */
6999 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7000 {
7001         spin_lock_bh(&tp->lock);
7002         if (irq_sync)
7003                 tg3_irq_quiesce(tp);
7004 }
7005
7006 static inline void tg3_full_unlock(struct tg3 *tp)
7007 {
7008         spin_unlock_bh(&tp->lock);
7009 }
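
/* A typical caller that reconfigures the chip therefore looks like:
 *
 *	tg3_full_lock(tp, 1);	(irq_sync == 1: also wait out in-flight IRQs)
 *	... stop, reprogram and restart the hardware ...
 *	tg3_full_unlock(tp);
 *
 * while callers that only need to exclude other softirq/process context,
 * such as tg3_ptp_adjfreq() above, pass irq_sync == 0.
 */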
7010
7011 /* One-shot MSI handler - Chip automatically disables interrupt
7012  * after sending MSI so driver doesn't have to do it.
7013  */
7014 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7015 {
7016         struct tg3_napi *tnapi = dev_id;
7017         struct tg3 *tp = tnapi->tp;
7018
7019         prefetch(tnapi->hw_status);
7020         if (tnapi->rx_rcb)
7021                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7022
7023         if (likely(!tg3_irq_sync(tp)))
7024                 napi_schedule(&tnapi->napi);
7025
7026         return IRQ_HANDLED;
7027 }
7028
7029 /* MSI ISR - No need to check for interrupt sharing and no need to
7030  * flush status block and interrupt mailbox. PCI ordering rules
7031  * guarantee that MSI will arrive after the status block.
7032  */
7033 static irqreturn_t tg3_msi(int irq, void *dev_id)
7034 {
7035         struct tg3_napi *tnapi = dev_id;
7036         struct tg3 *tp = tnapi->tp;
7037
7038         prefetch(tnapi->hw_status);
7039         if (tnapi->rx_rcb)
7040                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7041         /*
7042          * Writing any value to intr-mbox-0 clears PCI INTA# and
7043          * chip-internal interrupt pending events.
7044          * Writing non-zero to intr-mbox-0 additionally tells the
7045          * NIC to stop sending us irqs, engaging "in-intr-handler"
7046          * event coalescing.
7047          */
7048         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7049         if (likely(!tg3_irq_sync(tp)))
7050                 napi_schedule(&tnapi->napi);
7051
7052         return IRQ_RETVAL(1);
7053 }
7054
7055 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7056 {
7057         struct tg3_napi *tnapi = dev_id;
7058         struct tg3 *tp = tnapi->tp;
7059         struct tg3_hw_status *sblk = tnapi->hw_status;
7060         unsigned int handled = 1;
7061
7062         /* In INTx mode, the interrupt can arrive at the CPU before the
7063          * status block update posted prior to it becomes visible.
7064          * Reading the PCI State register will confirm whether the
7065          * interrupt is ours and will flush the status block.
7066          */
7067         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7068                 if (tg3_flag(tp, CHIP_RESETTING) ||
7069                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7070                         handled = 0;
7071                         goto out;
7072                 }
7073         }
7074
7075         /*
7076          * Writing any value to intr-mbox-0 clears PCI INTA# and
7077          * chip-internal interrupt pending events.
7078          * Writing non-zero to intr-mbox-0 additionally tells the
7079          * NIC to stop sending us irqs, engaging "in-intr-handler"
7080          * event coalescing.
7081          *
7082          * Flush the mailbox to de-assert the IRQ immediately to prevent
7083          * spurious interrupts.  The flush impacts performance but
7084          * excessive spurious interrupts can be worse in some cases.
7085          */
7086         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7087         if (tg3_irq_sync(tp))
7088                 goto out;
7089         sblk->status &= ~SD_STATUS_UPDATED;
7090         if (likely(tg3_has_work(tnapi))) {
7091                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7092                 napi_schedule(&tnapi->napi);
7093         } else {
7094                 /* No work, shared interrupt perhaps?  Re-enable
7095                  * interrupts, and flush that PCI write
7096                  */
7097                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7098                                0x00000000);
7099         }
7100 out:
7101         return IRQ_RETVAL(handled);
7102 }
7103
7104 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7105 {
7106         struct tg3_napi *tnapi = dev_id;
7107         struct tg3 *tp = tnapi->tp;
7108         struct tg3_hw_status *sblk = tnapi->hw_status;
7109         unsigned int handled = 1;
7110
7111         /* In INTx mode, the interrupt can arrive at the CPU before the
7112          * status block update posted prior to it becomes visible.
7113          * Reading the PCI State register will confirm whether the
7114          * interrupt is ours and will flush the status block.
7115          */
7116         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7117                 if (tg3_flag(tp, CHIP_RESETTING) ||
7118                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7119                         handled = 0;
7120                         goto out;
7121                 }
7122         }
7123
7124         /*
7125          * Writing any value to intr-mbox-0 clears PCI INTA# and
7126          * chip-internal interrupt pending events.
7127          * Writing non-zero to intr-mbox-0 additionally tells the
7128          * NIC to stop sending us irqs, engaging "in-intr-handler"
7129          * event coalescing.
7130          *
7131          * Flush the mailbox to de-assert the IRQ immediately to prevent
7132          * spurious interrupts.  The flush impacts performance but
7133          * excessive spurious interrupts can be worse in some cases.
7134          */
7135         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7136
7137         /*
7138          * In a shared interrupt configuration, sometimes other devices'
7139          * interrupts will scream.  We record the current status tag here
7140          * so that the above check can report that the screaming interrupts
7141          * are unhandled.  Eventually they will be silenced.
7142          */
7143         tnapi->last_irq_tag = sblk->status_tag;
7144
7145         if (tg3_irq_sync(tp))
7146                 goto out;
7147
7148         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7149
7150         napi_schedule(&tnapi->napi);
7151
7152 out:
7153         return IRQ_RETVAL(handled);
7154 }
7155
7156 /* ISR for interrupt test */
7157 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7158 {
7159         struct tg3_napi *tnapi = dev_id;
7160         struct tg3 *tp = tnapi->tp;
7161         struct tg3_hw_status *sblk = tnapi->hw_status;
7162
7163         if ((sblk->status & SD_STATUS_UPDATED) ||
7164             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7165                 tg3_disable_ints(tp);
7166                 return IRQ_RETVAL(1);
7167         }
7168         return IRQ_RETVAL(0);
7169 }
7170
7171 #ifdef CONFIG_NET_POLL_CONTROLLER
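/* netpoll hook: called when normal interrupt delivery is unavailable
 * (e.g. netconsole), so run one pass of the INTx handler on every
 * vector by hand to keep rx/tx moving.
 */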
7172 static void tg3_poll_controller(struct net_device *dev)
7173 {
7174         int i;
7175         struct tg3 *tp = netdev_priv(dev);
7176
7177         if (tg3_irq_sync(tp))
7178                 return;
7179
7180         for (i = 0; i < tp->irq_cnt; i++)
7181                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7182 }
7183 #endif
7184
7185 static void tg3_tx_timeout(struct net_device *dev)
7186 {
7187         struct tg3 *tp = netdev_priv(dev);
7188
7189         if (netif_msg_tx_err(tp)) {
7190                 netdev_err(dev, "transmit timed out, resetting\n");
7191                 tg3_dump_state(tp);
7192         }
7193
7194         tg3_reset_task_schedule(tp);
7195 }
7196
7197 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
7198 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7199 {
7200         u32 base = (u32) mapping & 0xffffffff;
7201
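        /* base + len + 8 wrapping past 32 bits means the buffer straddles
         * a 4GB boundary.  The base > 0xffffdcc0 pre-check cheaply skips
         * addresses more than ~9KB below a boundary, presumably sized to
         * the largest frame the chip will DMA plus the 8-byte slop.
         */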
7202         return (base > 0xffffdcc0) && (base + len + 8 < base);
7203 }
7204
7205 /* Test for DMA addresses > 40-bit */
7206 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7207                                           int len)
7208 {
7209 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7210         if (tg3_flag(tp, 40BIT_DMA_BUG))
7211                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7212         return 0;
7213 #else
7214         return 0;
7215 #endif
7216 }
7217
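/* Fill in one transmit BD: the 64-bit bus address is split across
 * addr_hi/addr_lo, the length shares a 32-bit word with the flag bits,
 * and the mss shares a word with the VLAN tag.
 */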
7218 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7219                                  dma_addr_t mapping, u32 len, u32 flags,
7220                                  u32 mss, u32 vlan)
7221 {
7222         txbd->addr_hi = ((u64) mapping >> 32);
7223         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7224         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7225         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7226 }
7227
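/* Post one or more BDs for a mapped buffer, splitting it whenever a DMA
 * erratum applies.  A worked example of the dma_limit path, assuming a
 * hypothetical dma_limit of 4096: len = 4100 would leave a 4-byte tail,
 * tripping the 8-byte DMA problem, so the last full chunk is halved and
 * the buffer goes out as 2048 + 2052 instead of 4096 + 4.  Returns true
 * when the caller must fall back to tigon3_dma_hwbug_workaround().
 */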
7228 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7229                             dma_addr_t map, u32 len, u32 flags,
7230                             u32 mss, u32 vlan)
7231 {
7232         struct tg3 *tp = tnapi->tp;
7233         bool hwbug = false;
7234
7235         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7236                 hwbug = true;
7237
7238         if (tg3_4g_overflow_test(map, len))
7239                 hwbug = true;
7240
7241         if (tg3_40bit_overflow_test(tp, map, len))
7242                 hwbug = true;
7243
7244         if (tp->dma_limit) {
7245                 u32 prvidx = *entry;
7246                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7247                 while (len > tp->dma_limit && *budget) {
7248                         u32 frag_len = tp->dma_limit;
7249                         len -= tp->dma_limit;
7250
7251                         /* Avoid the 8-byte DMA problem */
7252                         if (len <= 8) {
7253                                 len += tp->dma_limit / 2;
7254                                 frag_len = tp->dma_limit / 2;
7255                         }
7256
7257                         tnapi->tx_buffers[*entry].fragmented = true;
7258
7259                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7260                                       frag_len, tmp_flag, mss, vlan);
7261                         *budget -= 1;
7262                         prvidx = *entry;
7263                         *entry = NEXT_TX(*entry);
7264
7265                         map += frag_len;
7266                 }
7267
7268                 if (len) {
7269                         if (*budget) {
7270                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7271                                               len, flags, mss, vlan);
7272                                 *budget -= 1;
7273                                 *entry = NEXT_TX(*entry);
7274                         } else {
7275                                 hwbug = true;
7276                                 tnapi->tx_buffers[prvidx].fragmented = false;
7277                         }
7278                 }
7279         } else {
7280                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7281                               len, flags, mss, vlan);
7282                 *entry = NEXT_TX(*entry);
7283         }
7284
7285         return hwbug;
7286 }
7287
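/* Unmap a previously posted skb.  @last is the index of the final skb
 * fragment to unmap, or -1 when only the linear head was posted.  BDs
 * flagged as fragmented by tg3_tx_frag_set() belong to a mapping that
 * is unmapped in one call, so they are merely stepped over here.
 */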
7288 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7289 {
7290         int i;
7291         struct sk_buff *skb;
7292         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7293
7294         skb = txb->skb;
7295         txb->skb = NULL;
7296
7297         pci_unmap_single(tnapi->tp->pdev,
7298                          dma_unmap_addr(txb, mapping),
7299                          skb_headlen(skb),
7300                          PCI_DMA_TODEVICE);
7301
7302         while (txb->fragmented) {
7303                 txb->fragmented = false;
7304                 entry = NEXT_TX(entry);
7305                 txb = &tnapi->tx_buffers[entry];
7306         }
7307
7308         for (i = 0; i <= last; i++) {
7309                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7310
7311                 entry = NEXT_TX(entry);
7312                 txb = &tnapi->tx_buffers[entry];
7313
7314                 pci_unmap_page(tnapi->tp->pdev,
7315                                dma_unmap_addr(txb, mapping),
7316                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7317
7318                 while (txb->fragmented) {
7319                         txb->fragmented = false;
7320                         entry = NEXT_TX(entry);
7321                         txb = &tnapi->tx_buffers[entry];
7322                 }
7323         }
7324 }
7325
7326 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7327 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7328                                        struct sk_buff **pskb,
7329                                        u32 *entry, u32 *budget,
7330                                        u32 base_flags, u32 mss, u32 vlan)
7331 {
7332         struct tg3 *tp = tnapi->tp;
7333         struct sk_buff *new_skb, *skb = *pskb;
7334         dma_addr_t new_addr = 0;
7335         int ret = 0;
7336
7337         if (tg3_asic_rev(tp) != ASIC_REV_5701) {
7338                 new_skb = skb_copy(skb, GFP_ATOMIC);
7339         } else {
7340                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7341
7342                 new_skb = skb_copy_expand(skb,
7343                                           skb_headroom(skb) + more_headroom,
7344                                           skb_tailroom(skb), GFP_ATOMIC);
7345         }
7346
7347         if (!new_skb) {
7348                 ret = -1;
7349         } else {
7350                 /* New SKB is guaranteed to be linear. */
7351                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7352                                           PCI_DMA_TODEVICE);
7353                 /* Make sure the mapping succeeded */
7354                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7355                         dev_kfree_skb(new_skb);
7356                         ret = -1;
7357                 } else {
7358                         u32 save_entry = *entry;
7359
7360                         base_flags |= TXD_FLAG_END;
7361
7362                         tnapi->tx_buffers[*entry].skb = new_skb;
7363                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7364                                            mapping, new_addr);
7365
7366                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7367                                             new_skb->len, base_flags,
7368                                             mss, vlan)) {
7369                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7370                                 dev_kfree_skb(new_skb);
7371                                 ret = -1;
7372                         }
7373                 }
7374         }
7375
7376         dev_kfree_skb(skb);
7377         *pskb = new_skb;
7378         return ret;
7379 }
7380
7381 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7382
7383 /* Use GSO to work around a rare TSO bug that may be triggered when the
7384  * TSO header is greater than 80 bytes.
7385  */
7386 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7387 {
7388         struct sk_buff *segs, *nskb;
7389         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7390
7391         /* Estimate the number of fragments in the worst case */
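        /* Three BDs per segment (linear head plus page fragments) is a
         * rough upper bound here, not an exact count.
         */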
7392         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7393                 netif_stop_queue(tp->dev);
7394
7395                 /* netif_tx_stop_queue() must be done before checking
7396                  * tx index in tg3_tx_avail() below, because in
7397                  * tg3_tx(), we update tx index before checking for
7398                  * netif_tx_queue_stopped().
7399                  */
7400                 smp_mb();
7401                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7402                         return NETDEV_TX_BUSY;
7403
7404                 netif_wake_queue(tp->dev);
7405         }
7406
7407         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7408         if (IS_ERR(segs))
7409                 goto tg3_tso_bug_end;
7410
7411         do {
7412                 nskb = segs;
7413                 segs = segs->next;
7414                 nskb->next = NULL;
7415                 tg3_start_xmit(nskb, tp->dev);
7416         } while (segs);
7417
7418 tg3_tso_bug_end:
7419         dev_kfree_skb(skb);
7420
7421         return NETDEV_TX_OK;
7422 }
7423
7424 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7425  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7426  */
7427 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7428 {
7429         struct tg3 *tp = netdev_priv(dev);
7430         u32 len, entry, base_flags, mss, vlan = 0;
7431         u32 budget;
7432         int i = -1, would_hit_hwbug;
7433         dma_addr_t mapping;
7434         struct tg3_napi *tnapi;
7435         struct netdev_queue *txq;
7436         unsigned int last;
7437
7438         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7439         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7440         if (tg3_flag(tp, ENABLE_TSS))
7441                 tnapi++;
7442
7443         budget = tg3_tx_avail(tnapi);
7444
7445         /* We are running in a BH-disabled context with netif_tx_lock
7446          * and TX reclaim runs via tp->napi.poll inside of a software
7447          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7448          * no IRQ context deadlocks to worry about either.  Rejoice!
7449          */
7450         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7451                 if (!netif_tx_queue_stopped(txq)) {
7452                         netif_tx_stop_queue(txq);
7453
7454                         /* This is a hard error, log it. */
7455                         netdev_err(dev,
7456                                    "BUG! Tx Ring full when queue awake!\n");
7457                 }
7458                 return NETDEV_TX_BUSY;
7459         }
7460
7461         entry = tnapi->tx_prod;
7462         base_flags = 0;
7463         if (skb->ip_summed == CHECKSUM_PARTIAL)
7464                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7465
7466         mss = skb_shinfo(skb)->gso_size;
7467         if (mss) {
7468                 struct iphdr *iph;
7469                 u32 tcp_opt_len, hdr_len;
7470
7471                 if (skb_header_cloned(skb) &&
7472                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7473                         goto drop;
7474
7475                 iph = ip_hdr(skb);
7476                 tcp_opt_len = tcp_optlen(skb);
7477
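                /* hdr_len = IP header + TCP header bytes; the transport
                 * offset is measured from the start of the frame, hence
                 * the ETH_HLEN correction.
                 */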
7478                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7479
7480                 if (!skb_is_gso_v6(skb)) {
7481                         iph->check = 0;
7482                         iph->tot_len = htons(mss + hdr_len);
7483                 }
7484
7485                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7486                     tg3_flag(tp, TSO_BUG))
7487                         return tg3_tso_bug(tp, skb);
7488
7489                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7490                                TXD_FLAG_CPU_POST_DMA);
7491
7492                 if (tg3_flag(tp, HW_TSO_1) ||
7493                     tg3_flag(tp, HW_TSO_2) ||
7494                     tg3_flag(tp, HW_TSO_3)) {
7495                         tcp_hdr(skb)->check = 0;
7496                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7497                 } else {
7498                         tcp_hdr(skb)->check =
7499                                 ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7500                                                    0, IPPROTO_TCP, 0);
7501                 }
7502
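                /* These chips take the TSO header length as scattered
                 * bitfields rather than one value: bits 3:2 of hdr_len
                 * land in the mss word, bit 4 becomes a base_flags bit,
                 * and bits 9:5 move into base_flags bits 14:10.
                 */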
7503                 if (tg3_flag(tp, HW_TSO_3)) {
7504                         mss |= (hdr_len & 0xc) << 12;
7505                         if (hdr_len & 0x10)
7506                                 base_flags |= 0x00000010;
7507                         base_flags |= (hdr_len & 0x3e0) << 5;
7508                 } else if (tg3_flag(tp, HW_TSO_2)) {
7509                         mss |= hdr_len << 9;
7510                 } else if (tg3_flag(tp, HW_TSO_1) ||
7511                          tg3_asic_rev(tp) == ASIC_REV_5705) {
7512                         if (tcp_opt_len || iph->ihl > 5) {
7513                                 int tsflags;
7514
7515                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7516                                 mss |= (tsflags << 11);
7517                         }
7518                 } else {
7519                         if (tcp_opt_len || iph->ihl > 5) {
7520                                 int tsflags;
7521
7522                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7523                                 base_flags |= tsflags << 12;
7524                         }
7525                 }
7526         }
7527
7528         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7529             !mss && skb->len > VLAN_ETH_FRAME_LEN)
7530                 base_flags |= TXD_FLAG_JMB_PKT;
7531
7532         if (vlan_tx_tag_present(skb)) {
7533                 base_flags |= TXD_FLAG_VLAN;
7534                 vlan = vlan_tx_tag_get(skb);
7535         }
7536
7537         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
7538             tg3_flag(tp, TX_TSTAMP_EN)) {
7539                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7540                 base_flags |= TXD_FLAG_HWTSTAMP;
7541         }
7542
7543         len = skb_headlen(skb);
7544
7545         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7546         if (pci_dma_mapping_error(tp->pdev, mapping))
7547                 goto drop;
7548
7550         tnapi->tx_buffers[entry].skb = skb;
7551         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7552
7553         would_hit_hwbug = 0;
7554
7555         if (tg3_flag(tp, 5701_DMA_BUG))
7556                 would_hit_hwbug = 1;
7557
7558         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7559                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7560                             mss, vlan)) {
7561                 would_hit_hwbug = 1;
7562         } else if (skb_shinfo(skb)->nr_frags > 0) {
7563                 u32 tmp_mss = mss;
7564
7565                 if (!tg3_flag(tp, HW_TSO_1) &&
7566                     !tg3_flag(tp, HW_TSO_2) &&
7567                     !tg3_flag(tp, HW_TSO_3))
7568                         tmp_mss = 0;
7569
7570                 /* Now loop through additional data
7571                  * fragments, and queue them.
7572                  */
7573                 last = skb_shinfo(skb)->nr_frags - 1;
7574                 for (i = 0; i <= last; i++) {
7575                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7576
7577                         len = skb_frag_size(frag);
7578                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7579                                                    len, DMA_TO_DEVICE);
7580
7581                         tnapi->tx_buffers[entry].skb = NULL;
7582                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7583                                            mapping);
7584                         if (dma_mapping_error(&tp->pdev->dev, mapping))
7585                                 goto dma_error;
7586
7587                         if (!budget ||
7588                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7589                                             len, base_flags |
7590                                             ((i == last) ? TXD_FLAG_END : 0),
7591                                             tmp_mss, vlan)) {
7592                                 would_hit_hwbug = 1;
7593                                 break;
7594                         }
7595                 }
7596         }
7597
7598         if (would_hit_hwbug) {
7599                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7600
7601                 /* If the workaround fails due to memory/mapping
7602                  * failure, silently drop this packet.
7603                  */
7604                 entry = tnapi->tx_prod;
7605                 budget = tg3_tx_avail(tnapi);
7606                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7607                                                 base_flags, mss, vlan))
7608                         goto drop_nofree;
7609         }
7610
7611         skb_tx_timestamp(skb);
7612         netdev_tx_sent_queue(txq, skb->len);
7613
7614         /* Sync BD data before updating mailbox */
7615         wmb();
7616
7617         /* Packets are ready, update Tx producer idx local and on card. */
7618         tw32_tx_mbox(tnapi->prodmbox, entry);
7619
7620         tnapi->tx_prod = entry;
7621         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7622                 netif_tx_stop_queue(txq);
7623
7624                 /* netif_tx_stop_queue() must be done before checking
7625                  * tx index in tg3_tx_avail() below, because in
7626                  * tg3_tx(), we update tx index before checking for
7627                  * netif_tx_queue_stopped().
7628                  */
7629                 smp_mb();
7630                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7631                         netif_tx_wake_queue(txq);
7632         }
7633
7634         mmiowb();
7635         return NETDEV_TX_OK;
7636
7637 dma_error:
7638         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7639         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7640 drop:
7641         dev_kfree_skb(skb);
7642 drop_nofree:
7643         tp->tx_dropped++;
7644         return NETDEV_TX_OK;
7645 }
7646
7647 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7648 {
7649         if (enable) {
7650                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7651                                   MAC_MODE_PORT_MODE_MASK);
7652
7653                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7654
7655                 if (!tg3_flag(tp, 5705_PLUS))
7656                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7657
7658                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7659                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7660                 else
7661                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7662         } else {
7663                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7664
7665                 if (tg3_flag(tp, 5705_PLUS) ||
7666                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7667                     tg3_asic_rev(tp) == ASIC_REV_5700)
7668                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7669         }
7670
7671         tw32(MAC_MODE, tp->mac_mode);
7672         udelay(40);
7673 }
7674
7675 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7676 {
7677         u32 val, bmcr, mac_mode, ptest = 0;
7678
7679         tg3_phy_toggle_apd(tp, false);
7680         tg3_phy_toggle_automdix(tp, 0);
7681
7682         if (extlpbk && tg3_phy_set_extloopbk(tp))
7683                 return -EIO;
7684
7685         bmcr = BMCR_FULLDPLX;
7686         switch (speed) {
7687         case SPEED_10:
7688                 break;
7689         case SPEED_100:
7690                 bmcr |= BMCR_SPEED100;
7691                 break;
7692         case SPEED_1000:
7693         default:
7694                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7695                         speed = SPEED_100;
7696                         bmcr |= BMCR_SPEED100;
7697                 } else {
7698                         speed = SPEED_1000;
7699                         bmcr |= BMCR_SPEED1000;
7700                 }
7701         }
7702
7703         if (extlpbk) {
7704                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7705                         tg3_readphy(tp, MII_CTRL1000, &val);
7706                         val |= CTL1000_AS_MASTER |
7707                                CTL1000_ENABLE_MASTER;
7708                         tg3_writephy(tp, MII_CTRL1000, val);
7709                 } else {
7710                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7711                                 MII_TG3_FET_PTEST_TRIM_2;
7712                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7713                 }
7714         } else {
7715                 bmcr |= BMCR_LOOPBACK;
7716         }
7717         tg3_writephy(tp, MII_BMCR, bmcr);
7718
7719         /* The write needs to be flushed for the FETs */
7720         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7721                 tg3_readphy(tp, MII_BMCR, &bmcr);
7722
7723         udelay(40);
7724
7725         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7726             tg3_asic_rev(tp) == ASIC_REV_5785) {
7727                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7728                              MII_TG3_FET_PTEST_FRC_TX_LINK |
7729                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
7730
7731                 /* The write needs to be flushed for the AC131 */
7732                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7733         }
7734
7735         /* Reset to prevent losing the 1st rx packet intermittently */
7736         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7737             tg3_flag(tp, 5780_CLASS)) {
7738                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7739                 udelay(10);
7740                 tw32_f(MAC_RX_MODE, tp->rx_mode);
7741         }
7742
7743         mac_mode = tp->mac_mode &
7744                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7745         if (speed == SPEED_1000)
7746                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7747         else
7748                 mac_mode |= MAC_MODE_PORT_MODE_MII;
7749
7750         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
7751                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7752
7753                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7754                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7755                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7756                         mac_mode |= MAC_MODE_LINK_POLARITY;
7757
7758                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7759                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7760         }
7761
7762         tw32(MAC_MODE, mac_mode);
7763         udelay(40);
7764
7765         return 0;
7766 }
7767
7768 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7769 {
7770         struct tg3 *tp = netdev_priv(dev);
7771
7772         if (features & NETIF_F_LOOPBACK) {
7773                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7774                         return;
7775
7776                 spin_lock_bh(&tp->lock);
7777                 tg3_mac_loopback(tp, true);
7778                 netif_carrier_on(tp->dev);
7779                 spin_unlock_bh(&tp->lock);
7780                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7781         } else {
7782                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7783                         return;
7784
7785                 spin_lock_bh(&tp->lock);
7786                 tg3_mac_loopback(tp, false);
7787                 /* Force link status check */
7788                 tg3_setup_phy(tp, 1);
7789                 spin_unlock_bh(&tp->lock);
7790                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7791         }
7792 }
7793
7794 static netdev_features_t tg3_fix_features(struct net_device *dev,
7795         netdev_features_t features)
7796 {
7797         struct tg3 *tp = netdev_priv(dev);
7798
7799         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7800                 features &= ~NETIF_F_ALL_TSO;
7801
7802         return features;
7803 }
7804
7805 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7806 {
7807         netdev_features_t changed = dev->features ^ features;
7808
7809         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7810                 tg3_set_loopback(dev, features);
7811
7812         return 0;
7813 }
7814
7815 static void tg3_rx_prodring_free(struct tg3 *tp,
7816                                  struct tg3_rx_prodring_set *tpr)
7817 {
7818         int i;
7819
7820         if (tpr != &tp->napi[0].prodring) {
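                /* A non-default prodring only owns the buffers sitting
                 * between its consumer and producer indexes; the rest
                 * were handed back to the default ring, so only that
                 * window needs freeing.  (This is inferred from the ring
                 * recycling scheme, not a documented contract.)
                 */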
7821                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7822                      i = (i + 1) & tp->rx_std_ring_mask)
7823                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7824                                         tp->rx_pkt_map_sz);
7825
7826                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7827                         for (i = tpr->rx_jmb_cons_idx;
7828                              i != tpr->rx_jmb_prod_idx;
7829                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7830                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7831                                                 TG3_RX_JMB_MAP_SZ);
7832                         }
7833                 }
7834
7835                 return;
7836         }
7837
7838         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7839                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7840                                 tp->rx_pkt_map_sz);
7841
7842         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7843                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7844                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7845                                         TG3_RX_JMB_MAP_SZ);
7846         }
7847 }
7848
7849 /* Initialize rx rings for packet processing.
7850  *
7851  * The chip has been shut down and the driver detached from
7852  * the networking stack, so no interrupts or new tx packets will
7853  * end up in the driver.  tp->{tx,}lock are held and thus
7854  * we may not sleep.
7855  */
7856 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7857                                  struct tg3_rx_prodring_set *tpr)
7858 {
7859         u32 i, rx_pkt_dma_sz;
7860
7861         tpr->rx_std_cons_idx = 0;
7862         tpr->rx_std_prod_idx = 0;
7863         tpr->rx_jmb_cons_idx = 0;
7864         tpr->rx_jmb_prod_idx = 0;
7865
7866         if (tpr != &tp->napi[0].prodring) {
7867                 memset(&tpr->rx_std_buffers[0], 0,
7868                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7869                 if (tpr->rx_jmb_buffers)
7870                         memset(&tpr->rx_jmb_buffers[0], 0,
7871                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7872                 goto done;
7873         }
7874
7875         /* Zero out all descriptors. */
7876         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7877
7878         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7879         if (tg3_flag(tp, 5780_CLASS) &&
7880             tp->dev->mtu > ETH_DATA_LEN)
7881                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7882         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7883
7884         /* Initialize invariants of the rings; we only set this
7885          * stuff once.  This works because the card does not
7886          * write into the rx buffer posting rings.
7887          */
7888         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7889                 struct tg3_rx_buffer_desc *rxd;
7890
7891                 rxd = &tpr->rx_std[i];
7892                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7893                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7894                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7895                                (i << RXD_OPAQUE_INDEX_SHIFT));
7896         }
7897
7898         /* Now allocate fresh SKBs for each rx ring. */
7899         for (i = 0; i < tp->rx_pending; i++) {
7900                 unsigned int frag_size;
7901
7902                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7903                                       &frag_size) < 0) {
7904                         netdev_warn(tp->dev,
7905                                     "Using a smaller RX standard ring. Only "
7906                                     "%d out of %d buffers were allocated "
7907                                     "successfully\n", i, tp->rx_pending);
7908                         if (i == 0)
7909                                 goto initfail;
7910                         tp->rx_pending = i;
7911                         break;
7912                 }
7913         }
7914
7915         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7916                 goto done;
7917
7918         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7919
7920         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7921                 goto done;
7922
7923         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7924                 struct tg3_rx_buffer_desc *rxd;
7925
7926                 rxd = &tpr->rx_jmb[i].std;
7927                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7928                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7929                                   RXD_FLAG_JUMBO;
7930                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7931                        (i << RXD_OPAQUE_INDEX_SHIFT));
7932         }
7933
7934         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7935                 unsigned int frag_size;
7936
7937                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7938                                       &frag_size) < 0) {
7939                         netdev_warn(tp->dev,
7940                                     "Using a smaller RX jumbo ring. Only %d "
7941                                     "out of %d buffers were allocated "
7942                                     "successfully\n", i, tp->rx_jumbo_pending);
7943                         if (i == 0)
7944                                 goto initfail;
7945                         tp->rx_jumbo_pending = i;
7946                         break;
7947                 }
7948         }
7949
7950 done:
7951         return 0;
7952
7953 initfail:
7954         tg3_rx_prodring_free(tp, tpr);
7955         return -ENOMEM;
7956 }
7957
7958 static void tg3_rx_prodring_fini(struct tg3 *tp,
7959                                  struct tg3_rx_prodring_set *tpr)
7960 {
7961         kfree(tpr->rx_std_buffers);
7962         tpr->rx_std_buffers = NULL;
7963         kfree(tpr->rx_jmb_buffers);
7964         tpr->rx_jmb_buffers = NULL;
7965         if (tpr->rx_std) {
7966                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7967                                   tpr->rx_std, tpr->rx_std_mapping);
7968                 tpr->rx_std = NULL;
7969         }
7970         if (tpr->rx_jmb) {
7971                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7972                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7973                 tpr->rx_jmb = NULL;
7974         }
7975 }
7976
7977 static int tg3_rx_prodring_init(struct tg3 *tp,
7978                                 struct tg3_rx_prodring_set *tpr)
7979 {
7980         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7981                                       GFP_KERNEL);
7982         if (!tpr->rx_std_buffers)
7983                 return -ENOMEM;
7984
7985         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7986                                          TG3_RX_STD_RING_BYTES(tp),
7987                                          &tpr->rx_std_mapping,
7988                                          GFP_KERNEL);
7989         if (!tpr->rx_std)
7990                 goto err_out;
7991
7992         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7993                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7994                                               GFP_KERNEL);
7995                 if (!tpr->rx_jmb_buffers)
7996                         goto err_out;
7997
7998                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7999                                                  TG3_RX_JMB_RING_BYTES(tp),
8000                                                  &tpr->rx_jmb_mapping,
8001                                                  GFP_KERNEL);
8002                 if (!tpr->rx_jmb)
8003                         goto err_out;
8004         }
8005
8006         return 0;
8007
8008 err_out:
8009         tg3_rx_prodring_fini(tp, tpr);
8010         return -ENOMEM;
8011 }
8012
8013 /* Free up pending packets in all rx/tx rings.
8014  *
8015  * The chip has been shut down and the driver detached from
8016  * the networking stack, so no interrupts or new tx packets will
8017  * end up in the driver.  tp->{tx,}lock is not held and we are not
8018  * in an interrupt context and thus may sleep.
8019  */
8020 static void tg3_free_rings(struct tg3 *tp)
8021 {
8022         int i, j;
8023
8024         for (j = 0; j < tp->irq_cnt; j++) {
8025                 struct tg3_napi *tnapi = &tp->napi[j];
8026
8027                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8028
8029                 if (!tnapi->tx_buffers)
8030                         continue;
8031
8032                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8033                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8034
8035                         if (!skb)
8036                                 continue;
8037
8038                         tg3_tx_skb_unmap(tnapi, i,
8039                                          skb_shinfo(skb)->nr_frags - 1);
8040
8041                         dev_kfree_skb_any(skb);
8042                 }
8043                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8044         }
8045 }
8046
8047 /* Initialize tx/rx rings for packet processing.
8048  *
8049  * The chip has been shut down and the driver detached from
8050  * the networking, so no interrupts or new tx packets will
8051  * the networking stack, so no interrupts or new tx packets will
8052  * we may not sleep.
8053  */
8054 static int tg3_init_rings(struct tg3 *tp)
8055 {
8056         int i;
8057
8058         /* Free up all the SKBs. */
8059         tg3_free_rings(tp);
8060
8061         for (i = 0; i < tp->irq_cnt; i++) {
8062                 struct tg3_napi *tnapi = &tp->napi[i];
8063
8064                 tnapi->last_tag = 0;
8065                 tnapi->last_irq_tag = 0;
8066                 tnapi->hw_status->status = 0;
8067                 tnapi->hw_status->status_tag = 0;
8068                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8069
8070                 tnapi->tx_prod = 0;
8071                 tnapi->tx_cons = 0;
8072                 if (tnapi->tx_ring)
8073                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8074
8075                 tnapi->rx_rcb_ptr = 0;
8076                 if (tnapi->rx_rcb)
8077                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8078
8079                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8080                         tg3_free_rings(tp);
8081                         return -ENOMEM;
8082                 }
8083         }
8084
8085         return 0;
8086 }
8087
8088 static void tg3_mem_tx_release(struct tg3 *tp)
8089 {
8090         int i;
8091
8092         for (i = 0; i < tp->irq_max; i++) {
8093                 struct tg3_napi *tnapi = &tp->napi[i];
8094
8095                 if (tnapi->tx_ring) {
8096                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8097                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8098                         tnapi->tx_ring = NULL;
8099                 }
8100
8101                 kfree(tnapi->tx_buffers);
8102                 tnapi->tx_buffers = NULL;
8103         }
8104 }
8105
8106 static int tg3_mem_tx_acquire(struct tg3 *tp)
8107 {
8108         int i;
8109         struct tg3_napi *tnapi = &tp->napi[0];
8110
8111         /* If multivector TSS is enabled, vector 0 does not handle
8112          * tx interrupts.  Don't allocate any resources for it.
8113          */
8114         if (tg3_flag(tp, ENABLE_TSS))
8115                 tnapi++;
8116
8117         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8118                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8119                                             TG3_TX_RING_SIZE, GFP_KERNEL);
8120                 if (!tnapi->tx_buffers)
8121                         goto err_out;
8122
8123                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8124                                                     TG3_TX_RING_BYTES,
8125                                                     &tnapi->tx_desc_mapping,
8126                                                     GFP_KERNEL);
8127                 if (!tnapi->tx_ring)
8128                         goto err_out;
8129         }
8130
8131         return 0;
8132
8133 err_out:
8134         tg3_mem_tx_release(tp);
8135         return -ENOMEM;
8136 }
8137
8138 static void tg3_mem_rx_release(struct tg3 *tp)
8139 {
8140         int i;
8141
8142         for (i = 0; i < tp->irq_max; i++) {
8143                 struct tg3_napi *tnapi = &tp->napi[i];
8144
8145                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8146
8147                 if (!tnapi->rx_rcb)
8148                         continue;
8149
8150                 dma_free_coherent(&tp->pdev->dev,
8151                                   TG3_RX_RCB_RING_BYTES(tp),
8152                                   tnapi->rx_rcb,
8153                                   tnapi->rx_rcb_mapping);
8154                 tnapi->rx_rcb = NULL;
8155         }
8156 }
8157
8158 static int tg3_mem_rx_acquire(struct tg3 *tp)
8159 {
8160         unsigned int i, limit;
8161
8162         limit = tp->rxq_cnt;
8163
8164         /* If RSS is enabled, vector zero still needs a producer ring
8165          * set: it is the true hw prodring, even though it handles no rx.
8166          */
8167         if (tg3_flag(tp, ENABLE_RSS))
8168                 limit++;
8169
8170         for (i = 0; i < limit; i++) {
8171                 struct tg3_napi *tnapi = &tp->napi[i];
8172
8173                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8174                         goto err_out;
8175
8176                 /* If multivector RSS is enabled, vector 0
8177                  * does not handle rx or tx interrupts.
8178                  * Don't allocate any resources for it.
8179                  */
8180                 if (!i && tg3_flag(tp, ENABLE_RSS))
8181                         continue;
8182
8183                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8184                                                    TG3_RX_RCB_RING_BYTES(tp),
8185                                                    &tnapi->rx_rcb_mapping,
8186                                                    GFP_KERNEL | __GFP_ZERO);
8187                 if (!tnapi->rx_rcb)
8188                         goto err_out;
8189         }
8190
8191         return 0;
8192
8193 err_out:
8194         tg3_mem_rx_release(tp);
8195         return -ENOMEM;
8196 }
8197
8198 /*
8199  * Must only be invoked with device interrupt sources disabled
8200  * and the hardware shut down.
8201  */
8202 static void tg3_free_consistent(struct tg3 *tp)
8203 {
8204         int i;
8205
8206         for (i = 0; i < tp->irq_cnt; i++) {
8207                 struct tg3_napi *tnapi = &tp->napi[i];
8208
8209                 if (tnapi->hw_status) {
8210                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8211                                           tnapi->hw_status,
8212                                           tnapi->status_mapping);
8213                         tnapi->hw_status = NULL;
8214                 }
8215         }
8216
8217         tg3_mem_rx_release(tp);
8218         tg3_mem_tx_release(tp);
8219
8220         if (tp->hw_stats) {
8221                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8222                                   tp->hw_stats, tp->stats_mapping);
8223                 tp->hw_stats = NULL;
8224         }
8225 }
8226
8227 /*
8228  * Must only be invoked with device interrupt sources disabled
8229  * and the hardware shut down.  Can sleep.
8230  */
8231 static int tg3_alloc_consistent(struct tg3 *tp)
8232 {
8233         int i;
8234
8235         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8236                                           sizeof(struct tg3_hw_stats),
8237                                           &tp->stats_mapping,
8238                                           GFP_KERNEL | __GFP_ZERO);
8239         if (!tp->hw_stats)
8240                 goto err_out;
8241
8242         for (i = 0; i < tp->irq_cnt; i++) {
8243                 struct tg3_napi *tnapi = &tp->napi[i];
8244                 struct tg3_hw_status *sblk;
8245
8246                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8247                                                       TG3_HW_STATUS_SIZE,
8248                                                       &tnapi->status_mapping,
8249                                                       GFP_KERNEL | __GFP_ZERO);
8250                 if (!tnapi->hw_status)
8251                         goto err_out;
8252
8253                 sblk = tnapi->hw_status;
8254
8255                 if (tg3_flag(tp, ENABLE_RSS)) {
8256                         u16 *prodptr = NULL;
8257
8258                         /*
8259                          * When RSS is enabled, the status block format changes
8260                          * slightly.  The "rx_jumbo_consumer", "reserved",
8261                          * and "rx_mini_consumer" members get mapped to the
8262                          * other three rx return ring producer indexes.
8263                          */
8264                         switch (i) {
8265                         case 1:
8266                                 prodptr = &sblk->idx[0].rx_producer;
8267                                 break;
8268                         case 2:
8269                                 prodptr = &sblk->rx_jumbo_consumer;
8270                                 break;
8271                         case 3:
8272                                 prodptr = &sblk->reserved;
8273                                 break;
8274                         case 4:
8275                                 prodptr = &sblk->rx_mini_consumer;
8276                                 break;
8277                         }
8278                         tnapi->rx_rcb_prod_idx = prodptr;
8279                 } else {
8280                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8281                 }
8282         }
8283
8284         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8285                 goto err_out;
8286
8287         return 0;
8288
8289 err_out:
8290         tg3_free_consistent(tp);
8291         return -ENOMEM;
8292 }
8293
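/* 1000 polls of 100 usec apiece bounds each wait below at roughly 100 ms. */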
8294 #define MAX_WAIT_CNT 1000
8295
8296 /* To stop a block, clear the enable bit and poll till it
8297  * clears.  tp->lock is held.
8298  */
8299 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8300 {
8301         unsigned int i;
8302         u32 val;
8303
8304         if (tg3_flag(tp, 5705_PLUS)) {
8305                 switch (ofs) {
8306                 case RCVLSC_MODE:
8307                 case DMAC_MODE:
8308                 case MBFREE_MODE:
8309                 case BUFMGR_MODE:
8310                 case MEMARB_MODE:
8311                         /* We can't enable/disable these bits of the
8312                          * 5705/5750; just say success.
8313                          */
8314                         return 0;
8315
8316                 default:
8317                         break;
8318                 }
8319         }
8320
8321         val = tr32(ofs);
8322         val &= ~enable_bit;
8323         tw32_f(ofs, val);
8324
8325         for (i = 0; i < MAX_WAIT_CNT; i++) {
8326                 udelay(100);
8327                 val = tr32(ofs);
8328                 if ((val & enable_bit) == 0)
8329                         break;
8330         }
8331
8332         if (i == MAX_WAIT_CNT && !silent) {
8333                 dev_err(&tp->pdev->dev,
8334                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8335                         ofs, enable_bit);
8336                 return -ENODEV;
8337         }
8338
8339         return 0;
8340 }
8341
8342 /* tp->lock is held. */
8343 static int tg3_abort_hw(struct tg3 *tp, int silent)
8344 {
8345         int i, err;
8346
8347         tg3_disable_ints(tp);
8348
8349         tp->rx_mode &= ~RX_MODE_ENABLE;
8350         tw32_f(MAC_RX_MODE, tp->rx_mode);
8351         udelay(10);
8352
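        /* Errors are OR-ed together rather than returned early so that
         * the teardown always runs to completion; "silent" callers just
         * suppress the per-block log noise.
         */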
8353         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8354         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8355         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8356         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8357         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8358         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8359
8360         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8361         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8362         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8363         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8364         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8365         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8366         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8367
8368         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8369         tw32_f(MAC_MODE, tp->mac_mode);
8370         udelay(40);
8371
8372         tp->tx_mode &= ~TX_MODE_ENABLE;
8373         tw32_f(MAC_TX_MODE, tp->tx_mode);
8374
8375         for (i = 0; i < MAX_WAIT_CNT; i++) {
8376                 udelay(100);
8377                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8378                         break;
8379         }
8380         if (i >= MAX_WAIT_CNT) {
8381                 dev_err(&tp->pdev->dev,
8382                         "%s timed out, TX_MODE_ENABLE will not clear "
8383                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8384                 err |= -ENODEV;
8385         }
8386
8387         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8388         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8389         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8390
8391         tw32(FTQ_RESET, 0xffffffff);
8392         tw32(FTQ_RESET, 0x00000000);
8393
8394         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8395         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8396
8397         for (i = 0; i < tp->irq_cnt; i++) {
8398                 struct tg3_napi *tnapi = &tp->napi[i];
8399                 if (tnapi->hw_status)
8400                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8401         }
8402
8403         return err;
8404 }
8405
8406 /* Save PCI command register before chip reset */
8407 static void tg3_save_pci_state(struct tg3 *tp)
8408 {
8409         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8410 }
8411
8412 /* Restore PCI state after chip reset */
8413 static void tg3_restore_pci_state(struct tg3 *tp)
8414 {
8415         u32 val;
8416
8417         /* Re-enable indirect register accesses. */
8418         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8419                                tp->misc_host_ctrl);
8420
8421         /* Set MAX PCI retry to zero. */
8422         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8423         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8424             tg3_flag(tp, PCIX_MODE))
8425                 val |= PCISTATE_RETRY_SAME_DMA;
8426         /* Allow reads and writes to the APE register and memory space. */
8427         if (tg3_flag(tp, ENABLE_APE))
8428                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8429                        PCISTATE_ALLOW_APE_SHMEM_WR |
8430                        PCISTATE_ALLOW_APE_PSPACE_WR;
8431         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8432
8433         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8434
8435         if (!tg3_flag(tp, PCI_EXPRESS)) {
8436                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8437                                       tp->pci_cacheline_sz);
8438                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8439                                       tp->pci_lat_timer);
8440         }
8441
8442         /* Make sure PCI-X relaxed ordering bit is clear. */
8443         if (tg3_flag(tp, PCIX_MODE)) {
8444                 u16 pcix_cmd;
8445
8446                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8447                                      &pcix_cmd);
8448                 pcix_cmd &= ~PCI_X_CMD_ERO;
8449                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8450                                       pcix_cmd);
8451         }
8452
8453         if (tg3_flag(tp, 5780_CLASS)) {
8454
8455                 /* Chip reset on 5780 will reset MSI enable bit,
8456                  * so we need to restore it.
8457                  */
8458                 if (tg3_flag(tp, USING_MSI)) {
8459                         u16 ctrl;
8460
8461                         pci_read_config_word(tp->pdev,
8462                                              tp->msi_cap + PCI_MSI_FLAGS,
8463                                              &ctrl);
8464                         pci_write_config_word(tp->pdev,
8465                                               tp->msi_cap + PCI_MSI_FLAGS,
8466                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8467                         val = tr32(MSGINT_MODE);
8468                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8469                 }
8470         }
8471 }
8472
8473 /* tp->lock is held. */
8474 static int tg3_chip_reset(struct tg3 *tp)
8475 {
8476         u32 val;
8477         void (*write_op)(struct tg3 *, u32, u32);
8478         int i, err;
8479
8480         tg3_nvram_lock(tp);
8481
8482         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8483
8484         /* No matching tg3_nvram_unlock() after this because
8485          * chip reset below will undo the nvram lock.
8486          */
8487         tp->nvram_lock_cnt = 0;
8488
8489         /* GRC_MISC_CFG core clock reset will clear the memory
8490          * enable bit in PCI register 4 and the MSI enable bit
8491          * on some chips, so we save relevant registers here.
8492          */
8493         tg3_save_pci_state(tp);
8494
8495         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8496             tg3_flag(tp, 5755_PLUS))
8497                 tw32(GRC_FASTBOOT_PC, 0);
8498
8499         /*
8500          * We must avoid the readl() that normally takes place.
8501          * It locks up machines, causes machine checks, and other
8502          * fun things.  So, temporarily disable the 5701
8503          * hardware workaround, while we do the reset.
8504          */
8505         write_op = tp->write32;
8506         if (write_op == tg3_write_flush_reg32)
8507                 tp->write32 = tg3_write32;
8508
8509         /* Prevent the irq handler from reading or writing PCI registers
8510          * during chip reset when the memory enable bit in the PCI command
8511          * register may be cleared.  The chip does not generate interrupts
8512          * at this time, but the irq handler may still be called due to irq
8513          * sharing or irqpoll.
8514          */
8515         tg3_flag_set(tp, CHIP_RESETTING);
8516         for (i = 0; i < tp->irq_cnt; i++) {
8517                 struct tg3_napi *tnapi = &tp->napi[i];
8518                 if (tnapi->hw_status) {
8519                         tnapi->hw_status->status = 0;
8520                         tnapi->hw_status->status_tag = 0;
8521                 }
8522                 tnapi->last_tag = 0;
8523                 tnapi->last_irq_tag = 0;
8524         }
8525         smp_mb();
8526
8527         for (i = 0; i < tp->irq_cnt; i++)
8528                 synchronize_irq(tp->napi[i].irq_vec);
8529
8530         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8531                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8532                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8533         }
8534
8535         /* do the reset */
8536         val = GRC_MISC_CFG_CORECLK_RESET;
8537
8538         if (tg3_flag(tp, PCI_EXPRESS)) {
8539                 /* Force PCIe 1.0a mode */
8540                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8541                     !tg3_flag(tp, 57765_PLUS) &&
8542                     tr32(TG3_PCIE_PHY_TSTCTL) ==
8543                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8544                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8545
8546                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8547                         tw32(GRC_MISC_CFG, (1 << 29));
8548                         val |= (1 << 29);
8549                 }
8550         }
8551
8552         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8553                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8554                 tw32(GRC_VCPU_EXT_CTRL,
8555                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8556         }
8557
8558         /* Manage gphy power for all CPMU-absent PCIe devices. */
8559         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8560                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8561
8562         tw32(GRC_MISC_CFG, val);
8563
8564         /* restore 5701 hardware bug workaround write method */
8565         tp->write32 = write_op;
8566
8567         /* Unfortunately, we have to delay before the PCI read back.
8568          * Some 575X chips will not even respond to a PCI cfg access
8569          * when the reset command is given to the chip.
8570          *
8571          * How do these hardware designers expect things to work
8572          * properly if the PCI write is posted for a long period
8573          * of time?  It is always necessary to have some method by
8574          * which a register read back can occur to push out the
8575          * write that performs the reset.
8576          *
8577          * For most tg3 variants the trick below has worked.
8578          * Ho hum...
8579          */
8580         udelay(120);
8581
8582         /* Flush PCI posted writes.  The normal MMIO registers
8583          * are inaccessible at this time so this is the only
8584          * way to do this reliably (actually, this is no longer
8585          * the case, see above).  I tried to use indirect
8586          * register read/write but this upset some 5701 variants.
8587          */
8588         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8589
8590         udelay(120);
8591
8592         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8593                 u16 val16;
8594
8595                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8596                         int j;
8597                         u32 cfg_val;
8598
8599                         /* Wait for link training to complete.  */
8600                         for (j = 0; j < 5000; j++)
8601                                 udelay(100);
8602
8603                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8604                         pci_write_config_dword(tp->pdev, 0xc4,
8605                                                cfg_val | (1 << 15));
8606                 }
8607
8608                 /* Clear the "no snoop" and "relaxed ordering" bits. */
8609                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8610                 /*
8611                  * Older PCIe devices only support the 128 byte
8612                  * MPS setting.  Enforce the restriction.
8613                  */
8614                 if (!tg3_flag(tp, CPMU_PRESENT))
8615                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8616                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8617
8618                 /* Clear error status */
8619                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8620                                       PCI_EXP_DEVSTA_CED |
8621                                       PCI_EXP_DEVSTA_NFED |
8622                                       PCI_EXP_DEVSTA_FED |
8623                                       PCI_EXP_DEVSTA_URD);
8624         }
8625
8626         tg3_restore_pci_state(tp);
8627
8628         tg3_flag_clear(tp, CHIP_RESETTING);
8629         tg3_flag_clear(tp, ERROR_PROCESSED);
8630
8631         val = 0;
8632         if (tg3_flag(tp, 5780_CLASS))
8633                 val = tr32(MEMARB_MODE);
8634         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8635
8636         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8637                 tg3_stop_fw(tp);
8638                 tw32(0x5000, 0x400);
8639         }
8640
8641         if (tg3_flag(tp, IS_SSB_CORE)) {
8642                 /*
8643                  * BCM4785: To avoid repercussions from using the
8644                  * potentially defective internal ROM, stop the Rx RISC
8645                  * CPU; it is not required.
8646                  */
8647                 tg3_stop_fw(tp);
8648                 tg3_halt_cpu(tp, RX_CPU_BASE);
8649         }
8650
8651         tw32(GRC_MODE, tp->grc_mode);
8652
8653         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8654                 val = tr32(0xc4);
8655
8656                 tw32(0xc4, val | (1 << 15));
8657         }
8658
8659         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8660             tg3_asic_rev(tp) == ASIC_REV_5705) {
8661                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8662                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8663                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8664                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8665         }
8666
8667         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8668                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8669                 val = tp->mac_mode;
8670         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8671                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8672                 val = tp->mac_mode;
8673         } else
8674                 val = 0;
8675
8676         tw32_f(MAC_MODE, val);
8677         udelay(40);
8678
8679         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8680
8681         err = tg3_poll_fw(tp);
8682         if (err)
8683                 return err;
8684
8685         tg3_mdio_start(tp);
8686
8687         if (tg3_flag(tp, PCI_EXPRESS) &&
8688             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8689             tg3_asic_rev(tp) != ASIC_REV_5785 &&
8690             !tg3_flag(tp, 57765_PLUS)) {
8691                 val = tr32(0x7c00);
8692
8693                 tw32(0x7c00, val | (1 << 25));
8694         }
8695
8696         if (tg3_asic_rev(tp) == ASIC_REV_5720) {
8697                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8698                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8699         }
8700
8701         /* Reprobe ASF enable state.  */
8702         tg3_flag_clear(tp, ENABLE_ASF);
8703         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8704         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8705         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8706                 u32 nic_cfg;
8707
8708                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8709                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8710                         tg3_flag_set(tp, ENABLE_ASF);
8711                         tp->last_event_jiffies = jiffies;
8712                         if (tg3_flag(tp, 5750_PLUS))
8713                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8714                 }
8715         }
8716
8717         return 0;
8718 }
8719
8720 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8721 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8722
8723 /* tp->lock is held. */
8724 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8725 {
8726         int err;
8727
8728         tg3_stop_fw(tp);
8729
8730         tg3_write_sig_pre_reset(tp, kind);
8731
8732         tg3_abort_hw(tp, silent);
8733         err = tg3_chip_reset(tp);
8734
8735         __tg3_set_mac_addr(tp, 0);
8736
8737         tg3_write_sig_legacy(tp, kind);
8738         tg3_write_sig_post_reset(tp, kind);
8739
8740         if (tp->hw_stats) {
8741                 /* Save the stats across chip resets... */
8742                 tg3_get_nstats(tp, &tp->net_stats_prev);
8743                 tg3_get_estats(tp, &tp->estats_prev);
8744
8745                 /* And make sure the next sample is new data */
8746                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8747         }
8748
8749         if (err)
8750                 return err;
8751
8752         return 0;
8753 }
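     /* Note on the stats handoff above: tg3_get_nstats() folds the totals
      * saved in net_stats_prev into the counters it reports, so copying the
      * live counters into net_stats_prev just before zeroing the hardware
      * statistics block keeps the cumulative statistics monotonic across
      * chip resets.
      */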
8754
8755 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8756 {
8757         struct tg3 *tp = netdev_priv(dev);
8758         struct sockaddr *addr = p;
8759         int err = 0, skip_mac_1 = 0;
8760
8761         if (!is_valid_ether_addr(addr->sa_data))
8762                 return -EADDRNOTAVAIL;
8763
8764         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8765
8766         if (!netif_running(dev))
8767                 return 0;
8768
8769         if (tg3_flag(tp, ENABLE_ASF)) {
8770                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8771
8772                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8773                 addr0_low = tr32(MAC_ADDR_0_LOW);
8774                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8775                 addr1_low = tr32(MAC_ADDR_1_LOW);
8776
8777                 /* Skip MAC addr 1 if ASF is using it. */
8778                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8779                     !(addr1_high == 0 && addr1_low == 0))
8780                         skip_mac_1 = 1;
8781         }
8782         spin_lock_bh(&tp->lock);
8783         __tg3_set_mac_addr(tp, skip_mac_1);
8784         spin_unlock_bh(&tp->lock);
8785
8786         return err;
8787 }
8788
8789 /* tp->lock is held. */
8790 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8791                            dma_addr_t mapping, u32 maxlen_flags,
8792                            u32 nic_addr)
8793 {
8794         tg3_write_mem(tp,
8795                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8796                       ((u64) mapping >> 32));
8797         tg3_write_mem(tp,
8798                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8799                       ((u64) mapping & 0xffffffff));
8800         tg3_write_mem(tp,
8801                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8802                        maxlen_flags);
8803
8804         if (!tg3_flag(tp, 5705_PLUS))
8805                 tg3_write_mem(tp,
8806                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8807                               nic_addr);
8808 }
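     /* Layout of a TG3_BDINFO control block in NIC SRAM as written above,
      * per the TG3_BDINFO_* constants in tg3.h:
      *
      *   +0x0  TG3_BDINFO_HOST_ADDR     high, then low, half of the 64-bit
      *                                  host DMA address of the ring
      *   +0x8  TG3_BDINFO_MAXLEN_FLAGS  (ring size << 16) | attribute flags
      *   +0xc  TG3_BDINFO_NIC_ADDR      ring location in NIC SRAM
      *                                  (pre-5705 devices only)
      *
      * Consecutive control blocks sit TG3_BDINFO_SIZE (0x10) bytes apart,
      * which is why tg3_rings_reset() below steps through them in strides
      * of that size.
      */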
8809
8810
8811 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8812 {
8813         int i = 0;
8814
8815         if (!tg3_flag(tp, ENABLE_TSS)) {
8816                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8817                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8818                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8819         } else {
8820                 tw32(HOSTCC_TXCOL_TICKS, 0);
8821                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8822                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8823
8824                 for (; i < tp->txq_cnt; i++) {
8825                         u32 reg;
8826
8827                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8828                         tw32(reg, ec->tx_coalesce_usecs);
8829                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8830                         tw32(reg, ec->tx_max_coalesced_frames);
8831                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8832                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8833                 }
8834         }
8835
8836         for (; i < tp->irq_max - 1; i++) {
8837                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8838                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8839                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8840         }
8841 }
8842
8843 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8844 {
8845         int i = 0;
8846         u32 limit = tp->rxq_cnt;
8847
8848         if (!tg3_flag(tp, ENABLE_RSS)) {
8849                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8850                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8851                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8852                 limit--;
8853         } else {
8854                 tw32(HOSTCC_RXCOL_TICKS, 0);
8855                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8856                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8857         }
8858
8859         for (; i < limit; i++) {
8860                 u32 reg;
8861
8862                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8863                 tw32(reg, ec->rx_coalesce_usecs);
8864                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8865                 tw32(reg, ec->rx_max_coalesced_frames);
8866                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8867                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8868         }
8869
8870         for (; i < tp->irq_max - 1; i++) {
8871                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8872                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8873                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8874         }
8875 }
8876
8877 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8878 {
8879         tg3_coal_tx_init(tp, ec);
8880         tg3_coal_rx_init(tp, ec);
8881
8882         if (!tg3_flag(tp, 5705_PLUS)) {
8883                 u32 val = ec->stats_block_coalesce_usecs;
8884
8885                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8886                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8887
8888                 if (!tp->link_up)
8889                         val = 0;
8890
8891                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8892         }
8893 }
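     /* Assuming the usual ethtool plumbing into this routine through the
      * driver's set_coalesce hook, a command such as
      *
      *   ethtool -C ethX rx-usecs 20 rx-frames 5
      *
      * arrives here as ec->rx_coalesce_usecs = 20 and
      * ec->rx_max_coalesced_frames = 5, and is programmed into
      * HOSTCC_RXCOL_TICKS and HOSTCC_RXMAX_FRAMES (or the per-vector VEC1
      * copies when RSS is active).
      */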
8894
8895 /* tp->lock is held. */
8896 static void tg3_rings_reset(struct tg3 *tp)
8897 {
8898         int i;
8899         u32 stblk, txrcb, rxrcb, limit;
8900         struct tg3_napi *tnapi = &tp->napi[0];
8901
8902         /* Disable all transmit rings but the first. */
8903         if (!tg3_flag(tp, 5705_PLUS))
8904                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8905         else if (tg3_flag(tp, 5717_PLUS))
8906                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8907         else if (tg3_flag(tp, 57765_CLASS) ||
8908                  tg3_asic_rev(tp) == ASIC_REV_5762)
8909                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8910         else
8911                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8912
8913         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8914              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8915                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8916                               BDINFO_FLAGS_DISABLED);
8917
8918
8919         /* Disable all receive return rings but the first. */
8920         if (tg3_flag(tp, 5717_PLUS))
8921                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8922         else if (!tg3_flag(tp, 5705_PLUS))
8923                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8924         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
8925                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
8926                  tg3_flag(tp, 57765_CLASS))
8927                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8928         else
8929                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8930
8931         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8932              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8933                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8934                               BDINFO_FLAGS_DISABLED);
8935
8936         /* Disable interrupts */
8937         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8938         tp->napi[0].chk_msi_cnt = 0;
8939         tp->napi[0].last_rx_cons = 0;
8940         tp->napi[0].last_tx_cons = 0;
8941
8942         /* Zero mailbox registers. */
8943         if (tg3_flag(tp, SUPPORT_MSIX)) {
8944                 for (i = 1; i < tp->irq_max; i++) {
8945                         tp->napi[i].tx_prod = 0;
8946                         tp->napi[i].tx_cons = 0;
8947                         if (tg3_flag(tp, ENABLE_TSS))
8948                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8949                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8950                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8951                         tp->napi[i].chk_msi_cnt = 0;
8952                         tp->napi[i].last_rx_cons = 0;
8953                         tp->napi[i].last_tx_cons = 0;
8954                 }
8955                 if (!tg3_flag(tp, ENABLE_TSS))
8956                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8957         } else {
8958                 tp->napi[0].tx_prod = 0;
8959                 tp->napi[0].tx_cons = 0;
8960                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8961                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8962         }
8963
8964         /* Make sure the NIC-based send BD rings are disabled. */
8965         if (!tg3_flag(tp, 5705_PLUS)) {
8966                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8967                 for (i = 0; i < 16; i++)
8968                         tw32_tx_mbox(mbox + i * 8, 0);
8969         }
8970
8971         txrcb = NIC_SRAM_SEND_RCB;
8972         rxrcb = NIC_SRAM_RCV_RET_RCB;
8973
8974         /* Clear status block in ram. */
8975         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8976
8977         /* Set status block DMA address */
8978         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8979              ((u64) tnapi->status_mapping >> 32));
8980         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8981              ((u64) tnapi->status_mapping & 0xffffffff));
8982
8983         if (tnapi->tx_ring) {
8984                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8985                                (TG3_TX_RING_SIZE <<
8986                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8987                                NIC_SRAM_TX_BUFFER_DESC);
8988                 txrcb += TG3_BDINFO_SIZE;
8989         }
8990
8991         if (tnapi->rx_rcb) {
8992                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8993                                (tp->rx_ret_ring_mask + 1) <<
8994                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8995                 rxrcb += TG3_BDINFO_SIZE;
8996         }
8997
8998         stblk = HOSTCC_STATBLCK_RING1;
8999
9000         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9001                 u64 mapping = (u64)tnapi->status_mapping;
9002                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9003                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9004
9005                 /* Clear status block in ram. */
9006                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9007
9008                 if (tnapi->tx_ring) {
9009                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9010                                        (TG3_TX_RING_SIZE <<
9011                                         BDINFO_FLAGS_MAXLEN_SHIFT),
9012                                        NIC_SRAM_TX_BUFFER_DESC);
9013                         txrcb += TG3_BDINFO_SIZE;
9014                 }
9015
9016                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9017                                ((tp->rx_ret_ring_mask + 1) <<
9018                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
9019
9020                 stblk += 8;
9021                 rxrcb += TG3_BDINFO_SIZE;
9022         }
9023 }
9024
9025 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9026 {
9027         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9028
9029         if (!tg3_flag(tp, 5750_PLUS) ||
9030             tg3_flag(tp, 5780_CLASS) ||
9031             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9032             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9033             tg3_flag(tp, 57765_PLUS))
9034                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9035         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9036                  tg3_asic_rev(tp) == ASIC_REV_5787)
9037                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9038         else
9039                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9040
9041         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9042         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9043
9044         val = min(nic_rep_thresh, host_rep_thresh);
9045         tw32(RCVBDI_STD_THRESH, val);
9046
9047         if (tg3_flag(tp, 57765_PLUS))
9048                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9049
9050         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9051                 return;
9052
9053         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9054
9055         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9056
9057         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9058         tw32(RCVBDI_JUMBO_THRESH, val);
9059
9060         if (tg3_flag(tp, 57765_PLUS))
9061                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9062 }
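     /* Worked example for the standard ring, assuming the driver default
      * of 200 pending RX descriptors: host_rep_thresh = max(200 / 8, 1) =
      * 25, so replenishment is requested after roughly 25 standard buffers
      * are consumed, unless the BD cache size or rx_std_max_post makes
      * nic_rep_thresh the smaller bound.
      */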
9063
9064 static inline u32 calc_crc(unsigned char *buf, int len)
9065 {
9066         u32 reg;
9067         u32 tmp;
9068         int j, k;
9069
9070         reg = 0xffffffff;
9071
9072         for (j = 0; j < len; j++) {
9073                 reg ^= buf[j];
9074
9075                 for (k = 0; k < 8; k++) {
9076                         tmp = reg & 0x01;
9077
9078                         reg >>= 1;
9079
9080                         if (tmp)
9081                                 reg ^= 0xedb88320;
9082                 }
9083         }
9084
9085         return ~reg;
9086 }
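     /* This is the bitwise form of the standard reflected CRC-32
      * (polynomial 0xedb88320, initial value 0xffffffff, final inversion)
      * used by Ethernet; e.g. calc_crc("123456789", 9) returns 0xcbf43926,
      * the well-known CRC-32 check value.  The multicast filter below
      * consumes only seven bits of the result.
      */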
9087
9088 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9089 {
9090         /* accept or reject all multicast frames */
9091         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9092         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9093         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9094         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9095 }
9096
9097 static void __tg3_set_rx_mode(struct net_device *dev)
9098 {
9099         struct tg3 *tp = netdev_priv(dev);
9100         u32 rx_mode;
9101
9102         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9103                                   RX_MODE_KEEP_VLAN_TAG);
9104
9105 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9106         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9107          * flag clear.
9108          */
9109         if (!tg3_flag(tp, ENABLE_ASF))
9110                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9111 #endif
9112
9113         if (dev->flags & IFF_PROMISC) {
9114                 /* Promiscuous mode. */
9115                 rx_mode |= RX_MODE_PROMISC;
9116         } else if (dev->flags & IFF_ALLMULTI) {
9117                 /* Accept all multicast. */
9118                 tg3_set_multi(tp, 1);
9119         } else if (netdev_mc_empty(dev)) {
9120                 /* Reject all multicast. */
9121                 tg3_set_multi(tp, 0);
9122         } else {
9123                 /* Accept one or more multicast(s). */
9124                 struct netdev_hw_addr *ha;
9125                 u32 mc_filter[4] = { 0, };
9126                 u32 regidx;
9127                 u32 bit;
9128                 u32 crc;
9129
9130                 netdev_for_each_mc_addr(ha, dev) {
9131                         crc = calc_crc(ha->addr, ETH_ALEN);
9132                         bit = ~crc & 0x7f;
9133                         regidx = (bit & 0x60) >> 5;
9134                         bit &= 0x1f;
9135                         mc_filter[regidx] |= (1 << bit);
9136                 }
9137
9138                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9139                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9140                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9141                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9142         }
9143
9144         if (rx_mode != tp->rx_mode) {
9145                 tp->rx_mode = rx_mode;
9146                 tw32_f(MAC_RX_MODE, rx_mode);
9147                 udelay(10);
9148         }
9149 }
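     /* Worked example of the hash computation above: if ~crc & 0x7f is
      * 0x45, then regidx = (0x45 & 0x60) >> 5 = 2 and bit = 0x45 & 0x1f = 5,
      * so the address sets bit 5 of MAC_HASH_REG_2.  Bits 5-6 of the 7-bit
      * hash select one of the four 32-bit registers and bits 0-4 select the
      * bit within it, forming the 128-bit filter the MAC expects.
      */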
9150
9151 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9152 {
9153         int i;
9154
9155         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9156                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9157 }
9158
9159 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9160 {
9161         int i;
9162
9163         if (!tg3_flag(tp, SUPPORT_MSIX))
9164                 return;
9165
9166         if (tp->rxq_cnt == 1) {
9167                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9168                 return;
9169         }
9170
9171         /* Validate table against current IRQ count */
9172         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9173                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9174                         break;
9175         }
9176
9177         if (i != TG3_RSS_INDIR_TBL_SIZE)
9178                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9179 }
9180
9181 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9182 {
9183         int i = 0;
9184         u32 reg = MAC_RSS_INDIR_TBL_0;
9185
9186         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9187                 u32 val = tp->rss_ind_tbl[i];
9188                 i++;
9189                 for (; i % 8; i++) {
9190                         val <<= 4;
9191                         val |= tp->rss_ind_tbl[i];
9192                 }
9193                 tw32(reg, val);
9194                 reg += 4;
9195         }
9196 }
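     /* The table is packed eight 4-bit entries per 32-bit register with
      * the first entry in the most significant nibble.  With four RX
      * queues, for example, the default table repeats 0,1,2,3, so every
      * register is written as 0x01230123; the 128 entries of
      * TG3_RSS_INDIR_TBL_SIZE therefore span sixteen registers starting at
      * MAC_RSS_INDIR_TBL_0.
      */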
9197
9198 /* tp->lock is held. */
9199 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9200 {
9201         u32 val, rdmac_mode;
9202         int i, err, limit;
9203         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9204
9205         tg3_disable_ints(tp);
9206
9207         tg3_stop_fw(tp);
9208
9209         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9210
9211         if (tg3_flag(tp, INIT_COMPLETE))
9212                 tg3_abort_hw(tp, 1);
9213
9214         /* Enable MAC control of LPI */
9215         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9216                 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9217                       TG3_CPMU_EEE_LNKIDL_UART_IDL;
9218                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9219                         val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9220
9221                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9222
9223                 tw32_f(TG3_CPMU_EEE_CTRL,
9224                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9225
9226                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9227                       TG3_CPMU_EEEMD_LPI_IN_TX |
9228                       TG3_CPMU_EEEMD_LPI_IN_RX |
9229                       TG3_CPMU_EEEMD_EEE_ENABLE;
9230
9231                 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9232                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9233
9234                 if (tg3_flag(tp, ENABLE_APE))
9235                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9236
9237                 tw32_f(TG3_CPMU_EEE_MODE, val);
9238
9239                 tw32_f(TG3_CPMU_EEE_DBTMR1,
9240                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9241                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9242
9243                 tw32_f(TG3_CPMU_EEE_DBTMR2,
9244                        TG3_CPMU_DBTMR2_APE_TX_2047US |
9245                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9246         }
9247
9248         if (reset_phy)
9249                 tg3_phy_reset(tp);
9250
9251         err = tg3_chip_reset(tp);
9252         if (err)
9253                 return err;
9254
9255         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9256
9257         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9258                 val = tr32(TG3_CPMU_CTRL);
9259                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9260                 tw32(TG3_CPMU_CTRL, val);
9261
9262                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9263                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9264                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9265                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9266
9267                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9268                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9269                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9270                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9271
9272                 val = tr32(TG3_CPMU_HST_ACC);
9273                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9274                 val |= CPMU_HST_ACC_MACCLK_6_25;
9275                 tw32(TG3_CPMU_HST_ACC, val);
9276         }
9277
9278         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9279                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9280                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9281                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9282                 tw32(PCIE_PWR_MGMT_THRESH, val);
9283
9284                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9285                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9286
9287                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9288
9289                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9290                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9291         }
9292
9293         if (tg3_flag(tp, L1PLLPD_EN)) {
9294                 u32 grc_mode = tr32(GRC_MODE);
9295
9296                 /* Access the lower 1K of PL PCIE block registers. */
9297                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9298                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9299
9300                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9301                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9302                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9303
9304                 tw32(GRC_MODE, grc_mode);
9305         }
9306
9307         if (tg3_flag(tp, 57765_CLASS)) {
9308                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9309                         u32 grc_mode = tr32(GRC_MODE);
9310
9311                         /* Access the lower 1K of PL PCIE block registers. */
9312                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9313                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9314
9315                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9316                                    TG3_PCIE_PL_LO_PHYCTL5);
9317                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9318                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9319
9320                         tw32(GRC_MODE, grc_mode);
9321                 }
9322
9323                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9324                         u32 grc_mode;
9325
9326                         /* Fix transmit hangs */
9327                         val = tr32(TG3_CPMU_PADRNG_CTL);
9328                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9329                         tw32(TG3_CPMU_PADRNG_CTL, val);
9330
9331                         grc_mode = tr32(GRC_MODE);
9332
9333                         /* Access the lower 1K of DL PCIE block registers. */
9334                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9335                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9336
9337                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9338                                    TG3_PCIE_DL_LO_FTSMAX);
9339                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9340                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9341                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9342
9343                         tw32(GRC_MODE, grc_mode);
9344                 }
9345
9346                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9347                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9348                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9349                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9350         }
9351
9352         /* This works around an issue with Athlon chipsets on
9353          * B3 tigon3 silicon.  This bit has no effect on any
9354          * other revision.  But do not set this on PCI Express
9355          * chips and don't even touch the clocks if the CPMU is present.
9356          */
9357         if (!tg3_flag(tp, CPMU_PRESENT)) {
9358                 if (!tg3_flag(tp, PCI_EXPRESS))
9359                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9360                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9361         }
9362
9363         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9364             tg3_flag(tp, PCIX_MODE)) {
9365                 val = tr32(TG3PCI_PCISTATE);
9366                 val |= PCISTATE_RETRY_SAME_DMA;
9367                 tw32(TG3PCI_PCISTATE, val);
9368         }
9369
9370         if (tg3_flag(tp, ENABLE_APE)) {
9371                 /* Allow reads and writes to the
9372                  * APE register and memory space.
9373                  */
9374                 val = tr32(TG3PCI_PCISTATE);
9375                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9376                        PCISTATE_ALLOW_APE_SHMEM_WR |
9377                        PCISTATE_ALLOW_APE_PSPACE_WR;
9378                 tw32(TG3PCI_PCISTATE, val);
9379         }
9380
9381         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9382                 /* Enable some hw fixes.  */
9383                 val = tr32(TG3PCI_MSI_DATA);
9384                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9385                 tw32(TG3PCI_MSI_DATA, val);
9386         }
9387
9388         /* Descriptor ring init may make accesses to the
9389          * NIC SRAM area to setup the TX descriptors, so we
9390          * can only do this after the hardware has been
9391          * successfully reset.
9392          */
9393         err = tg3_init_rings(tp);
9394         if (err)
9395                 return err;
9396
9397         if (tg3_flag(tp, 57765_PLUS)) {
9398                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9399                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9400                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9401                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9402                 if (!tg3_flag(tp, 57765_CLASS) &&
9403                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
9404                     tg3_asic_rev(tp) != ASIC_REV_5762)
9405                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9406                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9407         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9408                    tg3_asic_rev(tp) != ASIC_REV_5761) {
9409                 /* This value is determined during the probe-time DMA
9410                  * engine test, tg3_test_dma().
9411                  */
9412                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9413         }
9414
9415         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9416                           GRC_MODE_4X_NIC_SEND_RINGS |
9417                           GRC_MODE_NO_TX_PHDR_CSUM |
9418                           GRC_MODE_NO_RX_PHDR_CSUM);
9419         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9420
9421         /* Pseudo-header checksum is done by hardware logic and not
9422          * the offload processors, so make the chip do the pseudo-
9423          * header checksums on receive.  For transmit it is more
9424          * convenient to do the pseudo-header checksum in software
9425          * as Linux does that on transmit for us in all cases.
9426          */
9427         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9428
9429         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9430         if (tp->rxptpctl)
9431                 tw32(TG3_RX_PTP_CTL,
9432                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9433
9434         if (tg3_flag(tp, PTP_CAPABLE))
9435                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9436
9437         tw32(GRC_MODE, tp->grc_mode | val);
9438
9439         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
9440         val = tr32(GRC_MISC_CFG);
9441         val &= ~0xff;
9442         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9443         tw32(GRC_MISC_CFG, val);
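             /* A prescaler value of 65 presumably divides the 66 MHz core
              * clock by 65 + 1 = 66, yielding a 1 MHz timer, so one tick is
              * 1 usec and the usec-based coalescing parameters map directly
              * onto ticks.
              */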
9444
9445         /* Initialize MBUF/DESC pool. */
9446         if (tg3_flag(tp, 5750_PLUS)) {
9447                 /* Do nothing.  */
9448         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9449                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9450                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9451                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9452                 else
9453                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9454                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9455                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9456         } else if (tg3_flag(tp, TSO_CAPABLE)) {
9457                 int fw_len;
9458
9459                 fw_len = tp->fw_len;
9460                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9461                 tw32(BUFMGR_MB_POOL_ADDR,
9462                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9463                 tw32(BUFMGR_MB_POOL_SIZE,
9464                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9465         }
9466
9467         if (tp->dev->mtu <= ETH_DATA_LEN) {
9468                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9469                      tp->bufmgr_config.mbuf_read_dma_low_water);
9470                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9471                      tp->bufmgr_config.mbuf_mac_rx_low_water);
9472                 tw32(BUFMGR_MB_HIGH_WATER,
9473                      tp->bufmgr_config.mbuf_high_water);
9474         } else {
9475                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9476                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9477                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9478                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9479                 tw32(BUFMGR_MB_HIGH_WATER,
9480                      tp->bufmgr_config.mbuf_high_water_jumbo);
9481         }
9482         tw32(BUFMGR_DMA_LOW_WATER,
9483              tp->bufmgr_config.dma_low_water);
9484         tw32(BUFMGR_DMA_HIGH_WATER,
9485              tp->bufmgr_config.dma_high_water);
9486
9487         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9488         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9489                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9490         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9491             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9492             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9493                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9494         tw32(BUFMGR_MODE, val);
9495         for (i = 0; i < 2000; i++) {
9496                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9497                         break;
9498                 udelay(10);
9499         }
9500         if (i >= 2000) {
9501                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9502                 return -ENODEV;
9503         }
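             /* The poll above allows 2000 iterations of udelay(10), i.e. a
              * ceiling of roughly 20 ms for the buffer manager to report
              * itself enabled before the reset is failed.
              */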
9504
9505         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9506                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9507
9508         tg3_setup_rxbd_thresholds(tp);
9509
9510         /* Initialize TG3_BDINFO's at:
9511          *  RCVDBDI_STD_BD:     standard eth size rx ring
9512          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9513          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
9514          *
9515          * like so:
9516          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9517          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9518          *                              ring attribute flags
9519          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9520          *
9521          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9522          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9523          *
9524          * The size of each ring is fixed in the firmware, but the location is
9525          * configurable.
9526          */
9527         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9528              ((u64) tpr->rx_std_mapping >> 32));
9529         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9530              ((u64) tpr->rx_std_mapping & 0xffffffff));
9531         if (!tg3_flag(tp, 5717_PLUS))
9532                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9533                      NIC_SRAM_RX_BUFFER_DESC);
9534
9535         /* Disable the mini ring */
9536         if (!tg3_flag(tp, 5705_PLUS))
9537                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9538                      BDINFO_FLAGS_DISABLED);
9539
9540         /* Program the jumbo buffer descriptor ring control
9541          * blocks on those devices that have them.
9542          */
9543         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9544             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9545
9546                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9547                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9548                              ((u64) tpr->rx_jmb_mapping >> 32));
9549                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9550                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9551                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9552                               BDINFO_FLAGS_MAXLEN_SHIFT;
9553                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9554                              val | BDINFO_FLAGS_USE_EXT_RECV);
9555                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9556                             tg3_flag(tp, 57765_CLASS) ||
9557                             tg3_asic_rev(tp) == ASIC_REV_5762)
9558                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9559                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9560                 } else {
9561                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9562                              BDINFO_FLAGS_DISABLED);
9563                 }
9564
9565                 if (tg3_flag(tp, 57765_PLUS)) {
9566                         val = TG3_RX_STD_RING_SIZE(tp);
9567                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9568                         val |= (TG3_RX_STD_DMA_SZ << 2);
9569                 } else
9570                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9571         } else
9572                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9573
9574         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9575
9576         tpr->rx_std_prod_idx = tp->rx_pending;
9577         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9578
9579         tpr->rx_jmb_prod_idx =
9580                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9581         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9582
9583         tg3_rings_reset(tp);
9584
9585         /* Initialize MAC address and backoff seed. */
9586         __tg3_set_mac_addr(tp, 0);
9587
9588         /* MTU + ethernet header + FCS + optional VLAN tag */
9589         tw32(MAC_RX_MTU_SIZE,
9590              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
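             /* For the default MTU of 1500 this programs 1500 + 14
              * (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes as
              * the largest frame the MAC will accept.
              */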
9591
9592         /* The slot time is changed by tg3_setup_phy if we
9593          * run at gigabit with half duplex.
9594          */
9595         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9596               (6 << TX_LENGTHS_IPG_SHIFT) |
9597               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9598
9599         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9600             tg3_asic_rev(tp) == ASIC_REV_5762)
9601                 val |= tr32(MAC_TX_LENGTHS) &
9602                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9603                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9604
9605         tw32(MAC_TX_LENGTHS, val);
9606
9607         /* Receive rules. */
9608         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9609         tw32(RCVLPC_CONFIG, 0x0181);
9610
9611         /* Calculate RDMAC_MODE setting early, we need it to determine
9612          * the RCVLPC_STATE_ENABLE mask.
9613          */
9614         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9615                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9616                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9617                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9618                       RDMAC_MODE_LNGREAD_ENAB);
9619
9620         if (tg3_asic_rev(tp) == ASIC_REV_5717)
9621                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9622
9623         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9624             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9625             tg3_asic_rev(tp) == ASIC_REV_57780)
9626                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9627                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9628                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9629
9630         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9631             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9632                 if (tg3_flag(tp, TSO_CAPABLE) &&
9633                     tg3_asic_rev(tp) == ASIC_REV_5705) {
9634                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9635                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9636                            !tg3_flag(tp, IS_5788)) {
9637                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9638                 }
9639         }
9640
9641         if (tg3_flag(tp, PCI_EXPRESS))
9642                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9643
9644         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9645                 tp->dma_limit = 0;
9646                 if (tp->dev->mtu <= ETH_DATA_LEN) {
9647                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9648                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9649                 }
9650         }
9651
9652         if (tg3_flag(tp, HW_TSO_1) ||
9653             tg3_flag(tp, HW_TSO_2) ||
9654             tg3_flag(tp, HW_TSO_3))
9655                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9656
9657         if (tg3_flag(tp, 57765_PLUS) ||
9658             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9659             tg3_asic_rev(tp) == ASIC_REV_57780)
9660                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9661
9662         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9663             tg3_asic_rev(tp) == ASIC_REV_5762)
9664                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9665
9666         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9667             tg3_asic_rev(tp) == ASIC_REV_5784 ||
9668             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9669             tg3_asic_rev(tp) == ASIC_REV_57780 ||
9670             tg3_flag(tp, 57765_PLUS)) {
9671                 u32 tgtreg;
9672
9673                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9674                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9675                 else
9676                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
9677
9678                 val = tr32(tgtreg);
9679                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9680                     tg3_asic_rev(tp) == ASIC_REV_5762) {
9681                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9682                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9683                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9684                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9685                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9686                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9687                 }
9688                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9689         }
9690
9691         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9692             tg3_asic_rev(tp) == ASIC_REV_5720 ||
9693             tg3_asic_rev(tp) == ASIC_REV_5762) {
9694                 u32 tgtreg;
9695
9696                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9697                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9698                 else
9699                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9700
9701                 val = tr32(tgtreg);
9702                 tw32(tgtreg, val |
9703                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9704                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9705         }
9706
9707         /* Receive/send statistics. */
9708         if (tg3_flag(tp, 5750_PLUS)) {
9709                 val = tr32(RCVLPC_STATS_ENABLE);
9710                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9711                 tw32(RCVLPC_STATS_ENABLE, val);
9712         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9713                    tg3_flag(tp, TSO_CAPABLE)) {
9714                 val = tr32(RCVLPC_STATS_ENABLE);
9715                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9716                 tw32(RCVLPC_STATS_ENABLE, val);
9717         } else {
9718                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9719         }
9720         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9721         tw32(SNDDATAI_STATSENAB, 0xffffff);
9722         tw32(SNDDATAI_STATSCTRL,
9723              (SNDDATAI_SCTRL_ENABLE |
9724               SNDDATAI_SCTRL_FASTUPD));
9725
9726         /* Setup host coalescing engine. */
9727         tw32(HOSTCC_MODE, 0);
9728         for (i = 0; i < 2000; i++) {
9729                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9730                         break;
9731                 udelay(10);
9732         }
9733
9734         __tg3_set_coalesce(tp, &tp->coal);
9735
9736         if (!tg3_flag(tp, 5705_PLUS)) {
9737                 /* Status/statistics block address.  See tg3_timer,
9738                  * the tg3_periodic_fetch_stats call there, and
9739                  * tg3_get_stats to see how this works for 5705/5750 chips.
9740                  */
9741                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9742                      ((u64) tp->stats_mapping >> 32));
9743                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9744                      ((u64) tp->stats_mapping & 0xffffffff));
9745                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9746
9747                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9748
9749                 /* Clear statistics and status block memory areas */
9750                 for (i = NIC_SRAM_STATS_BLK;
9751                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9752                      i += sizeof(u32)) {
9753                         tg3_write_mem(tp, i, 0);
9754                         udelay(40);
9755                 }
9756         }
9757
9758         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9759
9760         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9761         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9762         if (!tg3_flag(tp, 5705_PLUS))
9763                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9764
9765         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9766                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9767                 /* reset to prevent intermittently losing the 1st rx packet */
9768                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9769                 udelay(10);
9770         }
9771
9772         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9773                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9774                         MAC_MODE_FHDE_ENABLE;
9775         if (tg3_flag(tp, ENABLE_APE))
9776                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9777         if (!tg3_flag(tp, 5705_PLUS) &&
9778             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9779             tg3_asic_rev(tp) != ASIC_REV_5700)
9780                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9781         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9782         udelay(40);
9783
9784         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9785          * If TG3_FLAG_IS_NIC is zero, we should read the
9786          * register to preserve the GPIO settings for LOMs. The GPIOs,
9787          * whether used as inputs or outputs, are set by boot code after
9788          * reset.
9789          */
9790         if (!tg3_flag(tp, IS_NIC)) {
9791                 u32 gpio_mask;
9792
9793                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9794                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9795                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9796
9797                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
9798                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9799                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9800
9801                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
9802                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9803
9804                 tp->grc_local_ctrl &= ~gpio_mask;
9805                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9806
9807                 /* GPIO1 must be driven high for eeprom write protect */
9808                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9809                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9810                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9811         }
9812         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9813         udelay(100);
9814
9815         if (tg3_flag(tp, USING_MSIX)) {
9816                 val = tr32(MSGINT_MODE);
9817                 val |= MSGINT_MODE_ENABLE;
9818                 if (tp->irq_cnt > 1)
9819                         val |= MSGINT_MODE_MULTIVEC_EN;
9820                 if (!tg3_flag(tp, 1SHOT_MSI))
9821                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9822                 tw32(MSGINT_MODE, val);
9823         }
9824
9825         if (!tg3_flag(tp, 5705_PLUS)) {
9826                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9827                 udelay(40);
9828         }
9829
9830         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9831                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9832                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9833                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9834                WDMAC_MODE_LNGREAD_ENAB);
9835
9836         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9837             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9838                 if (tg3_flag(tp, TSO_CAPABLE) &&
9839                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
9840                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
9841                         /* nothing */
9842                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9843                            !tg3_flag(tp, IS_5788)) {
9844                         val |= WDMAC_MODE_RX_ACCEL;
9845                 }
9846         }
9847
9848         /* Enable host coalescing bug fix */
9849         if (tg3_flag(tp, 5755_PLUS))
9850                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9851
9852         if (tg3_asic_rev(tp) == ASIC_REV_5785)
9853                 val |= WDMAC_MODE_BURST_ALL_DATA;
9854
9855         tw32_f(WDMAC_MODE, val);
9856         udelay(40);
9857
9858         if (tg3_flag(tp, PCIX_MODE)) {
9859                 u16 pcix_cmd;
9860
9861                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9862                                      &pcix_cmd);
9863                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
9864                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9865                         pcix_cmd |= PCI_X_CMD_READ_2K;
9866                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
9867                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9868                         pcix_cmd |= PCI_X_CMD_READ_2K;
9869                 }
9870                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9871                                       pcix_cmd);
9872         }
9873
9874         tw32_f(RDMAC_MODE, rdmac_mode);
9875         udelay(40);
9876
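              /* 5719 workaround: if any read-DMA channel's length register
               * exceeds the MTU, set the TX-length workaround bit and flag it
               * so tg3_periodic_fetch_stats() can undo it later.
               */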
9877         if (tg3_asic_rev(tp) == ASIC_REV_5719) {
9878                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9879                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9880                                 break;
9881                 }
9882                 if (i < TG3_NUM_RDMA_CHANNELS) {
9883                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9884                         val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9885                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9886                         tg3_flag_set(tp, 5719_RDMA_BUG);
9887                 }
9888         }
9889
9890         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9891         if (!tg3_flag(tp, 5705_PLUS))
9892                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9893
9894         if (tg3_asic_rev(tp) == ASIC_REV_5761)
9895                 tw32(SNDDATAC_MODE,
9896                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9897         else
9898                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9899
9900         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9901         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9902         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9903         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9904                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9905         tw32(RCVDBDI_MODE, val);
9906         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9907         if (tg3_flag(tp, HW_TSO_1) ||
9908             tg3_flag(tp, HW_TSO_2) ||
9909             tg3_flag(tp, HW_TSO_3))
9910                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9911         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9912         if (tg3_flag(tp, ENABLE_TSS))
9913                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9914         tw32(SNDBDI_MODE, val);
9915         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9916
9917         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
9918                 err = tg3_load_5701_a0_firmware_fix(tp);
9919                 if (err)
9920                         return err;
9921         }
9922
9923         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9924                 /* Ignore any errors for the firmware download. If download
9925                  * fails, the device will operate with EEE disabled
9926                  * fails, the device will operate with EEE disabled.
9927                 tg3_load_57766_firmware(tp);
9928         }
9929
9930         if (tg3_flag(tp, TSO_CAPABLE)) {
9931                 err = tg3_load_tso_firmware(tp);
9932                 if (err)
9933                         return err;
9934         }
9935
9936         tp->tx_mode = TX_MODE_ENABLE;
9937
9938         if (tg3_flag(tp, 5755_PLUS) ||
9939             tg3_asic_rev(tp) == ASIC_REV_5906)
9940                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9941
9942         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9943             tg3_asic_rev(tp) == ASIC_REV_5762) {
9944                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9945                 tp->tx_mode &= ~val;
9946                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9947         }
9948
9949         tw32_f(MAC_TX_MODE, tp->tx_mode);
9950         udelay(100);
9951
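              /* Program the RSS indirection table and the 40-byte hash key
               * (ten 32-bit registers) used to spread flows across rx rings.
               */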
9952         if (tg3_flag(tp, ENABLE_RSS)) {
9953                 tg3_rss_write_indir_tbl(tp);
9954
9955                 /* Setup the "secret" hash key. */
9956                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9957                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9958                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9959                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9960                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9961                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9962                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9963                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9964                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9965                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9966         }
9967
9968         tp->rx_mode = RX_MODE_ENABLE;
9969         if (tg3_flag(tp, 5755_PLUS))
9970                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9971
9972         if (tg3_flag(tp, ENABLE_RSS))
9973                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9974                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9975                                RX_MODE_RSS_IPV6_HASH_EN |
9976                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9977                                RX_MODE_RSS_IPV4_HASH_EN |
9978                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9979
9980         tw32_f(MAC_RX_MODE, tp->rx_mode);
9981         udelay(10);
9982
9983         tw32(MAC_LED_CTRL, tp->led_ctrl);
9984
9985         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9986         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9987                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9988                 udelay(10);
9989         }
9990         tw32_f(MAC_RX_MODE, tp->rx_mode);
9991         udelay(10);
9992
9993         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9994                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
9995                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9996                         /* Set drive transmission level to 1.2V, but only
9997                          * if the signal pre-emphasis bit is not set.  */
9998                         val = tr32(MAC_SERDES_CFG);
9999                         val &= 0xfffff000;
10000                         val |= 0x880;
10001                         tw32(MAC_SERDES_CFG, val);
10002                 }
10003                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10004                         tw32(MAC_SERDES_CFG, 0x616000);
10005         }
10006
10007         /* Prevent chip from dropping frames when flow control
10008          * is enabled.
10009          */
10010         if (tg3_flag(tp, 57765_CLASS))
10011                 val = 1;
10012         else
10013                 val = 2;
10014         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10015
10016         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10017             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10018                 /* Use hardware link auto-negotiation */
10019                 tg3_flag_set(tp, HW_AUTONEG);
10020         }
10021
10022         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10023             tg3_asic_rev(tp) == ASIC_REV_5714) {
10024                 u32 tmp;
10025
10026                 tmp = tr32(SERDES_RX_CTRL);
10027                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10028                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10029                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10030                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10031         }
10032
10033         if (!tg3_flag(tp, USE_PHYLIB)) {
10034                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10035                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10036
10037                 err = tg3_setup_phy(tp, 0);
10038                 if (err)
10039                         return err;
10040
10041                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10042                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10043                         u32 tmp;
10044
10045                         /* Clear CRC stats. */
10046                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10047                                 tg3_writephy(tp, MII_TG3_TEST1,
10048                                              tmp | MII_TG3_TEST1_CRC_EN);
10049                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10050                         }
10051                 }
10052         }
10053
10054         __tg3_set_rx_mode(tp->dev);
10055
10056         /* Initialize receive rules. */
10057         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10058         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10059         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10060         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10061
10062         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10063                 limit = 8;
10064         else
10065                 limit = 16;
10066         if (tg3_flag(tp, ENABLE_ASF))
10067                 limit -= 4;
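               /* Each case deliberately falls through to the one below it,
                * clearing every receive rule above those in use; rules 2 and 3
                * are left alone (note the commented-out writes).
                */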
10068         switch (limit) {
10069         case 16:
10070                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10071         case 15:
10072                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10073         case 14:
10074                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10075         case 13:
10076                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10077         case 12:
10078                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10079         case 11:
10080                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10081         case 10:
10082                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10083         case 9:
10084                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10085         case 8:
10086                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10087         case 7:
10088                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10089         case 6:
10090                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10091         case 5:
10092                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10093         case 4:
10094                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10095         case 3:
10096                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10097         case 2:
10098         case 1:
10099
10100         default:
10101                 break;
10102         }
10103
10104         if (tg3_flag(tp, ENABLE_APE))
10105                 /* Write our heartbeat update interval to APE. */
10106                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10107                                 APE_HOST_HEARTBEAT_INT_DISABLE);
10108
10109         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10110
10111         return 0;
10112 }
10113
10114 /* Called at device open time to get the chip ready for
10115  * packet processing.  Invoked with tp->lock held.
10116  */
10117 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
10118 {
10119         tg3_switch_clocks(tp);
10120
10121         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10122
10123         return tg3_reset_hw(tp, reset_phy);
10124 }
10125
10126 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10127 {
10128         int i;
10129
10130         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10131                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10132
10133                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10135
10136                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10137                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10138                         memset(ocir, 0, TG3_OCIR_LEN);
10139         }
10140 }
10141
10142 /* sysfs attributes for hwmon */
10143 static ssize_t tg3_show_temp(struct device *dev,
10144                              struct device_attribute *devattr, char *buf)
10145 {
10146         struct pci_dev *pdev = to_pci_dev(dev);
10147         struct net_device *netdev = pci_get_drvdata(pdev);
10148         struct tg3 *tp = netdev_priv(netdev);
10149         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10150         u32 temperature;
10151
10152         spin_lock_bh(&tp->lock);
10153         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10154                                 sizeof(temperature));
10155         spin_unlock_bh(&tp->lock);
10156         return sprintf(buf, "%u\n", temperature);
10157 }
10158
10159
10160 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10161                           TG3_TEMP_SENSOR_OFFSET);
10162 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10163                           TG3_TEMP_CAUTION_OFFSET);
10164 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10165                           TG3_TEMP_MAX_OFFSET);
10166
10167 static struct attribute *tg3_attributes[] = {
10168         &sensor_dev_attr_temp1_input.dev_attr.attr,
10169         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10170         &sensor_dev_attr_temp1_max.dev_attr.attr,
10171         NULL
10172 };
10173
10174 static const struct attribute_group tg3_group = {
10175         .attrs = tg3_attributes,
10176 };
10177
10178 static void tg3_hwmon_close(struct tg3 *tp)
10179 {
10180         if (tp->hwmon_dev) {
10181                 hwmon_device_unregister(tp->hwmon_dev);
10182                 tp->hwmon_dev = NULL;
10183                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10184         }
10185 }
10186
10187 static void tg3_hwmon_open(struct tg3 *tp)
10188 {
10189         int i, err;
10190         u32 size = 0;
10191         struct pci_dev *pdev = tp->pdev;
10192         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10193
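               /* Pull the OCIR sensor records out of the APE scratchpad; if
                * none of them carry data, there is nothing to expose via hwmon.
                */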
10194         tg3_sd_scan_scratchpad(tp, ocirs);
10195
10196         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10197                 if (!ocirs[i].src_data_length)
10198                         continue;
10199
10200                 size += ocirs[i].src_hdr_length;
10201                 size += ocirs[i].src_data_length;
10202         }
10203
10204         if (!size)
10205                 return;
10206
10207         /* Register hwmon sysfs hooks */
10208         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10209         if (err) {
10210                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10211                 return;
10212         }
10213
10214         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10215         if (IS_ERR(tp->hwmon_dev)) {
10216                 tp->hwmon_dev = NULL;
10217                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10218                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10219         }
10220 }
10221
10222
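      /* Fold a 32-bit hardware counter into a 64-bit high/low software
       * stat; a wrap of ->low after the add signals a carry into ->high.
       */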
10223 #define TG3_STAT_ADD32(PSTAT, REG) \
10224 do {    u32 __val = tr32(REG); \
10225         (PSTAT)->low += __val; \
10226         if ((PSTAT)->low < __val) \
10227                 (PSTAT)->high += 1; \
10228 } while (0)
10229
10230 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10231 {
10232         struct tg3_hw_stats *sp = tp->hw_stats;
10233
10234         if (!tp->link_up)
10235                 return;
10236
10237         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10238         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10239         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10240         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10241         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10242         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10243         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10244         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10245         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10246         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10247         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10248         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10249         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
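               /* Once more packets have been sent than there are RDMA channels,
                * the 5719 TX-length workaround applied in tg3_reset_hw() is no
                * longer needed and is switched back off.
                */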
10250         if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10251                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10252                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10253                 u32 val;
10254
10255                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10256                 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10257                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10258                 tg3_flag_clear(tp, 5719_RDMA_BUG);
10259         }
10260
10261         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10262         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10263         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10264         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10265         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10266         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10267         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10268         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10269         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10270         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10271         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10272         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10273         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10274         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10275
10276         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10277         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10278             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10279             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10280                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10281         } else {
10282                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10283                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10284                 if (val) {
10285                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10286                         sp->rx_discards.low += val;
10287                         if (sp->rx_discards.low < val)
10288                                 sp->rx_discards.high += 1;
10289                 }
10290                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10291         }
10292         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10293 }
10294
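      /* Detect MSIs the chip failed to deliver: if a ring reports pending
       * work but its consumer indices have not moved since the previous
       * timer tick, invoke the handler by hand.
       */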
10295 static void tg3_chk_missed_msi(struct tg3 *tp)
10296 {
10297         u32 i;
10298
10299         for (i = 0; i < tp->irq_cnt; i++) {
10300                 struct tg3_napi *tnapi = &tp->napi[i];
10301
10302                 if (tg3_has_work(tnapi)) {
10303                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10304                             tnapi->last_tx_cons == tnapi->tx_cons) {
10305                                 if (tnapi->chk_msi_cnt < 1) {
10306                                         tnapi->chk_msi_cnt++;
10307                                         return;
10308                                 }
10309                                 tg3_msi(0, tnapi);
10310                         }
10311                 }
10312                 tnapi->chk_msi_cnt = 0;
10313                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10314                 tnapi->last_tx_cons = tnapi->tx_cons;
10315         }
10316 }
10317
10318 static void tg3_timer(unsigned long __opaque)
10319 {
10320         struct tg3 *tp = (struct tg3 *) __opaque;
10321
10322         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10323                 goto restart_timer;
10324
10325         spin_lock(&tp->lock);
10326
10327         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10328             tg3_flag(tp, 57765_CLASS))
10329                 tg3_chk_missed_msi(tp);
10330
10331         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10332                 /* BCM4785: Flush posted writes from GbE to host memory. */
10333                 tr32(HOSTCC_MODE);
10334         }
10335
10336         if (!tg3_flag(tp, TAGGED_STATUS)) {
10337                 /* All of this garbage is because, when using non-tagged
10338                  * IRQ status, the mailbox/status_block protocol the chip
10339                  * uses with the CPU is race prone.
10340                  */
10341                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10342                         tw32(GRC_LOCAL_CTRL,
10343                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10344                 } else {
10345                         tw32(HOSTCC_MODE, tp->coalesce_mode |
10346                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10347                 }
10348
10349                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10350                         spin_unlock(&tp->lock);
10351                         tg3_reset_task_schedule(tp);
10352                         goto restart_timer;
10353                 }
10354         }
10355
10356         /* This part only runs once per second. */
10357         if (!--tp->timer_counter) {
10358                 if (tg3_flag(tp, 5705_PLUS))
10359                         tg3_periodic_fetch_stats(tp);
10360
10361                 if (tp->setlpicnt && !--tp->setlpicnt)
10362                         tg3_phy_eee_enable(tp);
10363
10364                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10365                         u32 mac_stat;
10366                         int phy_event;
10367
10368                         mac_stat = tr32(MAC_STATUS);
10369
10370                         phy_event = 0;
10371                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10372                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10373                                         phy_event = 1;
10374                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10375                                 phy_event = 1;
10376
10377                         if (phy_event)
10378                                 tg3_setup_phy(tp, 0);
10379                 } else if (tg3_flag(tp, POLL_SERDES)) {
10380                         u32 mac_stat = tr32(MAC_STATUS);
10381                         int need_setup = 0;
10382
10383                         if (tp->link_up &&
10384                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10385                                 need_setup = 1;
10386                         }
10387                         if (!tp->link_up &&
10388                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
10389                                          MAC_STATUS_SIGNAL_DET))) {
10390                                 need_setup = 1;
10391                         }
10392                         if (need_setup) {
10393                                 if (!tp->serdes_counter) {
10394                                         tw32_f(MAC_MODE,
10395                                              (tp->mac_mode &
10396                                               ~MAC_MODE_PORT_MODE_MASK));
10397                                         udelay(40);
10398                                         tw32_f(MAC_MODE, tp->mac_mode);
10399                                         udelay(40);
10400                                 }
10401                                 tg3_setup_phy(tp, 0);
10402                         }
10403                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10404                            tg3_flag(tp, 5780_CLASS)) {
10405                         tg3_serdes_parallel_detect(tp);
10406                 }
10407
10408                 tp->timer_counter = tp->timer_multiplier;
10409         }
10410
10411         /* Heartbeat is only sent once every 2 seconds.
10412          *
10413          * The heartbeat is to tell the ASF firmware that the host
10414          * driver is still alive.  In the event that the OS crashes,
10415          * ASF needs to reset the hardware to free up the FIFO space
10416          * that may be filled with rx packets destined for the host.
10417          * If the FIFO is full, ASF will no longer function properly.
10418          *
10419          * Unintended resets have been reported on real-time kernels
10420          * where the timer doesn't run on time.  Netpoll will also have
10421          * the same problem.
10422          *
10423          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10424          * to check the ring condition when the heartbeat is expiring
10425          * before doing the reset.  This will prevent most unintended
10426          * resets.
10427          */
10428         if (!--tp->asf_counter) {
10429                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10430                         tg3_wait_for_event_ack(tp);
10431
10432                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10433                                       FWCMD_NICDRV_ALIVE3);
10434                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10435                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10436                                       TG3_FW_UPDATE_TIMEOUT_SEC);
10437
10438                         tg3_generate_fw_event(tp);
10439                 }
10440                 tp->asf_counter = tp->asf_multiplier;
10441         }
10442
10443         spin_unlock(&tp->lock);
10444
10445 restart_timer:
10446         tp->timer.expires = jiffies + tp->timer_offset;
10447         add_timer(&tp->timer);
10448 }
10449
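      /* The timer normally ticks ten times per second; chips with reliable
       * tagged status get by with one tick per second.  timer_multiplier
       * converts ticks back into the once-per-second work items.
       */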
10450 static void tg3_timer_init(struct tg3 *tp)
10451 {
10452         if (tg3_flag(tp, TAGGED_STATUS) &&
10453             tg3_asic_rev(tp) != ASIC_REV_5717 &&
10454             !tg3_flag(tp, 57765_CLASS))
10455                 tp->timer_offset = HZ;
10456         else
10457                 tp->timer_offset = HZ / 10;
10458
10459         BUG_ON(tp->timer_offset > HZ);
10460
10461         tp->timer_multiplier = (HZ / tp->timer_offset);
10462         tp->asf_multiplier = (HZ / tp->timer_offset) *
10463                              TG3_FW_UPDATE_FREQ_SEC;
10464
10465         init_timer(&tp->timer);
10466         tp->timer.data = (unsigned long) tp;
10467         tp->timer.function = tg3_timer;
10468 }
10469
10470 static void tg3_timer_start(struct tg3 *tp)
10471 {
10472         tp->asf_counter   = tp->asf_multiplier;
10473         tp->timer_counter = tp->timer_multiplier;
10474
10475         tp->timer.expires = jiffies + tp->timer_offset;
10476         add_timer(&tp->timer);
10477 }
10478
10479 static void tg3_timer_stop(struct tg3 *tp)
10480 {
10481         del_timer_sync(&tp->timer);
10482 }
10483
10484 /* Restart hardware after configuration changes, self-test, etc.
10485  * Invoked with tp->lock held.
10486  */
10487 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10488         __releases(tp->lock)
10489         __acquires(tp->lock)
10490 {
10491         int err;
10492
10493         err = tg3_init_hw(tp, reset_phy);
10494         if (err) {
10495                 netdev_err(tp->dev,
10496                            "Failed to re-initialize device, aborting\n");
10497                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10498                 tg3_full_unlock(tp);
10499                 tg3_timer_stop(tp);
10500                 tp->irq_sync = 0;
10501                 tg3_napi_enable(tp);
10502                 dev_close(tp->dev);
10503                 tg3_full_lock(tp, 0);
10504         }
10505         return err;
10506 }
10507
10508 static void tg3_reset_task(struct work_struct *work)
10509 {
10510         struct tg3 *tp = container_of(work, struct tg3, reset_task);
10511         int err;
10512
10513         tg3_full_lock(tp, 0);
10514
10515         if (!netif_running(tp->dev)) {
10516                 tg3_flag_clear(tp, RESET_TASK_PENDING);
10517                 tg3_full_unlock(tp);
10518                 return;
10519         }
10520
10521         tg3_full_unlock(tp);
10522
10523         tg3_phy_stop(tp);
10524
10525         tg3_netif_stop(tp);
10526
10527         tg3_full_lock(tp, 1);
10528
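               /* A pending TX recovery indicates the transmit hang was blamed
                * on posted/reordered mailbox writes; switch the mailbox write
                * handlers and set MBOX_WRITE_REORDER before re-initializing.
                */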
10529         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10530                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10531                 tp->write32_rx_mbox = tg3_write_flush_reg32;
10532                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10533                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10534         }
10535
10536         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10537         err = tg3_init_hw(tp, 1);
10538         if (err)
10539                 goto out;
10540
10541         tg3_netif_start(tp);
10542
10543 out:
10544         tg3_full_unlock(tp);
10545
10546         if (!err)
10547                 tg3_phy_start(tp);
10548
10549         tg3_flag_clear(tp, RESET_TASK_PENDING);
10550 }
10551
10552 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10553 {
10554         irq_handler_t fn;
10555         unsigned long flags;
10556         char *name;
10557         struct tg3_napi *tnapi = &tp->napi[irq_num];
10558
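               /* With multiple vectors, label each IRQ "<ifname>-<n>" so they
                * can be told apart in /proc/interrupts.
                */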
10559         if (tp->irq_cnt == 1)
10560                 name = tp->dev->name;
10561         else {
10562                 name = &tnapi->irq_lbl[0];
10563                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10564                 name[IFNAMSIZ-1] = 0;
10565         }
10566
10567         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10568                 fn = tg3_msi;
10569                 if (tg3_flag(tp, 1SHOT_MSI))
10570                         fn = tg3_msi_1shot;
10571                 flags = 0;
10572         } else {
10573                 fn = tg3_interrupt;
10574                 if (tg3_flag(tp, TAGGED_STATUS))
10575                         fn = tg3_interrupt_tagged;
10576                 flags = IRQF_SHARED;
10577         }
10578
10579         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10580 }
10581
10582 static int tg3_test_interrupt(struct tg3 *tp)
10583 {
10584         struct tg3_napi *tnapi = &tp->napi[0];
10585         struct net_device *dev = tp->dev;
10586         int err, i, intr_ok = 0;
10587         u32 val;
10588
10589         if (!netif_running(dev))
10590                 return -ENODEV;
10591
10592         tg3_disable_ints(tp);
10593
10594         free_irq(tnapi->irq_vec, tnapi);
10595
10596         /*
10597          * Turn off MSI one shot mode.  Otherwise this test has no
10598          * observable way to know whether the interrupt was delivered.
10599          */
10600         if (tg3_flag(tp, 57765_PLUS)) {
10601                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10602                 tw32(MSGINT_MODE, val);
10603         }
10604
10605         err = request_irq(tnapi->irq_vec, tg3_test_isr,
10606                           IRQF_SHARED, dev->name, tnapi);
10607         if (err)
10608                 return err;
10609
10610         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10611         tg3_enable_ints(tp);
10612
10613         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10614                tnapi->coal_now);
10615
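               /* Poll for up to ~50ms for evidence that the interrupt fired:
                * either the interrupt mailbox went non-zero or the ISR masked
                * PCI interrupts via MISC_HOST_CTRL.
                */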
10616         for (i = 0; i < 5; i++) {
10617                 u32 int_mbox, misc_host_ctrl;
10618
10619                 int_mbox = tr32_mailbox(tnapi->int_mbox);
10620                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10621
10622                 if ((int_mbox != 0) ||
10623                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10624                         intr_ok = 1;
10625                         break;
10626                 }
10627
10628                 if (tg3_flag(tp, 57765_PLUS) &&
10629                     tnapi->hw_status->status_tag != tnapi->last_tag)
10630                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10631
10632                 msleep(10);
10633         }
10634
10635         tg3_disable_ints(tp);
10636
10637         free_irq(tnapi->irq_vec, tnapi);
10638
10639         err = tg3_request_irq(tp, 0);
10640
10641         if (err)
10642                 return err;
10643
10644         if (intr_ok) {
10645                 /* Reenable MSI one shot mode. */
10646                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10647                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10648                         tw32(MSGINT_MODE, val);
10649                 }
10650                 return 0;
10651         }
10652
10653         return -EIO;
10654 }
10655
10656 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
10657  * successfully restored.
10658  */
10659 static int tg3_test_msi(struct tg3 *tp)
10660 {
10661         int err;
10662         u16 pci_cmd;
10663
10664         if (!tg3_flag(tp, USING_MSI))
10665                 return 0;
10666
10667         /* Turn off SERR reporting in case MSI terminates with Master
10668          * Abort.
10669          */
10670         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10671         pci_write_config_word(tp->pdev, PCI_COMMAND,
10672                               pci_cmd & ~PCI_COMMAND_SERR);
10673
10674         err = tg3_test_interrupt(tp);
10675
10676         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10677
10678         if (!err)
10679                 return 0;
10680
10681         /* other failures */
10682         if (err != -EIO)
10683                 return err;
10684
10685         /* MSI test failed, go back to INTx mode */
10686         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10687                     "to INTx mode. Please report this failure to the PCI "
10688                     "maintainer and include system chipset information\n");
10689
10690         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10691
10692         pci_disable_msi(tp->pdev);
10693
10694         tg3_flag_clear(tp, USING_MSI);
10695         tp->napi[0].irq_vec = tp->pdev->irq;
10696
10697         err = tg3_request_irq(tp, 0);
10698         if (err)
10699                 return err;
10700
10701         /* Need to reset the chip because the MSI cycle may have terminated
10702          * with Master Abort.
10703          */
10704         tg3_full_lock(tp, 1);
10705
10706         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10707         err = tg3_init_hw(tp, 1);
10708
10709         tg3_full_unlock(tp);
10710
10711         if (err)
10712                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10713
10714         return err;
10715 }
10716
10717 static int tg3_request_firmware(struct tg3 *tp)
10718 {
10719         const struct tg3_firmware_hdr *fw_hdr;
10720
10721         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10722                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10723                            tp->fw_needed);
10724                 return -ENOENT;
10725         }
10726
10727         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
10728
10729         /* Firmware blob starts with version numbers, followed by
10730          * start address and _full_ length including BSS sections
10731          * (which must be longer than the actual data, of course).
10732          */
10733
10734         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
10735         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
10736                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10737                            tp->fw_len, tp->fw_needed);
10738                 release_firmware(tp->fw);
10739                 tp->fw = NULL;
10740                 return -EINVAL;
10741         }
10742
10743         /* We no longer need firmware; we have it. */
10744         tp->fw_needed = NULL;
10745         return 0;
10746 }
10747
10748 static u32 tg3_irq_count(struct tg3 *tp)
10749 {
10750         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10751
10752         if (irq_cnt > 1) {
10753                 /* We want as many rx rings enabled as there are cpus.
10754                  * In multiqueue MSI-X mode, the first MSI-X vector
10755                  * only deals with link interrupts, etc, so we add
10756                  * one to the number of vectors we are requesting.
10757                  */
10758                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10759         }
10760
10761         return irq_cnt;
10762 }
10763
10764 static bool tg3_enable_msix(struct tg3 *tp)
10765 {
10766         int i, rc;
10767         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10768
10769         tp->txq_cnt = tp->txq_req;
10770         tp->rxq_cnt = tp->rxq_req;
10771         if (!tp->rxq_cnt)
10772                 tp->rxq_cnt = netif_get_num_default_rss_queues();
10773         if (tp->rxq_cnt > tp->rxq_max)
10774                 tp->rxq_cnt = tp->rxq_max;
10775
10776         /* Disable multiple TX rings by default.  Simple round-robin hardware
10777          * scheduling of the TX rings can cause starvation of rings with
10778          * small packets when other rings have TSO or jumbo packets.
10779          */
10780         if (!tp->txq_req)
10781                 tp->txq_cnt = 1;
10782
10783         tp->irq_cnt = tg3_irq_count(tp);
10784
10785         for (i = 0; i < tp->irq_max; i++) {
10786                 msix_ent[i].entry  = i;
10787                 msix_ent[i].vector = 0;
10788         }
10789
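               /* pci_enable_msix() returns 0 on success, a negative errno on
                * failure, or the number of vectors actually available; in the
                * last case, retry with the reduced count.
                */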
10790         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10791         if (rc < 0) {
10792                 return false;
10793         } else if (rc != 0) {
10794                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10795                         return false;
10796                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10797                               tp->irq_cnt, rc);
10798                 tp->irq_cnt = rc;
10799                 tp->rxq_cnt = max(rc - 1, 1);
10800                 if (tp->txq_cnt)
10801                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10802         }
10803
10804         for (i = 0; i < tp->irq_max; i++)
10805                 tp->napi[i].irq_vec = msix_ent[i].vector;
10806
10807         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10808                 pci_disable_msix(tp->pdev);
10809                 return false;
10810         }
10811
10812         if (tp->irq_cnt == 1)
10813                 return true;
10814
10815         tg3_flag_set(tp, ENABLE_RSS);
10816
10817         if (tp->txq_cnt > 1)
10818                 tg3_flag_set(tp, ENABLE_TSS);
10819
10820         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10821
10822         return true;
10823 }
10824
10825 static void tg3_ints_init(struct tg3 *tp)
10826 {
10827         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10828             !tg3_flag(tp, TAGGED_STATUS)) {
10829                 /* All MSI supporting chips should support tagged
10830                  * status.  Warn and fall back to INTx if that is not the case.
10831                  */
10832                 netdev_warn(tp->dev,
10833                             "MSI without TAGGED_STATUS? Not using MSI\n");
10834                 goto defcfg;
10835         }
10836
10837         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10838                 tg3_flag_set(tp, USING_MSIX);
10839         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10840                 tg3_flag_set(tp, USING_MSI);
10841
10842         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10843                 u32 msi_mode = tr32(MSGINT_MODE);
10844                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10845                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10846                 if (!tg3_flag(tp, 1SHOT_MSI))
10847                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10848                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10849         }
10850 defcfg:
10851         if (!tg3_flag(tp, USING_MSIX)) {
10852                 tp->irq_cnt = 1;
10853                 tp->napi[0].irq_vec = tp->pdev->irq;
10854         }
10855
10856         if (tp->irq_cnt == 1) {
10857                 tp->txq_cnt = 1;
10858                 tp->rxq_cnt = 1;
10859                 netif_set_real_num_tx_queues(tp->dev, 1);
10860                 netif_set_real_num_rx_queues(tp->dev, 1);
10861         }
10862 }
10863
10864 static void tg3_ints_fini(struct tg3 *tp)
10865 {
10866         if (tg3_flag(tp, USING_MSIX))
10867                 pci_disable_msix(tp->pdev);
10868         else if (tg3_flag(tp, USING_MSI))
10869                 pci_disable_msi(tp->pdev);
10870         tg3_flag_clear(tp, USING_MSI);
10871         tg3_flag_clear(tp, USING_MSIX);
10872         tg3_flag_clear(tp, ENABLE_RSS);
10873         tg3_flag_clear(tp, ENABLE_TSS);
10874 }
10875
10876 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
10877                      bool init)
10878 {
10879         struct net_device *dev = tp->dev;
10880         int i, err;
10881
10882         /*
10883          * Setup interrupts first so we know how
10884          * many NAPI resources to allocate
10885          */
10886         tg3_ints_init(tp);
10887
10888         tg3_rss_check_indir_tbl(tp);
10889
10890         /* The placement of this call is tied
10891          * to the setup and use of Host TX descriptors.
10892          */
10893         err = tg3_alloc_consistent(tp);
10894         if (err)
10895                 goto err_out1;
10896
10897         tg3_napi_init(tp);
10898
10899         tg3_napi_enable(tp);
10900
10901         for (i = 0; i < tp->irq_cnt; i++) {
10902                 struct tg3_napi *tnapi = &tp->napi[i];
10903                 err = tg3_request_irq(tp, i);
10904                 if (err) {
10905                         for (i--; i >= 0; i--) {
10906                                 tnapi = &tp->napi[i];
10907                                 free_irq(tnapi->irq_vec, tnapi);
10908                         }
10909                         goto err_out2;
10910                 }
10911         }
10912
10913         tg3_full_lock(tp, 0);
10914
10915         err = tg3_init_hw(tp, reset_phy);
10916         if (err) {
10917                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10918                 tg3_free_rings(tp);
10919         }
10920
10921         tg3_full_unlock(tp);
10922
10923         if (err)
10924                 goto err_out3;
10925
10926         if (test_irq && tg3_flag(tp, USING_MSI)) {
10927                 err = tg3_test_msi(tp);
10928
10929                 if (err) {
10930                         tg3_full_lock(tp, 0);
10931                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10932                         tg3_free_rings(tp);
10933                         tg3_full_unlock(tp);
10934
10935                         goto err_out2;
10936                 }
10937
10938                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10939                         u32 val = tr32(PCIE_TRANSACTION_CFG);
10940
10941                         tw32(PCIE_TRANSACTION_CFG,
10942                              val | PCIE_TRANS_CFG_1SHOT_MSI);
10943                 }
10944         }
10945
10946         tg3_phy_start(tp);
10947
10948         tg3_hwmon_open(tp);
10949
10950         tg3_full_lock(tp, 0);
10951
10952         tg3_timer_start(tp);
10953         tg3_flag_set(tp, INIT_COMPLETE);
10954         tg3_enable_ints(tp);
10955
10956         if (init)
10957                 tg3_ptp_init(tp);
10958         else
10959                 tg3_ptp_resume(tp);
10960
10961
10962         tg3_full_unlock(tp);
10963
10964         netif_tx_start_all_queues(dev);
10965
10966         /*
10967          * Reset the loopback feature if it was turned on while the device
10968          * was down, and make sure that it's installed properly now.
10969          */
10970         if (dev->features & NETIF_F_LOOPBACK)
10971                 tg3_set_loopback(dev, dev->features);
10972
10973         return 0;
10974
10975 err_out3:
10976         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10977                 struct tg3_napi *tnapi = &tp->napi[i];
10978                 free_irq(tnapi->irq_vec, tnapi);
10979         }
10980
10981 err_out2:
10982         tg3_napi_disable(tp);
10983         tg3_napi_fini(tp);
10984         tg3_free_consistent(tp);
10985
10986 err_out1:
10987         tg3_ints_fini(tp);
10988
10989         return err;
10990 }
10991
10992 static void tg3_stop(struct tg3 *tp)
10993 {
10994         int i;
10995
10996         tg3_reset_task_cancel(tp);
10997         tg3_netif_stop(tp);
10998
10999         tg3_timer_stop(tp);
11000
11001         tg3_hwmon_close(tp);
11002
11003         tg3_phy_stop(tp);
11004
11005         tg3_full_lock(tp, 1);
11006
11007         tg3_disable_ints(tp);
11008
11009         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11010         tg3_free_rings(tp);
11011         tg3_flag_clear(tp, INIT_COMPLETE);
11012
11013         tg3_full_unlock(tp);
11014
11015         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11016                 struct tg3_napi *tnapi = &tp->napi[i];
11017                 free_irq(tnapi->irq_vec, tnapi);
11018         }
11019
11020         tg3_ints_fini(tp);
11021
11022         tg3_napi_fini(tp);
11023
11024         tg3_free_consistent(tp);
11025 }
11026
11027 static int tg3_open(struct net_device *dev)
11028 {
11029         struct tg3 *tp = netdev_priv(dev);
11030         int err;
11031
11032         if (tp->fw_needed) {
11033                 err = tg3_request_firmware(tp);
11034                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11035                         if (err) {
11036                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11037                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11038                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11039                                 netdev_warn(tp->dev, "EEE capability restored\n");
11040                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11041                         }
11042                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11043                         if (err)
11044                                 return err;
11045                 } else if (err) {
11046                         netdev_warn(tp->dev, "TSO capability disabled\n");
11047                         tg3_flag_clear(tp, TSO_CAPABLE);
11048                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11049                         netdev_notice(tp->dev, "TSO capability restored\n");
11050                         tg3_flag_set(tp, TSO_CAPABLE);
11051                 }
11052         }
11053
11054         tg3_carrier_off(tp);
11055
11056         err = tg3_power_up(tp);
11057         if (err)
11058                 return err;
11059
11060         tg3_full_lock(tp, 0);
11061
11062         tg3_disable_ints(tp);
11063         tg3_flag_clear(tp, INIT_COMPLETE);
11064
11065         tg3_full_unlock(tp);
11066
11067         err = tg3_start(tp, true, true, true);
11068         if (err) {
11069                 tg3_frob_aux_power(tp, false);
11070                 pci_set_power_state(tp->pdev, PCI_D3hot);
11071         }
11072
11073         if (tg3_flag(tp, PTP_CAPABLE)) {
11074                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11075                                                    &tp->pdev->dev);
11076                 if (IS_ERR(tp->ptp_clock))
11077                         tp->ptp_clock = NULL;
11078         }
11079
11080         return err;
11081 }
11082
11083 static int tg3_close(struct net_device *dev)
11084 {
11085         struct tg3 *tp = netdev_priv(dev);
11086
11087         tg3_ptp_fini(tp);
11088
11089         tg3_stop(tp);
11090
11091         /* Clear stats across close / open calls */
11092         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11093         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11094
11095         tg3_power_down(tp);
11096
11097         tg3_carrier_off(tp);
11098
11099         return 0;
11100 }
11101
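      /* Hardware statistics are kept as two 32-bit halves; splice them
       * into a single 64-bit value.
       */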
11102 static inline u64 get_stat64(tg3_stat64_t *val)
11103 {
11104         return ((u64)val->high << 32) | ((u64)val->low);
11105 }
11106
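      /* On 5700 / 5701 with a copper PHY, CRC errors are read from the
       * PHY's own counter (which clears on read, hence the accumulation)
       * rather than from the MAC's rx_fcs_errors statistic.
       */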
11107 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11108 {
11109         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11110
11111         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11112             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11113              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11114                 u32 val;
11115
11116                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11117                         tg3_writephy(tp, MII_TG3_TEST1,
11118                                      val | MII_TG3_TEST1_CRC_EN);
11119                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11120                 } else
11121                         val = 0;
11122
11123                 tp->phy_crc_errors += val;
11124
11125                 return tp->phy_crc_errors;
11126         }
11127
11128         return get_stat64(&hw_stats->rx_fcs_errors);
11129 }
11130
11131 #define ESTAT_ADD(member) \
11132         estats->member =        old_estats->member + \
11133                                 get_stat64(&hw_stats->member)
11134
11135 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11136 {
11137         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11138         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11139
11140         ESTAT_ADD(rx_octets);
11141         ESTAT_ADD(rx_fragments);
11142         ESTAT_ADD(rx_ucast_packets);
11143         ESTAT_ADD(rx_mcast_packets);
11144         ESTAT_ADD(rx_bcast_packets);
11145         ESTAT_ADD(rx_fcs_errors);
11146         ESTAT_ADD(rx_align_errors);
11147         ESTAT_ADD(rx_xon_pause_rcvd);
11148         ESTAT_ADD(rx_xoff_pause_rcvd);
11149         ESTAT_ADD(rx_mac_ctrl_rcvd);
11150         ESTAT_ADD(rx_xoff_entered);
11151         ESTAT_ADD(rx_frame_too_long_errors);
11152         ESTAT_ADD(rx_jabbers);
11153         ESTAT_ADD(rx_undersize_packets);
11154         ESTAT_ADD(rx_in_length_errors);
11155         ESTAT_ADD(rx_out_length_errors);
11156         ESTAT_ADD(rx_64_or_less_octet_packets);
11157         ESTAT_ADD(rx_65_to_127_octet_packets);
11158         ESTAT_ADD(rx_128_to_255_octet_packets);
11159         ESTAT_ADD(rx_256_to_511_octet_packets);
11160         ESTAT_ADD(rx_512_to_1023_octet_packets);
11161         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11162         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11163         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11164         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11165         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11166
11167         ESTAT_ADD(tx_octets);
11168         ESTAT_ADD(tx_collisions);
11169         ESTAT_ADD(tx_xon_sent);
11170         ESTAT_ADD(tx_xoff_sent);
11171         ESTAT_ADD(tx_flow_control);
11172         ESTAT_ADD(tx_mac_errors);
11173         ESTAT_ADD(tx_single_collisions);
11174         ESTAT_ADD(tx_mult_collisions);
11175         ESTAT_ADD(tx_deferred);
11176         ESTAT_ADD(tx_excessive_collisions);
11177         ESTAT_ADD(tx_late_collisions);
11178         ESTAT_ADD(tx_collide_2times);
11179         ESTAT_ADD(tx_collide_3times);
11180         ESTAT_ADD(tx_collide_4times);
11181         ESTAT_ADD(tx_collide_5times);
11182         ESTAT_ADD(tx_collide_6times);
11183         ESTAT_ADD(tx_collide_7times);
11184         ESTAT_ADD(tx_collide_8times);
11185         ESTAT_ADD(tx_collide_9times);
11186         ESTAT_ADD(tx_collide_10times);
11187         ESTAT_ADD(tx_collide_11times);
11188         ESTAT_ADD(tx_collide_12times);
11189         ESTAT_ADD(tx_collide_13times);
11190         ESTAT_ADD(tx_collide_14times);
11191         ESTAT_ADD(tx_collide_15times);
11192         ESTAT_ADD(tx_ucast_packets);
11193         ESTAT_ADD(tx_mcast_packets);
11194         ESTAT_ADD(tx_bcast_packets);
11195         ESTAT_ADD(tx_carrier_sense_errors);
11196         ESTAT_ADD(tx_discards);
11197         ESTAT_ADD(tx_errors);
11198
11199         ESTAT_ADD(dma_writeq_full);
11200         ESTAT_ADD(dma_write_prioq_full);
11201         ESTAT_ADD(rxbds_empty);
11202         ESTAT_ADD(rx_discards);
11203         ESTAT_ADD(rx_errors);
11204         ESTAT_ADD(rx_threshold_hit);
11205
11206         ESTAT_ADD(dma_readq_full);
11207         ESTAT_ADD(dma_read_prioq_full);
11208         ESTAT_ADD(tx_comp_queue_full);
11209
11210         ESTAT_ADD(ring_set_send_prod_index);
11211         ESTAT_ADD(ring_status_update);
11212         ESTAT_ADD(nic_irqs);
11213         ESTAT_ADD(nic_avoided_irqs);
11214         ESTAT_ADD(nic_tx_threshold_hit);
11215
11216         ESTAT_ADD(mbuf_lwm_thresh_hit);
11217 }
11218
11219 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11220 {
11221         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11222         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11223
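               /* Hardware counters are cleared by chip resets; old_stats holds
                * the totals accumulated before the most recent reset.
                */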
11224         stats->rx_packets = old_stats->rx_packets +
11225                 get_stat64(&hw_stats->rx_ucast_packets) +
11226                 get_stat64(&hw_stats->rx_mcast_packets) +
11227                 get_stat64(&hw_stats->rx_bcast_packets);
11228
11229         stats->tx_packets = old_stats->tx_packets +
11230                 get_stat64(&hw_stats->tx_ucast_packets) +
11231                 get_stat64(&hw_stats->tx_mcast_packets) +
11232                 get_stat64(&hw_stats->tx_bcast_packets);
11233
11234         stats->rx_bytes = old_stats->rx_bytes +
11235                 get_stat64(&hw_stats->rx_octets);
11236         stats->tx_bytes = old_stats->tx_bytes +
11237                 get_stat64(&hw_stats->tx_octets);
11238
11239         stats->rx_errors = old_stats->rx_errors +
11240                 get_stat64(&hw_stats->rx_errors);
11241         stats->tx_errors = old_stats->tx_errors +
11242                 get_stat64(&hw_stats->tx_errors) +
11243                 get_stat64(&hw_stats->tx_mac_errors) +
11244                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11245                 get_stat64(&hw_stats->tx_discards);
11246
11247         stats->multicast = old_stats->multicast +
11248                 get_stat64(&hw_stats->rx_mcast_packets);
11249         stats->collisions = old_stats->collisions +
11250                 get_stat64(&hw_stats->tx_collisions);
11251
11252         stats->rx_length_errors = old_stats->rx_length_errors +
11253                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11254                 get_stat64(&hw_stats->rx_undersize_packets);
11255
11256         stats->rx_over_errors = old_stats->rx_over_errors +
11257                 get_stat64(&hw_stats->rxbds_empty);
11258         stats->rx_frame_errors = old_stats->rx_frame_errors +
11259                 get_stat64(&hw_stats->rx_align_errors);
11260         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11261                 get_stat64(&hw_stats->tx_discards);
11262         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11263                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11264
11265         stats->rx_crc_errors = old_stats->rx_crc_errors +
11266                 tg3_calc_crc_errors(tp);
11267
11268         stats->rx_missed_errors = old_stats->rx_missed_errors +
11269                 get_stat64(&hw_stats->rx_discards);
11270
11271         stats->rx_dropped = tp->rx_dropped;
11272         stats->tx_dropped = tp->tx_dropped;
11273 }
11274
11275 static int tg3_get_regs_len(struct net_device *dev)
11276 {
11277         return TG3_REG_BLK_SIZE;
11278 }
11279
11280 static void tg3_get_regs(struct net_device *dev,
11281                 struct ethtool_regs *regs, void *_p)
11282 {
11283         struct tg3 *tp = netdev_priv(dev);
11284
11285         regs->version = 0;
11286
11287         memset(_p, 0, TG3_REG_BLK_SIZE);
11288
11289         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11290                 return;
11291
11292         tg3_full_lock(tp, 0);
11293
11294         tg3_dump_legacy_regs(tp, (u32 *)_p);
11295
11296         tg3_full_unlock(tp);
11297 }
11298
11299 static int tg3_get_eeprom_len(struct net_device *dev)
11300 {
11301         struct tg3 *tp = netdev_priv(dev);
11302
11303         return tp->nvram_size;
11304 }
11305
11306 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11307 {
11308         struct tg3 *tp = netdev_priv(dev);
11309         int ret;
11310         u8  *pd;
11311         u32 i, offset, len, b_offset, b_count;
11312         __be32 val;
11313
11314         if (tg3_flag(tp, NO_NVRAM))
11315                 return -EINVAL;
11316
11317         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11318                 return -EAGAIN;
11319
11320         offset = eeprom->offset;
11321         len = eeprom->len;
11322         eeprom->len = 0;
11323
11324         eeprom->magic = TG3_EEPROM_MAGIC;
11325
11326         if (offset & 3) {
11327                 /* adjust the start down to the required 4-byte boundary */
11328                 b_offset = offset & 3;
11329                 b_count = 4 - b_offset;
11330                 if (b_count > len) {
11331                         /* e.g. offset=1 len=2: request ends inside this dword */
11332                         b_count = len;
11333                 }
11334                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11335                 if (ret)
11336                         return ret;
11337                 memcpy(data, ((char *)&val) + b_offset, b_count);
11338                 len -= b_count;
11339                 offset += b_count;
11340                 eeprom->len += b_count;
11341         }
11342
11343         /* read whole dwords up to the last 4-byte boundary */
11344         pd = &data[eeprom->len];
11345         for (i = 0; i < (len - (len & 3)); i += 4) {
11346                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11347                 if (ret) {
11348                         eeprom->len += i;
11349                         return ret;
11350                 }
11351                 memcpy(pd + i, &val, 4);
11352         }
11353         eeprom->len += i;
11354
11355         if (len & 3) {
11356                 /* read the trailing bytes past the last 4-byte boundary */
11357                 pd = &data[eeprom->len];
11358                 b_count = len & 3;
11359                 b_offset = offset + len - b_count;
11360                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11361                 if (ret)
11362                         return ret;
11363                 memcpy(pd, &val, b_count);
11364                 eeprom->len += b_count;
11365         }
11366         return 0;
11367 }
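
/* Editor's note: a hedged user-space sketch of how the handler above is
 * reached.  ethtool's ETHTOOL_GEEPROM command arrives via the SIOCETHTOOL
 * ioctl and lands in tg3_get_eeprom(); read_nvram() and its arguments are
 * hypothetical names for this example.
 */
#if 0	/* illustrative only, compiles as a user-space program */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int read_nvram(const char *ifname, __u32 offset, __u32 len)
{
	struct ethtool_eeprom *ee;
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	ee = calloc(1, sizeof(*ee) + len);	/* data follows the header */
	if (!ee) {
		close(fd);
		return -1;
	}
	ee->cmd = ETHTOOL_GEEPROM;
	ee->offset = offset;
	ee->len = len;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)ee;

	ret = ioctl(fd, SIOCETHTOOL, &ifr);
	if (ret == 0)
		printf("magic 0x%08x, %u bytes read\n", ee->magic, ee->len);

	free(ee);
	close(fd);
	return ret;
}
#endif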
11368
11369 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11370 {
11371         struct tg3 *tp = netdev_priv(dev);
11372         int ret;
11373         u32 offset, len, b_offset, odd_len;
11374         u8 *buf;
11375         __be32 start, end;
11376
11377         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11378                 return -EAGAIN;
11379
11380         if (tg3_flag(tp, NO_NVRAM) ||
11381             eeprom->magic != TG3_EEPROM_MAGIC)
11382                 return -EINVAL;
11383
11384         offset = eeprom->offset;
11385         len = eeprom->len;
11386
11387         if ((b_offset = (offset & 3))) {
11388                 /* adjust the start down to the required 4-byte boundary */
11389                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11390                 if (ret)
11391                         return ret;
11392                 len += b_offset;
11393                 offset &= ~3;
11394                 if (len < 4)
11395                         len = 4;
11396         }
11397
11398         odd_len = 0;
11399         if (len & 3) {
11400                 /* round the end up to the required 4-byte boundary */
11401                 odd_len = 1;
11402                 len = (len + 3) & ~3;
11403                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11404                 if (ret)
11405                         return ret;
11406         }
11407
11408         buf = data;
11409         if (b_offset || odd_len) {
11410                 buf = kmalloc(len, GFP_KERNEL);
11411                 if (!buf)
11412                         return -ENOMEM;
11413                 if (b_offset)
11414                         memcpy(buf, &start, 4);
11415                 if (odd_len)
11416                         memcpy(buf+len-4, &end, 4);
11417                 memcpy(buf + b_offset, data, eeprom->len);
11418         }
11419
11420         ret = tg3_nvram_write_block(tp, offset, len, buf);
11421
11422         if (buf != data)
11423                 kfree(buf);
11424
11425         return ret;
11426 }
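
/* Editor's note: a worked example of the read-modify-write above.  For a
 * caller passing offset = 5, len = 6:
 *   head: b_offset = 1 -> read the dword at 4; offset becomes 4, len 7
 *   tail: odd_len = 1  -> len rounds up to 8; read the dword at 8
 * The bounce buffer then holds the preserved NVRAM byte at offset 4 in
 * buf[0], the caller's six bytes in buf[1..6], and the preserved byte at
 * offset 11 in buf[7], so one aligned 8-byte write covers offsets 4-11.
 */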
11427
11428 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11429 {
11430         struct tg3 *tp = netdev_priv(dev);
11431
11432         if (tg3_flag(tp, USE_PHYLIB)) {
11433                 struct phy_device *phydev;
11434                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11435                         return -EAGAIN;
11436                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11437                 return phy_ethtool_gset(phydev, cmd);
11438         }
11439
11440         cmd->supported = (SUPPORTED_Autoneg);
11441
11442         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11443                 cmd->supported |= (SUPPORTED_1000baseT_Half |
11444                                    SUPPORTED_1000baseT_Full);
11445
11446         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11447                 cmd->supported |= (SUPPORTED_100baseT_Half |
11448                                   SUPPORTED_100baseT_Full |
11449                                   SUPPORTED_10baseT_Half |
11450                                   SUPPORTED_10baseT_Full |
11451                                   SUPPORTED_TP);
11452                 cmd->port = PORT_TP;
11453         } else {
11454                 cmd->supported |= SUPPORTED_FIBRE;
11455                 cmd->port = PORT_FIBRE;
11456         }
11457
11458         cmd->advertising = tp->link_config.advertising;
11459         if (tg3_flag(tp, PAUSE_AUTONEG)) {
11460                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11461                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11462                                 cmd->advertising |= ADVERTISED_Pause;
11463                         } else {
11464                                 cmd->advertising |= ADVERTISED_Pause |
11465                                                     ADVERTISED_Asym_Pause;
11466                         }
11467                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11468                         cmd->advertising |= ADVERTISED_Asym_Pause;
11469                 }
11470         }
11471         if (netif_running(dev) && tp->link_up) {
11472                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11473                 cmd->duplex = tp->link_config.active_duplex;
11474                 cmd->lp_advertising = tp->link_config.rmt_adv;
11475                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11476                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11477                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11478                         else
11479                                 cmd->eth_tp_mdix = ETH_TP_MDI;
11480                 }
11481         } else {
11482                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11483                 cmd->duplex = DUPLEX_UNKNOWN;
11484                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11485         }
11486         cmd->phy_address = tp->phy_addr;
11487         cmd->transceiver = XCVR_INTERNAL;
11488         cmd->autoneg = tp->link_config.autoneg;
11489         cmd->maxtxpkt = 0;
11490         cmd->maxrxpkt = 0;
11491         return 0;
11492 }
11493
11494 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11495 {
11496         struct tg3 *tp = netdev_priv(dev);
11497         u32 speed = ethtool_cmd_speed(cmd);
11498
11499         if (tg3_flag(tp, USE_PHYLIB)) {
11500                 struct phy_device *phydev;
11501                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11502                         return -EAGAIN;
11503                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11504                 return phy_ethtool_sset(phydev, cmd);
11505         }
11506
11507         if (cmd->autoneg != AUTONEG_ENABLE &&
11508             cmd->autoneg != AUTONEG_DISABLE)
11509                 return -EINVAL;
11510
11511         if (cmd->autoneg == AUTONEG_DISABLE &&
11512             cmd->duplex != DUPLEX_FULL &&
11513             cmd->duplex != DUPLEX_HALF)
11514                 return -EINVAL;
11515
11516         if (cmd->autoneg == AUTONEG_ENABLE) {
11517                 u32 mask = ADVERTISED_Autoneg |
11518                            ADVERTISED_Pause |
11519                            ADVERTISED_Asym_Pause;
11520
11521                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11522                         mask |= ADVERTISED_1000baseT_Half |
11523                                 ADVERTISED_1000baseT_Full;
11524
11525                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11526                         mask |= ADVERTISED_100baseT_Half |
11527                                 ADVERTISED_100baseT_Full |
11528                                 ADVERTISED_10baseT_Half |
11529                                 ADVERTISED_10baseT_Full |
11530                                 ADVERTISED_TP;
11531                 else
11532                         mask |= ADVERTISED_FIBRE;
11533
11534                 if (cmd->advertising & ~mask)
11535                         return -EINVAL;
11536
11537                 mask &= (ADVERTISED_1000baseT_Half |
11538                          ADVERTISED_1000baseT_Full |
11539                          ADVERTISED_100baseT_Half |
11540                          ADVERTISED_100baseT_Full |
11541                          ADVERTISED_10baseT_Half |
11542                          ADVERTISED_10baseT_Full);
11543
11544                 cmd->advertising &= mask;
11545         } else {
11546                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11547                         if (speed != SPEED_1000)
11548                                 return -EINVAL;
11549
11550                         if (cmd->duplex != DUPLEX_FULL)
11551                                 return -EINVAL;
11552                 } else {
11553                         if (speed != SPEED_100 &&
11554                             speed != SPEED_10)
11555                                 return -EINVAL;
11556                 }
11557         }
11558
11559         tg3_full_lock(tp, 0);
11560
11561         tp->link_config.autoneg = cmd->autoneg;
11562         if (cmd->autoneg == AUTONEG_ENABLE) {
11563                 tp->link_config.advertising = (cmd->advertising |
11564                                               ADVERTISED_Autoneg);
11565                 tp->link_config.speed = SPEED_UNKNOWN;
11566                 tp->link_config.duplex = DUPLEX_UNKNOWN;
11567         } else {
11568                 tp->link_config.advertising = 0;
11569                 tp->link_config.speed = speed;
11570                 tp->link_config.duplex = cmd->duplex;
11571         }
11572
11573         tg3_warn_mgmt_link_flap(tp);
11574
11575         if (netif_running(dev))
11576                 tg3_setup_phy(tp, 1);
11577
11578         tg3_full_unlock(tp);
11579
11580         return 0;
11581 }
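
/* Editor's note: a concrete trace of the autoneg mask check above.  On a
 * 10/100-only copper device (TG3_PHYFLG_10_100_ONLY set, no SERDES), mask
 * covers Autoneg, Pause, Asym_Pause, 10/100 half/full duplex and TP.  A
 * request advertising 1000baseT_Full has bits outside that mask, so it is
 * rejected with -EINVAL before any link configuration is touched.
 */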
11582
11583 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11584 {
11585         struct tg3 *tp = netdev_priv(dev);
11586
11587         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11588         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11589         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11590         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11591 }
11592
11593 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11594 {
11595         struct tg3 *tp = netdev_priv(dev);
11596
11597         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11598                 wol->supported = WAKE_MAGIC;
11599         else
11600                 wol->supported = 0;
11601         wol->wolopts = 0;
11602         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11603                 wol->wolopts = WAKE_MAGIC;
11604         memset(&wol->sopass, 0, sizeof(wol->sopass));
11605 }
11606
11607 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11608 {
11609         struct tg3 *tp = netdev_priv(dev);
11610         struct device *dp = &tp->pdev->dev;
11611
11612         if (wol->wolopts & ~WAKE_MAGIC)
11613                 return -EINVAL;
11614         if ((wol->wolopts & WAKE_MAGIC) &&
11615             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11616                 return -EINVAL;
11617
11618         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11619
11620         spin_lock_bh(&tp->lock);
11621         if (device_may_wakeup(dp))
11622                 tg3_flag_set(tp, WOL_ENABLE);
11623         else
11624                 tg3_flag_clear(tp, WOL_ENABLE);
11625         spin_unlock_bh(&tp->lock);
11626
11627         return 0;
11628 }
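
/* Editor's note: these two handlers back "ethtool <dev>" (the Wake-on
 * line of its output) and "ethtool -s <dev> wol g" / "wol d".  Only
 * magic-packet wake (WAKE_MAGIC) is supported here, so any other requested
 * wolopts bit fails the -EINVAL check above.
 */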
11629
11630 static u32 tg3_get_msglevel(struct net_device *dev)
11631 {
11632         struct tg3 *tp = netdev_priv(dev);
11633         return tp->msg_enable;
11634 }
11635
11636 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11637 {
11638         struct tg3 *tp = netdev_priv(dev);
11639         tp->msg_enable = value;
11640 }
11641
11642 static int tg3_nway_reset(struct net_device *dev)
11643 {
11644         struct tg3 *tp = netdev_priv(dev);
11645         int r;
11646
11647         if (!netif_running(dev))
11648                 return -EAGAIN;
11649
11650         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11651                 return -EINVAL;
11652
11653         tg3_warn_mgmt_link_flap(tp);
11654
11655         if (tg3_flag(tp, USE_PHYLIB)) {
11656                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11657                         return -EAGAIN;
11658                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11659         } else {
11660                 u32 bmcr;
11661
11662                 spin_lock_bh(&tp->lock);
11663                 r = -EINVAL;
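                /* Read BMCR twice and trust only the second result; the
                 * extra read follows the driver's usual pattern of priming
                 * the PHY register before acting on its contents (editor's
                 * note; the original code carries no comment here). */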
11664                 tg3_readphy(tp, MII_BMCR, &bmcr);
11665                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11666                     ((bmcr & BMCR_ANENABLE) ||
11667                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11668                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11669                                                    BMCR_ANENABLE);
11670                         r = 0;
11671                 }
11672                 spin_unlock_bh(&tp->lock);
11673         }
11674
11675         return r;
11676 }
11677
11678 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11679 {
11680         struct tg3 *tp = netdev_priv(dev);
11681
11682         ering->rx_max_pending = tp->rx_std_ring_mask;
11683         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11684                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11685         else
11686                 ering->rx_jumbo_max_pending = 0;
11687
11688         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11689
11690         ering->rx_pending = tp->rx_pending;
11691         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11692                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11693         else
11694                 ering->rx_jumbo_pending = 0;
11695
11696         ering->tx_pending = tp->napi[0].tx_pending;
11697 }
11698
11699 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11700 {
11701         struct tg3 *tp = netdev_priv(dev);
11702         int i, irq_sync = 0, err = 0;
11703
11704         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11705             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11706             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11707             (ering->tx_pending <= MAX_SKB_FRAGS) ||
11708             (tg3_flag(tp, TSO_BUG) &&
11709              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11710                 return -EINVAL;
11711
11712         if (netif_running(dev)) {
11713                 tg3_phy_stop(tp);
11714                 tg3_netif_stop(tp);
11715                 irq_sync = 1;
11716         }
11717
11718         tg3_full_lock(tp, irq_sync);
11719
11720         tp->rx_pending = ering->rx_pending;
11721
11722         if (tg3_flag(tp, MAX_RXPEND_64) &&
11723             tp->rx_pending > 63)
11724                 tp->rx_pending = 63;
11725         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11726
11727         for (i = 0; i < tp->irq_max; i++)
11728                 tp->napi[i].tx_pending = ering->tx_pending;
11729
11730         if (netif_running(dev)) {
11731                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11732                 err = tg3_restart_hw(tp, 0);
11733                 if (!err)
11734                         tg3_netif_start(tp);
11735         }
11736
11737         tg3_full_unlock(tp);
11738
11739         if (irq_sync && !err)
11740                 tg3_phy_start(tp);
11741
11742         return err;
11743 }
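
/* Editor's note: the tx_pending floor above exists because each skb
 * fragment consumes one TX descriptor, so a ring no deeper than
 * MAX_SKB_FRAGS could never hold a maximally fragmented packet; the 3x
 * margin on TSO_BUG parts leaves headroom for the software TSO fallback,
 * which can expand one skb into several.
 */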
11744
11745 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11746 {
11747         struct tg3 *tp = netdev_priv(dev);
11748
11749         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11750
11751         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11752                 epause->rx_pause = 1;
11753         else
11754                 epause->rx_pause = 0;
11755
11756         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11757                 epause->tx_pause = 1;
11758         else
11759                 epause->tx_pause = 0;
11760 }
11761
11762 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11763 {
11764         struct tg3 *tp = netdev_priv(dev);
11765         int err = 0;
11766
11767         if (tp->link_config.autoneg == AUTONEG_ENABLE)
11768                 tg3_warn_mgmt_link_flap(tp);
11769
11770         if (tg3_flag(tp, USE_PHYLIB)) {
11771                 u32 newadv;
11772                 struct phy_device *phydev;
11773
11774                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11775
11776                 if (!(phydev->supported & SUPPORTED_Pause) ||
11777                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11778                      (epause->rx_pause != epause->tx_pause)))
11779                         return -EINVAL;
11780
11781                 tp->link_config.flowctrl = 0;
11782                 if (epause->rx_pause) {
11783                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11784
11785                         if (epause->tx_pause) {
11786                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11787                                 newadv = ADVERTISED_Pause;
11788                         } else
11789                                 newadv = ADVERTISED_Pause |
11790                                          ADVERTISED_Asym_Pause;
11791                 } else if (epause->tx_pause) {
11792                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11793                         newadv = ADVERTISED_Asym_Pause;
11794                 } else
11795                         newadv = 0;
11796
11797                 if (epause->autoneg)
11798                         tg3_flag_set(tp, PAUSE_AUTONEG);
11799                 else
11800                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11801
11802                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11803                         u32 oldadv = phydev->advertising &
11804                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11805                         if (oldadv != newadv) {
11806                                 phydev->advertising &=
11807                                         ~(ADVERTISED_Pause |
11808                                           ADVERTISED_Asym_Pause);
11809                                 phydev->advertising |= newadv;
11810                                 if (phydev->autoneg) {
11811                                         /*
11812                                          * Always renegotiate the link to
11813                                          * inform our link partner of our
11814                                          * flow control settings, even if the
11815                                          * flow control is forced.  Let
11816                                          * tg3_adjust_link() do the final
11817                                          * flow control setup.
11818                                          */
11819                                         return phy_start_aneg(phydev);
11820                                 }
11821                         }
11822
11823                         if (!epause->autoneg)
11824                                 tg3_setup_flow_control(tp, 0, 0);
11825                 } else {
11826                         tp->link_config.advertising &=
11827                                         ~(ADVERTISED_Pause |
11828                                           ADVERTISED_Asym_Pause);
11829                         tp->link_config.advertising |= newadv;
11830                 }
11831         } else {
11832                 int irq_sync = 0;
11833
11834                 if (netif_running(dev)) {
11835                         tg3_netif_stop(tp);
11836                         irq_sync = 1;
11837                 }
11838
11839                 tg3_full_lock(tp, irq_sync);
11840
11841                 if (epause->autoneg)
11842                         tg3_flag_set(tp, PAUSE_AUTONEG);
11843                 else
11844                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11845                 if (epause->rx_pause)
11846                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11847                 else
11848                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11849                 if (epause->tx_pause)
11850                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11851                 else
11852                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11853
11854                 if (netif_running(dev)) {
11855                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11856                         err = tg3_restart_hw(tp, 0);
11857                         if (!err)
11858                                 tg3_netif_start(tp);
11859                 }
11860
11861                 tg3_full_unlock(tp);
11862         }
11863
11864         return err;
11865 }
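
/* Editor's note: both branches above encode the same IEEE 802.3 pause
 * advertisement rule.  A minimal sketch of the mapping (hypothetical
 * helper, not part of the driver):
 */
#if 0	/* illustrative only */
static u32 flowctrl_to_adv(u8 flowctrl)
{
	if (flowctrl & FLOW_CTRL_RX)
		return (flowctrl & FLOW_CTRL_TX) ?
			ADVERTISED_Pause :
			ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	if (flowctrl & FLOW_CTRL_TX)
		return ADVERTISED_Asym_Pause;
	return 0;
}
#endif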
11866
11867 static int tg3_get_sset_count(struct net_device *dev, int sset)
11868 {
11869         switch (sset) {
11870         case ETH_SS_TEST:
11871                 return TG3_NUM_TEST;
11872         case ETH_SS_STATS:
11873                 return TG3_NUM_STATS;
11874         default:
11875                 return -EOPNOTSUPP;
11876         }
11877 }
11878
11879 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11880                          u32 *rules __always_unused)
11881 {
11882         struct tg3 *tp = netdev_priv(dev);
11883
11884         if (!tg3_flag(tp, SUPPORT_MSIX))
11885                 return -EOPNOTSUPP;
11886
11887         switch (info->cmd) {
11888         case ETHTOOL_GRXRINGS:
11889                 if (netif_running(tp->dev))
11890                         info->data = tp->rxq_cnt;
11891                 else {
11892                         info->data = num_online_cpus();
11893                         if (info->data > TG3_RSS_MAX_NUM_QS)
11894                                 info->data = TG3_RSS_MAX_NUM_QS;
11895                 }
11896
11897                 /* The first interrupt vector only handles link change
11898                  * interrupts, not an RX ring, so it is excluded from
11899                  * the count reported here. */
11900                 info->data -= 1;
11901                 return 0;
11902
11903         default:
11904                 return -EOPNOTSUPP;
11905         }
11906 }
11907
11908 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11909 {
11910         u32 size = 0;
11911         struct tg3 *tp = netdev_priv(dev);
11912
11913         if (tg3_flag(tp, SUPPORT_MSIX))
11914                 size = TG3_RSS_INDIR_TBL_SIZE;
11915
11916         return size;
11917 }
11918
11919 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11920 {
11921         struct tg3 *tp = netdev_priv(dev);
11922         int i;
11923
11924         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11925                 indir[i] = tp->rss_ind_tbl[i];
11926
11927         return 0;
11928 }
11929
11930 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11931 {
11932         struct tg3 *tp = netdev_priv(dev);
11933         size_t i;
11934
11935         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11936                 tp->rss_ind_tbl[i] = indir[i];
11937
11938         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11939                 return 0;
11940
11941         /* It is legal to write the indirection
11942          * table while the device is running.
11943          */
11944         tg3_full_lock(tp, 0);
11945         tg3_rss_write_indir_tbl(tp);
11946         tg3_full_unlock(tp);
11947
11948         return 0;
11949 }
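
/* Editor's note: how the table written above is consumed, in miniature.
 * The hardware hashes each incoming flow and uses that hash to index the
 * indirection table, which names the RX queue (hypothetical helper):
 */
#if 0	/* illustrative only */
static u32 rss_pick_queue(const u32 *indir, u32 flow_hash)
{
	return indir[flow_hash % TG3_RSS_INDIR_TBL_SIZE];
}
#endif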
11950
11951 static void tg3_get_channels(struct net_device *dev,
11952                              struct ethtool_channels *channel)
11953 {
11954         struct tg3 *tp = netdev_priv(dev);
11955         u32 deflt_qs = netif_get_num_default_rss_queues();
11956
11957         channel->max_rx = tp->rxq_max;
11958         channel->max_tx = tp->txq_max;
11959
11960         if (netif_running(dev)) {
11961                 channel->rx_count = tp->rxq_cnt;
11962                 channel->tx_count = tp->txq_cnt;
11963         } else {
11964                 if (tp->rxq_req)
11965                         channel->rx_count = tp->rxq_req;
11966                 else
11967                         channel->rx_count = min(deflt_qs, tp->rxq_max);
11968
11969                 if (tp->txq_req)
11970                         channel->tx_count = tp->txq_req;
11971                 else
11972                         channel->tx_count = min(deflt_qs, tp->txq_max);
11973         }
11974 }
11975
11976 static int tg3_set_channels(struct net_device *dev,
11977                             struct ethtool_channels *channel)
11978 {
11979         struct tg3 *tp = netdev_priv(dev);
11980
11981         if (!tg3_flag(tp, SUPPORT_MSIX))
11982                 return -EOPNOTSUPP;
11983
11984         if (channel->rx_count > tp->rxq_max ||
11985             channel->tx_count > tp->txq_max)
11986                 return -EINVAL;
11987
11988         tp->rxq_req = channel->rx_count;
11989         tp->txq_req = channel->tx_count;
11990
11991         if (!netif_running(dev))
11992                 return 0;
11993
11994         tg3_stop(tp);
11995
11996         tg3_carrier_off(tp);
11997
11998         tg3_start(tp, true, false, false);
11999
12000         return 0;
12001 }
12002
12003 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12004 {
12005         switch (stringset) {
12006         case ETH_SS_STATS:
12007                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12008                 break;
12009         case ETH_SS_TEST:
12010                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12011                 break;
12012         default:
12013                 WARN_ON(1);     /* unknown string set requested */
12014                 break;
12015         }
12016 }
12017
12018 static int tg3_set_phys_id(struct net_device *dev,
12019                             enum ethtool_phys_id_state state)
12020 {
12021         struct tg3 *tp = netdev_priv(dev);
12022
12023         if (!netif_running(tp->dev))
12024                 return -EAGAIN;
12025
12026         switch (state) {
12027         case ETHTOOL_ID_ACTIVE:
12028                 return 1;       /* cycle on/off once per second */
12029
12030         case ETHTOOL_ID_ON:
12031                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12032                      LED_CTRL_1000MBPS_ON |
12033                      LED_CTRL_100MBPS_ON |
12034                      LED_CTRL_10MBPS_ON |
12035                      LED_CTRL_TRAFFIC_OVERRIDE |
12036                      LED_CTRL_TRAFFIC_BLINK |
12037                      LED_CTRL_TRAFFIC_LED);
12038                 break;
12039
12040         case ETHTOOL_ID_OFF:
12041                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12042                      LED_CTRL_TRAFFIC_OVERRIDE);
12043                 break;
12044
12045         case ETHTOOL_ID_INACTIVE:
12046                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12047                 break;
12048         }
12049
12050         return 0;
12051 }
12052
12053 static void tg3_get_ethtool_stats(struct net_device *dev,
12054                                    struct ethtool_stats *estats, u64 *tmp_stats)
12055 {
12056         struct tg3 *tp = netdev_priv(dev);
12057
12058         if (tp->hw_stats)
12059                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12060         else
12061                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12062 }
12063
12064 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12065 {
12066         int i;
12067         __be32 *buf;
12068         u32 offset = 0, len = 0;
12069         u32 magic, val;
12070
12071         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12072                 return NULL;
12073
12074         if (magic == TG3_EEPROM_MAGIC) {
12075                 for (offset = TG3_NVM_DIR_START;
12076                      offset < TG3_NVM_DIR_END;
12077                      offset += TG3_NVM_DIRENT_SIZE) {
12078                         if (tg3_nvram_read(tp, offset, &val))
12079                                 return NULL;
12080
12081                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12082                             TG3_NVM_DIRTYPE_EXTVPD)
12083                                 break;
12084                 }
12085
12086                 if (offset != TG3_NVM_DIR_END) {
12087                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12088                         if (tg3_nvram_read(tp, offset + 4, &offset))
12089                                 return NULL;
12090
12091                         offset = tg3_nvram_logical_addr(tp, offset);
12092                 }
12093         }
12094
12095         if (!offset || !len) {
12096                 offset = TG3_NVM_VPD_OFF;
12097                 len = TG3_NVM_VPD_LEN;
12098         }
12099
12100         buf = kmalloc(len, GFP_KERNEL);
12101         if (buf == NULL)
12102                 return NULL;
12103
12104         if (magic == TG3_EEPROM_MAGIC) {
12105                 for (i = 0; i < len; i += 4) {
12106                         /* The data is in little-endian format in NVRAM.
12107                          * Use the big-endian read routines to preserve
12108                          * the byte order as it exists in NVRAM.
12109                          */
12110                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12111                                 goto error;
12112                 }
12113         } else {
12114                 u8 *ptr;
12115                 ssize_t cnt;
12116                 unsigned int pos = 0;
12117
12118                 ptr = (u8 *)&buf[0];
12119                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12120                         cnt = pci_read_vpd(tp->pdev, pos,
12121                                            len - pos, ptr);
12122                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
12123                                 cnt = 0;
12124                         else if (cnt < 0)
12125                                 goto error;
12126                 }
12127                 if (pos != len)
12128                         goto error;
12129         }
12130
12131         *vpdlen = len;
12132
12133         return buf;
12134
12135 error:
12136         kfree(buf);
12137         return NULL;
12138 }
12139
12140 #define NVRAM_TEST_SIZE 0x100
12141 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12142 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12143 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12144 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12145 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12146 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12147 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12148 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12149
12150 static int tg3_test_nvram(struct tg3 *tp)
12151 {
12152         u32 csum, magic, len;
12153         __be32 *buf;
12154         int i, j, k, err = 0, size;
12155
12156         if (tg3_flag(tp, NO_NVRAM))
12157                 return 0;
12158
12159         if (tg3_nvram_read(tp, 0, &magic) != 0)
12160                 return -EIO;
12161
12162         if (magic == TG3_EEPROM_MAGIC)
12163                 size = NVRAM_TEST_SIZE;
12164         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12165                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12166                     TG3_EEPROM_SB_FORMAT_1) {
12167                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12168                         case TG3_EEPROM_SB_REVISION_0:
12169                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12170                                 break;
12171                         case TG3_EEPROM_SB_REVISION_2:
12172                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12173                                 break;
12174                         case TG3_EEPROM_SB_REVISION_3:
12175                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12176                                 break;
12177                         case TG3_EEPROM_SB_REVISION_4:
12178                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12179                                 break;
12180                         case TG3_EEPROM_SB_REVISION_5:
12181                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12182                                 break;
12183                         case TG3_EEPROM_SB_REVISION_6:
12184                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12185                                 break;
12186                         default:
12187                                 return -EIO;
12188                         }
12189                 } else
12190                         return 0;
12191         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12192                 size = NVRAM_SELFBOOT_HW_SIZE;
12193         else
12194                 return -EIO;
12195
12196         buf = kmalloc(size, GFP_KERNEL);
12197         if (buf == NULL)
12198                 return -ENOMEM;
12199
12200         err = -EIO;
12201         for (i = 0, j = 0; i < size; i += 4, j++) {
12202                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12203                 if (err)
12204                         break;
12205         }
12206         if (i < size)
12207                 goto out;
12208
12209         /* Selfboot format */
12210         magic = be32_to_cpu(buf[0]);
12211         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12212             TG3_EEPROM_MAGIC_FW) {
12213                 u8 *buf8 = (u8 *) buf, csum8 = 0;
12214
12215                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12216                     TG3_EEPROM_SB_REVISION_2) {
12217                         /* For rev 2, the csum doesn't include the MBA. */
12218                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12219                                 csum8 += buf8[i];
12220                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12221                                 csum8 += buf8[i];
12222                 } else {
12223                         for (i = 0; i < size; i++)
12224                                 csum8 += buf8[i];
12225                 }
12226
12227                 if (csum8 == 0) {
12228                         err = 0;
12229                         goto out;
12230                 }
12231
12232                 err = -EIO;
12233                 goto out;
12234         }
12235
12236         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12237             TG3_EEPROM_MAGIC_HW) {
12238                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12239                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12240                 u8 *buf8 = (u8 *) buf;
12241
12242                 /* Separate the parity bits and the data bytes.  */
12243                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12244                         if ((i == 0) || (i == 8)) {
12245                                 int l;
12246                                 u8 msk;
12247
12248                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12249                                         parity[k++] = buf8[i] & msk;
12250                                 i++;
12251                         } else if (i == 16) {
12252                                 int l;
12253                                 u8 msk;
12254
12255                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12256                                         parity[k++] = buf8[i] & msk;
12257                                 i++;
12258
12259                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12260                                         parity[k++] = buf8[i] & msk;
12261                                 i++;
12262                         }
12263                         data[j++] = buf8[i];
12264                 }
12265
12266                 err = -EIO;
12267                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12268                         u8 hw8 = hweight8(data[i]);
12269
12270                         if ((hw8 & 0x1) && parity[i])
12271                                 goto out;
12272                         else if (!(hw8 & 0x1) && !parity[i])
12273                                 goto out;
12274                 }
12275                 err = 0;
12276                 goto out;
12277         }
12278
12279         err = -EIO;
12280
12281         /* Bootstrap checksum at offset 0x10 */
12282         csum = calc_crc((unsigned char *) buf, 0x10);
12283         if (csum != le32_to_cpu(buf[0x10/4]))
12284                 goto out;
12285
12286         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12287         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12288         if (csum != le32_to_cpu(buf[0xfc/4]))
12289                 goto out;
12290
12291         kfree(buf);
12292
12293         buf = tg3_vpd_readblock(tp, &len);
12294         if (!buf)
12295                 return -ENOMEM;
12296
12297         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12298         if (i > 0) {
12299                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12300                 if (j < 0)
12301                         goto out;
12302
12303                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12304                         goto out;
12305
12306                 i += PCI_VPD_LRDT_TAG_SIZE;
12307                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12308                                               PCI_VPD_RO_KEYWORD_CHKSUM);
12309                 if (j > 0) {
12310                         u8 csum8 = 0;
12311
12312                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
12313
12314                         for (i = 0; i <= j; i++)
12315                                 csum8 += ((u8 *)buf)[i];
12316
12317                         if (csum8)
12318                                 goto out;
12319                 }
12320         }
12321
12322         err = 0;
12323
12324 out:
12325         kfree(buf);
12326         return err;
12327 }
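
/* Editor's note: the selfboot images checked above use a trivial integrity
 * scheme: the byte-wise sum of the image, checksum byte included, must
 * wrap to zero (revision 2 additionally skips the 4-byte MBA field, as the
 * code shows).  In miniature:
 */
#if 0	/* illustrative only */
static bool selfboot_csum_ok(const u8 *img, size_t size)
{
	u8 sum = 0;
	size_t i;

	for (i = 0; i < size; i++)
		sum += img[i];
	return sum == 0;
}
#endif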
12328
12329 #define TG3_SERDES_TIMEOUT_SEC  2
12330 #define TG3_COPPER_TIMEOUT_SEC  6
12331
12332 static int tg3_test_link(struct tg3 *tp)
12333 {
12334         int i, max;
12335
12336         if (!netif_running(tp->dev))
12337                 return -ENODEV;
12338
12339         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12340                 max = TG3_SERDES_TIMEOUT_SEC;
12341         else
12342                 max = TG3_COPPER_TIMEOUT_SEC;
12343
12344         for (i = 0; i < max; i++) {
12345                 if (tp->link_up)
12346                         return 0;
12347
12348                 if (msleep_interruptible(1000))
12349                         break;
12350         }
12351
12352         return -EIO;
12353 }
12354
12355 /* Only test the commonly used registers */
12356 static int tg3_test_registers(struct tg3 *tp)
12357 {
12358         int i, is_5705, is_5750;
12359         u32 offset, read_mask, write_mask, val, save_val, read_val;
12360         static struct {
12361                 u16 offset;
12362                 u16 flags;
12363 #define TG3_FL_5705     0x1
12364 #define TG3_FL_NOT_5705 0x2
12365 #define TG3_FL_NOT_5788 0x4
12366 #define TG3_FL_NOT_5750 0x8
12367                 u32 read_mask;
12368                 u32 write_mask;
12369         } reg_tbl[] = {
12370                 /* MAC Control Registers */
12371                 { MAC_MODE, TG3_FL_NOT_5705,
12372                         0x00000000, 0x00ef6f8c },
12373                 { MAC_MODE, TG3_FL_5705,
12374                         0x00000000, 0x01ef6b8c },
12375                 { MAC_STATUS, TG3_FL_NOT_5705,
12376                         0x03800107, 0x00000000 },
12377                 { MAC_STATUS, TG3_FL_5705,
12378                         0x03800100, 0x00000000 },
12379                 { MAC_ADDR_0_HIGH, 0x0000,
12380                         0x00000000, 0x0000ffff },
12381                 { MAC_ADDR_0_LOW, 0x0000,
12382                         0x00000000, 0xffffffff },
12383                 { MAC_RX_MTU_SIZE, 0x0000,
12384                         0x00000000, 0x0000ffff },
12385                 { MAC_TX_MODE, 0x0000,
12386                         0x00000000, 0x00000070 },
12387                 { MAC_TX_LENGTHS, 0x0000,
12388                         0x00000000, 0x00003fff },
12389                 { MAC_RX_MODE, TG3_FL_NOT_5705,
12390                         0x00000000, 0x000007fc },
12391                 { MAC_RX_MODE, TG3_FL_5705,
12392                         0x00000000, 0x000007dc },
12393                 { MAC_HASH_REG_0, 0x0000,
12394                         0x00000000, 0xffffffff },
12395                 { MAC_HASH_REG_1, 0x0000,
12396                         0x00000000, 0xffffffff },
12397                 { MAC_HASH_REG_2, 0x0000,
12398                         0x00000000, 0xffffffff },
12399                 { MAC_HASH_REG_3, 0x0000,
12400                         0x00000000, 0xffffffff },
12401
12402                 /* Receive Data and Receive BD Initiator Control Registers. */
12403                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12404                         0x00000000, 0xffffffff },
12405                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12406                         0x00000000, 0xffffffff },
12407                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12408                         0x00000000, 0x00000003 },
12409                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12410                         0x00000000, 0xffffffff },
12411                 { RCVDBDI_STD_BD+0, 0x0000,
12412                         0x00000000, 0xffffffff },
12413                 { RCVDBDI_STD_BD+4, 0x0000,
12414                         0x00000000, 0xffffffff },
12415                 { RCVDBDI_STD_BD+8, 0x0000,
12416                         0x00000000, 0xffff0002 },
12417                 { RCVDBDI_STD_BD+0xc, 0x0000,
12418                         0x00000000, 0xffffffff },
12419
12420                 /* Receive BD Initiator Control Registers. */
12421                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12422                         0x00000000, 0xffffffff },
12423                 { RCVBDI_STD_THRESH, TG3_FL_5705,
12424                         0x00000000, 0x000003ff },
12425                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12426                         0x00000000, 0xffffffff },
12427
12428                 /* Host Coalescing Control Registers. */
12429                 { HOSTCC_MODE, TG3_FL_NOT_5705,
12430                         0x00000000, 0x00000004 },
12431                 { HOSTCC_MODE, TG3_FL_5705,
12432                         0x00000000, 0x000000f6 },
12433                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12434                         0x00000000, 0xffffffff },
12435                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12436                         0x00000000, 0x000003ff },
12437                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12438                         0x00000000, 0xffffffff },
12439                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12440                         0x00000000, 0x000003ff },
12441                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12442                         0x00000000, 0xffffffff },
12443                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12444                         0x00000000, 0x000000ff },
12445                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12446                         0x00000000, 0xffffffff },
12447                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12448                         0x00000000, 0x000000ff },
12449                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12450                         0x00000000, 0xffffffff },
12451                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12452                         0x00000000, 0xffffffff },
12453                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12454                         0x00000000, 0xffffffff },
12455                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12456                         0x00000000, 0x000000ff },
12457                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12458                         0x00000000, 0xffffffff },
12459                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12460                         0x00000000, 0x000000ff },
12461                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12462                         0x00000000, 0xffffffff },
12463                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12464                         0x00000000, 0xffffffff },
12465                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12466                         0x00000000, 0xffffffff },
12467                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12468                         0x00000000, 0xffffffff },
12469                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12470                         0x00000000, 0xffffffff },
12471                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12472                         0xffffffff, 0x00000000 },
12473                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12474                         0xffffffff, 0x00000000 },
12475
12476                 /* Buffer Manager Control Registers. */
12477                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12478                         0x00000000, 0x007fff80 },
12479                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12480                         0x00000000, 0x007fffff },
12481                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12482                         0x00000000, 0x0000003f },
12483                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12484                         0x00000000, 0x000001ff },
12485                 { BUFMGR_MB_HIGH_WATER, 0x0000,
12486                         0x00000000, 0x000001ff },
12487                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12488                         0xffffffff, 0x00000000 },
12489                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12490                         0xffffffff, 0x00000000 },
12491
12492                 /* Mailbox Registers */
12493                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12494                         0x00000000, 0x000001ff },
12495                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12496                         0x00000000, 0x000001ff },
12497                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12498                         0x00000000, 0x000007ff },
12499                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12500                         0x00000000, 0x000001ff },
12501
12502                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12503         };
12504
12505         is_5705 = is_5750 = 0;
12506         if (tg3_flag(tp, 5705_PLUS)) {
12507                 is_5705 = 1;
12508                 if (tg3_flag(tp, 5750_PLUS))
12509                         is_5750 = 1;
12510         }
12511
12512         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12513                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12514                         continue;
12515
12516                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12517                         continue;
12518
12519                 if (tg3_flag(tp, IS_5788) &&
12520                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
12521                         continue;
12522
12523                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12524                         continue;
12525
12526                 offset = (u32) reg_tbl[i].offset;
12527                 read_mask = reg_tbl[i].read_mask;
12528                 write_mask = reg_tbl[i].write_mask;
12529
12530                 /* Save the original register content */
12531                 save_val = tr32(offset);
12532
12533                 /* Determine the read-only value. */
12534                 read_val = save_val & read_mask;
12535
12536                 /* Write zero to the register, then make sure the read-only bits
12537                  * are not changed and the read/write bits are all zeros.
12538                  */
12539                 tw32(offset, 0);
12540
12541                 val = tr32(offset);
12542
12543                 /* Test the read-only and read/write bits. */
12544                 if (((val & read_mask) != read_val) || (val & write_mask))
12545                         goto out;
12546
12547                 /* Write ones to all the bits defined by RdMask and WrMask, then
12548                  * make sure the read-only bits are not changed and the
12549                  * read/write bits are all ones.
12550                  */
12551                 tw32(offset, read_mask | write_mask);
12552
12553                 val = tr32(offset);
12554
12555                 /* Test the read-only bits. */
12556                 if ((val & read_mask) != read_val)
12557                         goto out;
12558
12559                 /* Test the read/write bits. */
12560                 if ((val & write_mask) != write_mask)
12561                         goto out;
12562
12563                 tw32(offset, save_val);
12564         }
12565
12566         return 0;
12567
12568 out:
12569         if (netif_msg_hw(tp))
12570                 netdev_err(tp->dev,
12571                            "Register test failed at offset %x\n", offset);
12572         tw32(offset, save_val);
12573         return -EIO;
12574 }
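
/* Editor's note: the two-phase mask test above, demonstrated on a fake
 * register whose low byte is read-only and next byte is read/write.  The
 * simulated "write" keeps the RO bits and latches only the RW bits, which
 * is exactly the behavior the real test asserts:
 */
#if 0	/* illustrative only */
static int mask_test_demo(void)
{
	const u32 read_mask = 0x000000ff, write_mask = 0x0000ff00;
	u32 reg = 0x000000ab;			/* simulated register */
	u32 read_val = reg & read_mask;

	/* Phase 1: write zero; RO bits must persist, RW bits must clear. */
	reg = (reg & read_mask) | (0 & write_mask);
	if ((reg & read_mask) != read_val || (reg & write_mask))
		return -EIO;

	/* Phase 2: write ones; RO bits must persist, RW bits must set. */
	reg = (reg & read_mask) | ((read_mask | write_mask) & write_mask);
	if ((reg & read_mask) != read_val ||
	    (reg & write_mask) != write_mask)
		return -EIO;

	return 0;
}
#endif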
12575
12576 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12577 {
12578         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12579         int i;
12580         u32 j;
12581
12582         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12583                 for (j = 0; j < len; j += 4) {
12584                         u32 val;
12585
12586                         tg3_write_mem(tp, offset + j, test_pattern[i]);
12587                         tg3_read_mem(tp, offset + j, &val);
12588                         if (val != test_pattern[i])
12589                                 return -EIO;
12590                 }
12591         }
12592         return 0;
12593 }
12594
12595 static int tg3_test_memory(struct tg3 *tp)
12596 {
12597         static struct mem_entry {
12598                 u32 offset;
12599                 u32 len;
12600         } mem_tbl_570x[] = {
12601                 { 0x00000000, 0x00b50},
12602                 { 0x00002000, 0x1c000},
12603                 { 0xffffffff, 0x00000}
12604         }, mem_tbl_5705[] = {
12605                 { 0x00000100, 0x0000c},
12606                 { 0x00000200, 0x00008},
12607                 { 0x00004000, 0x00800},
12608                 { 0x00006000, 0x01000},
12609                 { 0x00008000, 0x02000},
12610                 { 0x00010000, 0x0e000},
12611                 { 0xffffffff, 0x00000}
12612         }, mem_tbl_5755[] = {
12613                 { 0x00000200, 0x00008},
12614                 { 0x00004000, 0x00800},
12615                 { 0x00006000, 0x00800},
12616                 { 0x00008000, 0x02000},
12617                 { 0x00010000, 0x0c000},
12618                 { 0xffffffff, 0x00000}
12619         }, mem_tbl_5906[] = {
12620                 { 0x00000200, 0x00008},
12621                 { 0x00004000, 0x00400},
12622                 { 0x00006000, 0x00400},
12623                 { 0x00008000, 0x01000},
12624                 { 0x00010000, 0x01000},
12625                 { 0xffffffff, 0x00000}
12626         }, mem_tbl_5717[] = {
12627                 { 0x00000200, 0x00008},
12628                 { 0x00010000, 0x0a000},
12629                 { 0x00020000, 0x13c00},
12630                 { 0xffffffff, 0x00000}
12631         }, mem_tbl_57765[] = {
12632                 { 0x00000200, 0x00008},
12633                 { 0x00004000, 0x00800},
12634                 { 0x00006000, 0x09800},
12635                 { 0x00010000, 0x0a000},
12636                 { 0xffffffff, 0x00000}
12637         };
12638         struct mem_entry *mem_tbl;
12639         int err = 0;
12640         int i;
12641
12642         if (tg3_flag(tp, 5717_PLUS))
12643                 mem_tbl = mem_tbl_5717;
12644         else if (tg3_flag(tp, 57765_CLASS) ||
12645                  tg3_asic_rev(tp) == ASIC_REV_5762)
12646                 mem_tbl = mem_tbl_57765;
12647         else if (tg3_flag(tp, 5755_PLUS))
12648                 mem_tbl = mem_tbl_5755;
12649         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
12650                 mem_tbl = mem_tbl_5906;
12651         else if (tg3_flag(tp, 5705_PLUS))
12652                 mem_tbl = mem_tbl_5705;
12653         else
12654                 mem_tbl = mem_tbl_570x;
12655
12656         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12657                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12658                 if (err)
12659                         break;
12660         }
12661
12662         return err;
12663 }
12664
12665 #define TG3_TSO_MSS             500
12666
12667 #define TG3_TSO_IP_HDR_LEN      20
12668 #define TG3_TSO_TCP_HDR_LEN     20
12669 #define TG3_TSO_TCP_OPT_LEN     12
12670
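/* Editor's note on the 54-byte template below: bytes 0-1 are the EtherType
 * (0x0800, IPv4); bytes 2-21 are a 20-byte IPv4 header (DF set, TTL 64,
 * protocol TCP, 10.0.0.1 -> 10.0.0.2, tot_len patched in at run time);
 * bytes 22-53 are a 32-byte TCP header (data offset 8) whose final 12
 * bytes are options: two NOPs plus a timestamp option padded with 0x11.
 */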
12671 static const u8 tg3_tso_header[] = {
12672 0x08, 0x00,
12673 0x45, 0x00, 0x00, 0x00,
12674 0x00, 0x00, 0x40, 0x00,
12675 0x40, 0x06, 0x00, 0x00,
12676 0x0a, 0x00, 0x00, 0x01,
12677 0x0a, 0x00, 0x00, 0x02,
12678 0x0d, 0x00, 0xe0, 0x00,
12679 0x00, 0x00, 0x01, 0x00,
12680 0x00, 0x00, 0x02, 0x00,
12681 0x80, 0x10, 0x10, 0x00,
12682 0x14, 0x09, 0x00, 0x00,
12683 0x01, 0x01, 0x08, 0x0a,
12684 0x11, 0x11, 0x11, 0x11,
12685 0x11, 0x11, 0x11, 0x11,
12686 };
12687
12688 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12689 {
12690         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12691         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12692         u32 budget;
12693         struct sk_buff *skb;
12694         u8 *tx_data, *rx_data;
12695         dma_addr_t map;
12696         int num_pkts, tx_len, rx_len, i, err;
12697         struct tg3_rx_buffer_desc *desc;
12698         struct tg3_napi *tnapi, *rnapi;
12699         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12700
12701         tnapi = &tp->napi[0];
12702         rnapi = &tp->napi[0];
12703         if (tp->irq_cnt > 1) {
12704                 if (tg3_flag(tp, ENABLE_RSS))
12705                         rnapi = &tp->napi[1];
12706                 if (tg3_flag(tp, ENABLE_TSS))
12707                         tnapi = &tp->napi[1];
12708         }
12709         coal_now = tnapi->coal_now | rnapi->coal_now;
12710
12711         err = -EIO;
12712
12713         tx_len = pktsz;
12714         skb = netdev_alloc_skb(tp->dev, tx_len);
12715         if (!skb)
12716                 return -ENOMEM;
12717
12718         tx_data = skb_put(skb, tx_len);
12719         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
12720         memset(tx_data + ETH_ALEN, 0x0, 8);
12721
12722         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12723
12724         if (tso_loopback) {
12725                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12726
12727                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12728                               TG3_TSO_TCP_OPT_LEN;
12729
12730                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12731                        sizeof(tg3_tso_header));
12732                 mss = TG3_TSO_MSS;
12733
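                      /* The hardware will segment the payload into
                       * MSS-sized frames, so expect ceil(payload / MSS)
                       * packets on the receive side.
                       */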
12734                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12735                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12736
12737                 /* Set the total length field in the IP header */
12738                 iph->tot_len = htons((u16)(mss + hdr_len));
12739
12740                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12741                               TXD_FLAG_CPU_POST_DMA);
12742
12743                 if (tg3_flag(tp, HW_TSO_1) ||
12744                     tg3_flag(tp, HW_TSO_2) ||
12745                     tg3_flag(tp, HW_TSO_3)) {
12746                         struct tcphdr *th;
12747                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12748                         th = (struct tcphdr *)&tx_data[val];
12749                         th->check = 0;
12750                 } else
12751                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
12752
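                      /* Each TSO hardware generation encodes the header
                       * length differently: HW_TSO_3 spreads it across
                       * spare mss and base_flags bits, HW_TSO_2 shifts it
                       * into the upper mss bits, and older parts encode
                       * only the TCP option length.
                       */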
12753                 if (tg3_flag(tp, HW_TSO_3)) {
12754                         mss |= (hdr_len & 0xc) << 12;
12755                         if (hdr_len & 0x10)
12756                                 base_flags |= 0x00000010;
12757                         base_flags |= (hdr_len & 0x3e0) << 5;
12758                 } else if (tg3_flag(tp, HW_TSO_2))
12759                         mss |= hdr_len << 9;
12760                 else if (tg3_flag(tp, HW_TSO_1) ||
12761                          tg3_asic_rev(tp) == ASIC_REV_5705) {
12762                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12763                 } else {
12764                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12765                 }
12766
12767                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12768         } else {
12769                 num_pkts = 1;
12770                 data_off = ETH_HLEN;
12771
12772                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12773                     tx_len > VLAN_ETH_FRAME_LEN)
12774                         base_flags |= TXD_FLAG_JMB_PKT;
12775         }
12776
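              /* Fill the payload with an incrementing byte pattern;
               * the receive path below verifies it byte for byte.
               */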
12777         for (i = data_off; i < tx_len; i++)
12778                 tx_data[i] = (u8) (i & 0xff);
12779
12780         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12781         if (pci_dma_mapping_error(tp->pdev, map)) {
12782                 dev_kfree_skb(skb);
12783                 return -EIO;
12784         }
12785
12786         val = tnapi->tx_prod;
12787         tnapi->tx_buffers[val].skb = skb;
12788         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12789
12790         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12791                rnapi->coal_now);
12792
12793         udelay(10);
12794
12795         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12796
12797         budget = tg3_tx_avail(tnapi);
12798         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12799                             base_flags | TXD_FLAG_END, mss, 0)) {
12800                 tnapi->tx_buffers[val].skb = NULL;
12801                 dev_kfree_skb(skb);
12802                 return -EIO;
12803         }
12804
12805         tnapi->tx_prod++;
12806
12807         /* Sync BD data before updating mailbox */
12808         wmb();
12809
12810         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12811         tr32_mailbox(tnapi->prodmbox);
12812
12813         udelay(10);
12814
12815         /* Poll up to 350 usec to allow enough time on some 10/100 Mbps devices. */
12816         for (i = 0; i < 35; i++) {
12817                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12818                        coal_now);
12819
12820                 udelay(10);
12821
12822                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12823                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12824                 if ((tx_idx == tnapi->tx_prod) &&
12825                     (rx_idx == (rx_start_idx + num_pkts)))
12826                         break;
12827         }
12828
12829         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12830         dev_kfree_skb(skb);
12831
12832         if (tx_idx != tnapi->tx_prod)
12833                 goto out;
12834
12835         if (rx_idx != rx_start_idx + num_pkts)
12836                 goto out;
12837
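              /* Walk the rx return ring; val tracks the expected pattern
               * byte across all returned frames.
               */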
12838         val = data_off;
12839         while (rx_idx != rx_start_idx) {
12840                 desc = &rnapi->rx_rcb[rx_start_idx++];
12841                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12842                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12843
12844                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12845                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12846                         goto out;
12847
12848                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12849                          - ETH_FCS_LEN;
12850
12851                 if (!tso_loopback) {
12852                         if (rx_len != tx_len)
12853                                 goto out;
12854
12855                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12856                                 if (opaque_key != RXD_OPAQUE_RING_STD)
12857                                         goto out;
12858                         } else {
12859                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12860                                         goto out;
12861                         }
12862                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12863                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12864                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
12865                         goto out;
12866                 }
12867
12868                 if (opaque_key == RXD_OPAQUE_RING_STD) {
12869                         rx_data = tpr->rx_std_buffers[desc_idx].data;
12870                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12871                                              mapping);
12872                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12873                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12874                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12875                                              mapping);
12876                 } else
12877                         goto out;
12878
12879                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12880                                             PCI_DMA_FROMDEVICE);
12881
12882                 rx_data += TG3_RX_OFFSET(tp);
12883                 for (i = data_off; i < rx_len; i++, val++) {
12884                         if (*(rx_data + i) != (u8) (val & 0xff))
12885                                 goto out;
12886                 }
12887         }
12888
12889         err = 0;
12890
12891         /* tg3_free_rings will unmap and free the rx_data */
12892 out:
12893         return err;
12894 }
12895
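      /* Loopback results are bitmasks of the failed sub-tests (standard,
       * jumbo, TSO) for each loopback mode.
       */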
12896 #define TG3_STD_LOOPBACK_FAILED         1
12897 #define TG3_JMB_LOOPBACK_FAILED         2
12898 #define TG3_TSO_LOOPBACK_FAILED         4
12899 #define TG3_LOOPBACK_FAILED \
12900         (TG3_STD_LOOPBACK_FAILED | \
12901          TG3_JMB_LOOPBACK_FAILED | \
12902          TG3_TSO_LOOPBACK_FAILED)
12903
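      /* Run the MAC, internal PHY and (optionally) external loopback
       * tests, recording failed sub-tests in @data.  Returns 0 only if
       * every requested test passed.
       */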
12904 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12905 {
12906         int err = -EIO;
12907         u32 eee_cap;
12908         u32 jmb_pkt_sz = 9000;
12909
12910         if (tp->dma_limit)
12911                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12912
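              /* Hide the EEE capability for the duration of the tests so
               * low-power idle can't disturb the loopback traffic; the
               * flag is restored at "done".
               */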
12913         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12914         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12915
12916         if (!netif_running(tp->dev)) {
12917                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12918                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12919                 if (do_extlpbk)
12920                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12921                 goto done;
12922         }
12923
12924         err = tg3_reset_hw(tp, 1);
12925         if (err) {
12926                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12927                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12928                 if (do_extlpbk)
12929                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12930                 goto done;
12931         }
12932
12933         if (tg3_flag(tp, ENABLE_RSS)) {
12934                 int i;
12935
12936                 /* Reroute all rx packets to the 1st queue */
12937                 for (i = MAC_RSS_INDIR_TBL_0;
12938                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12939                         tw32(i, 0x0);
12940         }
12941
12942         /* HW errata - mac loopback fails in some cases on 5780.
12943          * Normal traffic and PHY loopback are not affected by
12944          * errata.  Also, the MAC loopback test is deprecated for
12945          * all newer ASIC revisions.
12946          */
12947         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
12948             !tg3_flag(tp, CPMU_PRESENT)) {
12949                 tg3_mac_loopback(tp, true);
12950
12951                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12952                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12953
12954                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12955                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12956                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12957
12958                 tg3_mac_loopback(tp, false);
12959         }
12960
12961         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12962             !tg3_flag(tp, USE_PHYLIB)) {
12963                 int i;
12964
12965                 tg3_phy_lpbk_set(tp, 0, false);
12966
12967                 /* Wait for link */
12968                 for (i = 0; i < 100; i++) {
12969                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12970                                 break;
12971                         mdelay(1);
12972                 }
12973
12974                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12975                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12976                 if (tg3_flag(tp, TSO_CAPABLE) &&
12977                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12978                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
12979                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12980                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12981                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12982
12983                 if (do_extlpbk) {
12984                         tg3_phy_lpbk_set(tp, 0, true);
12985
12986                         /* All link indications report up, but the hardware
12987                          * isn't really ready for about 20 msec.  Double it
12988                          * to be sure.
12989                          */
12990                         mdelay(40);
12991
12992                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12993                                 data[TG3_EXT_LOOPB_TEST] |=
12994                                                         TG3_STD_LOOPBACK_FAILED;
12995                         if (tg3_flag(tp, TSO_CAPABLE) &&
12996                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12997                                 data[TG3_EXT_LOOPB_TEST] |=
12998                                                         TG3_TSO_LOOPBACK_FAILED;
12999                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13000                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13001                                 data[TG3_EXT_LOOPB_TEST] |=
13002                                                         TG3_JMB_LOOPBACK_FAILED;
13003                 }
13004
13005                 /* Re-enable gphy autopowerdown. */
13006                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13007                         tg3_phy_toggle_apd(tp, true);
13008         }
13009
13010         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13011                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13012
13013 done:
13014         tp->phy_flags |= eee_cap;
13015
13016         return err;
13017 }
13018
13019 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13020                           u64 *data)
13021 {
13022         struct tg3 *tp = netdev_priv(dev);
13023         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13024
13025         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
13026             tg3_power_up(tp)) {
13027                 etest->flags |= ETH_TEST_FL_FAILED;
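                      /* memset() fills bytes, so each u64 slot becomes a
                       * nonzero sentinel (0x0101...) -- enough to mark
                       * every test as failed.
                       */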
13028                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13029                 return;
13030         }
13031
13032         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13033
13034         if (tg3_test_nvram(tp) != 0) {
13035                 etest->flags |= ETH_TEST_FL_FAILED;
13036                 data[TG3_NVRAM_TEST] = 1;
13037         }
13038         if (!doextlpbk && tg3_test_link(tp)) {
13039                 etest->flags |= ETH_TEST_FL_FAILED;
13040                 data[TG3_LINK_TEST] = 1;
13041         }
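              /* Offline tests are destructive: quiesce the interface,
               * halt the chip, run the register/memory/loopback and
               * interrupt tests, then restore the previous state.
               */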
13042         if (etest->flags & ETH_TEST_FL_OFFLINE) {
13043                 int err, err2 = 0, irq_sync = 0;
13044
13045                 if (netif_running(dev)) {
13046                         tg3_phy_stop(tp);
13047                         tg3_netif_stop(tp);
13048                         irq_sync = 1;
13049                 }
13050
13051                 tg3_full_lock(tp, irq_sync);
13052                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13053                 err = tg3_nvram_lock(tp);
13054                 tg3_halt_cpu(tp, RX_CPU_BASE);
13055                 if (!tg3_flag(tp, 5705_PLUS))
13056                         tg3_halt_cpu(tp, TX_CPU_BASE);
13057                 if (!err)
13058                         tg3_nvram_unlock(tp);
13059
13060                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13061                         tg3_phy_reset(tp);
13062
13063                 if (tg3_test_registers(tp) != 0) {
13064                         etest->flags |= ETH_TEST_FL_FAILED;
13065                         data[TG3_REGISTER_TEST] = 1;
13066                 }
13067
13068                 if (tg3_test_memory(tp) != 0) {
13069                         etest->flags |= ETH_TEST_FL_FAILED;
13070                         data[TG3_MEMORY_TEST] = 1;
13071                 }
13072
13073                 if (doextlpbk)
13074                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13075
13076                 if (tg3_test_loopback(tp, data, doextlpbk))
13077                         etest->flags |= ETH_TEST_FL_FAILED;
13078
13079                 tg3_full_unlock(tp);
13080
13081                 if (tg3_test_interrupt(tp) != 0) {
13082                         etest->flags |= ETH_TEST_FL_FAILED;
13083                         data[TG3_INTERRUPT_TEST] = 1;
13084                 }
13085
13086                 tg3_full_lock(tp, 0);
13087
13088                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13089                 if (netif_running(dev)) {
13090                         tg3_flag_set(tp, INIT_COMPLETE);
13091                         err2 = tg3_restart_hw(tp, 1);
13092                         if (!err2)
13093                                 tg3_netif_start(tp);
13094                 }
13095
13096                 tg3_full_unlock(tp);
13097
13098                 if (irq_sync && !err2)
13099                         tg3_phy_start(tp);
13100         }
13101         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13102                 tg3_power_down(tp);
13103
13104 }
13105
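      /* SIOCSHWTSTAMP handler: validate the hwtstamp_config from user
       * space, enable/disable TX timestamping, program the RX PTP packet
       * filter and echo the accepted config back.
       */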
13106 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13107                               struct ifreq *ifr, int cmd)
13108 {
13109         struct tg3 *tp = netdev_priv(dev);
13110         struct hwtstamp_config stmpconf;
13111
13112         if (!tg3_flag(tp, PTP_CAPABLE))
13113                 return -EINVAL;
13114
13115         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13116                 return -EFAULT;
13117
13118         if (stmpconf.flags)
13119                 return -EINVAL;
13120
13121         switch (stmpconf.tx_type) {
13122         case HWTSTAMP_TX_ON:
13123                 tg3_flag_set(tp, TX_TSTAMP_EN);
13124                 break;
13125         case HWTSTAMP_TX_OFF:
13126                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13127                 break;
13128         default:
13129                 return -ERANGE;
13130         }
13131
13132         switch (stmpconf.rx_filter) {
13133         case HWTSTAMP_FILTER_NONE:
13134                 tp->rxptpctl = 0;
13135                 break;
13136         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13137                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13138                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13139                 break;
13140         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13141                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13142                                TG3_RX_PTP_CTL_SYNC_EVNT;
13143                 break;
13144         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13145                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13146                                TG3_RX_PTP_CTL_DELAY_REQ;
13147                 break;
13148         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13149                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13150                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13151                 break;
13152         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13153                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13154                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13155                 break;
13156         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13157                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13158                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13159                 break;
13160         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13161                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13162                                TG3_RX_PTP_CTL_SYNC_EVNT;
13163                 break;
13164         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13165                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13166                                TG3_RX_PTP_CTL_SYNC_EVNT;
13167                 break;
13168         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13169                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13170                                TG3_RX_PTP_CTL_SYNC_EVNT;
13171                 break;
13172         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13173                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13174                                TG3_RX_PTP_CTL_DELAY_REQ;
13175                 break;
13176         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13177                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13178                                TG3_RX_PTP_CTL_DELAY_REQ;
13179                 break;
13180         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13181                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13182                                TG3_RX_PTP_CTL_DELAY_REQ;
13183                 break;
13184         default:
13185                 return -ERANGE;
13186         }
13187
13188         if (netif_running(dev) && tp->rxptpctl)
13189                 tw32(TG3_RX_PTP_CTL,
13190                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13191
13192         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13193                 -EFAULT : 0;
13194 }
13195
13196 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13197 {
13198         struct mii_ioctl_data *data = if_mii(ifr);
13199         struct tg3 *tp = netdev_priv(dev);
13200         int err;
13201
13202         if (tg3_flag(tp, USE_PHYLIB)) {
13203                 struct phy_device *phydev;
13204                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13205                         return -EAGAIN;
13206                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13207                 return phy_mii_ioctl(phydev, ifr, cmd);
13208         }
13209
13210         switch (cmd) {
13211         case SIOCGMIIPHY:
13212                 data->phy_id = tp->phy_addr;
13213
13214                 /* fallthru */
13215         case SIOCGMIIREG: {
13216                 u32 mii_regval;
13217
13218                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13219                         break;                  /* We have no PHY */
13220
13221                 if (!netif_running(dev))
13222                         return -EAGAIN;
13223
13224                 spin_lock_bh(&tp->lock);
13225                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13226                                     data->reg_num & 0x1f, &mii_regval);
13227                 spin_unlock_bh(&tp->lock);
13228
13229                 data->val_out = mii_regval;
13230
13231                 return err;
13232         }
13233
13234         case SIOCSMIIREG:
13235                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13236                         break;                  /* We have no PHY */
13237
13238                 if (!netif_running(dev))
13239                         return -EAGAIN;
13240
13241                 spin_lock_bh(&tp->lock);
13242                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13243                                      data->reg_num & 0x1f, data->val_in);
13244                 spin_unlock_bh(&tp->lock);
13245
13246                 return err;
13247
13248         case SIOCSHWTSTAMP:
13249                 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13250
13251         default:
13252                 /* do nothing */
13253                 break;
13254         }
13255         return -EOPNOTSUPP;
13256 }
13257
13258 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13259 {
13260         struct tg3 *tp = netdev_priv(dev);
13261
13262         memcpy(ec, &tp->coal, sizeof(*ec));
13263         return 0;
13264 }
13265
13266 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13267 {
13268         struct tg3 *tp = netdev_priv(dev);
13269         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13270         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13271
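              /* Only pre-5705 hardware has the irq-level coalescing ticks
               * and the statistics block timer; for newer parts the limits
               * stay zero, so the checks below reject any nonzero request.
               */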
13272         if (!tg3_flag(tp, 5705_PLUS)) {
13273                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13274                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13275                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13276                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13277         }
13278
13279         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13280             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13281             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13282             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13283             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13284             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13285             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13286             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13287             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13288             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13289                 return -EINVAL;
13290
13291         /* No rx interrupts will be generated if both are zero */
13292         if ((ec->rx_coalesce_usecs == 0) &&
13293             (ec->rx_max_coalesced_frames == 0))
13294                 return -EINVAL;
13295
13296         /* No tx interrupts will be generated if both are zero */
13297         if ((ec->tx_coalesce_usecs == 0) &&
13298             (ec->tx_max_coalesced_frames == 0))
13299                 return -EINVAL;
13300
13301         /* Only copy relevant parameters, ignore all others. */
13302         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13303         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13304         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13305         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13306         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13307         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13308         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13309         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13310         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13311
13312         if (netif_running(dev)) {
13313                 tg3_full_lock(tp, 0);
13314                 __tg3_set_coalesce(tp, &tp->coal);
13315                 tg3_full_unlock(tp);
13316         }
13317         return 0;
13318 }
13319
13320 static const struct ethtool_ops tg3_ethtool_ops = {
13321         .get_settings           = tg3_get_settings,
13322         .set_settings           = tg3_set_settings,
13323         .get_drvinfo            = tg3_get_drvinfo,
13324         .get_regs_len           = tg3_get_regs_len,
13325         .get_regs               = tg3_get_regs,
13326         .get_wol                = tg3_get_wol,
13327         .set_wol                = tg3_set_wol,
13328         .get_msglevel           = tg3_get_msglevel,
13329         .set_msglevel           = tg3_set_msglevel,
13330         .nway_reset             = tg3_nway_reset,
13331         .get_link               = ethtool_op_get_link,
13332         .get_eeprom_len         = tg3_get_eeprom_len,
13333         .get_eeprom             = tg3_get_eeprom,
13334         .set_eeprom             = tg3_set_eeprom,
13335         .get_ringparam          = tg3_get_ringparam,
13336         .set_ringparam          = tg3_set_ringparam,
13337         .get_pauseparam         = tg3_get_pauseparam,
13338         .set_pauseparam         = tg3_set_pauseparam,
13339         .self_test              = tg3_self_test,
13340         .get_strings            = tg3_get_strings,
13341         .set_phys_id            = tg3_set_phys_id,
13342         .get_ethtool_stats      = tg3_get_ethtool_stats,
13343         .get_coalesce           = tg3_get_coalesce,
13344         .set_coalesce           = tg3_set_coalesce,
13345         .get_sset_count         = tg3_get_sset_count,
13346         .get_rxnfc              = tg3_get_rxnfc,
13347         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
13348         .get_rxfh_indir         = tg3_get_rxfh_indir,
13349         .set_rxfh_indir         = tg3_set_rxfh_indir,
13350         .get_channels           = tg3_get_channels,
13351         .set_channels           = tg3_set_channels,
13352         .get_ts_info            = tg3_get_ts_info,
13353 };
13354
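      /* Once the device is closed the hardware stats block is freed, so
       * fall back to the snapshot taken at the last close.
       */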
13355 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13356                                                 struct rtnl_link_stats64 *stats)
13357 {
13358         struct tg3 *tp = netdev_priv(dev);
13359
13360         spin_lock_bh(&tp->lock);
13361         if (!tp->hw_stats) {
13362                 spin_unlock_bh(&tp->lock);
13363                 return &tp->net_stats_prev;
13364         }
13365
13366         tg3_get_nstats(tp, stats);
13367         spin_unlock_bh(&tp->lock);
13368
13369         return stats;
13370 }
13371
13372 static void tg3_set_rx_mode(struct net_device *dev)
13373 {
13374         struct tg3 *tp = netdev_priv(dev);
13375
13376         if (!netif_running(dev))
13377                 return;
13378
13379         tg3_full_lock(tp, 0);
13380         __tg3_set_rx_mode(dev);
13381         tg3_full_unlock(tp);
13382 }
13383
13384 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13385                                int new_mtu)
13386 {
13387         dev->mtu = new_mtu;
13388
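              /* 5780-class chips cannot do TSO on jumbo frames; toggle
               * TSO_CAPABLE (re-evaluating features) instead of touching
               * the jumbo ring on those parts.
               */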
13389         if (new_mtu > ETH_DATA_LEN) {
13390                 if (tg3_flag(tp, 5780_CLASS)) {
13391                         netdev_update_features(dev);
13392                         tg3_flag_clear(tp, TSO_CAPABLE);
13393                 } else {
13394                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
13395                 }
13396         } else {
13397                 if (tg3_flag(tp, 5780_CLASS)) {
13398                         tg3_flag_set(tp, TSO_CAPABLE);
13399                         netdev_update_features(dev);
13400                 }
13401                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13402         }
13403 }
13404
13405 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13406 {
13407         struct tg3 *tp = netdev_priv(dev);
13408         int err, reset_phy = 0;
13409
13410         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13411                 return -EINVAL;
13412
13413         if (!netif_running(dev)) {
13414                 /* We'll just catch it later when the
13415                  * device is brought up.
13416                  */
13417                 tg3_set_mtu(dev, tp, new_mtu);
13418                 return 0;
13419         }
13420
13421         tg3_phy_stop(tp);
13422
13423         tg3_netif_stop(tp);
13424
13425         tg3_full_lock(tp, 1);
13426
13427         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13428
13429         tg3_set_mtu(dev, tp, new_mtu);
13430
13431         /* Reset the PHY, otherwise the read DMA engine will be left in a
13432          * mode that breaks all requests up into 256-byte chunks.
13433          */
13434         if (tg3_asic_rev(tp) == ASIC_REV_57766)
13435                 reset_phy = 1;
13436
13437         err = tg3_restart_hw(tp, reset_phy);
13438
13439         if (!err)
13440                 tg3_netif_start(tp);
13441
13442         tg3_full_unlock(tp);
13443
13444         if (!err)
13445                 tg3_phy_start(tp);
13446
13447         return err;
13448 }
13449
13450 static const struct net_device_ops tg3_netdev_ops = {
13451         .ndo_open               = tg3_open,
13452         .ndo_stop               = tg3_close,
13453         .ndo_start_xmit         = tg3_start_xmit,
13454         .ndo_get_stats64        = tg3_get_stats64,
13455         .ndo_validate_addr      = eth_validate_addr,
13456         .ndo_set_rx_mode        = tg3_set_rx_mode,
13457         .ndo_set_mac_address    = tg3_set_mac_addr,
13458         .ndo_do_ioctl           = tg3_ioctl,
13459         .ndo_tx_timeout         = tg3_tx_timeout,
13460         .ndo_change_mtu         = tg3_change_mtu,
13461         .ndo_fix_features       = tg3_fix_features,
13462         .ndo_set_features       = tg3_set_features,
13463 #ifdef CONFIG_NET_POLL_CONTROLLER
13464         .ndo_poll_controller    = tg3_poll_controller,
13465 #endif
13466 };
13467
13468 static void tg3_get_eeprom_size(struct tg3 *tp)
13469 {
13470         u32 cursize, val, magic;
13471
13472         tp->nvram_size = EEPROM_CHIP_SIZE;
13473
13474         if (tg3_nvram_read(tp, 0, &magic) != 0)
13475                 return;
13476
13477         if ((magic != TG3_EEPROM_MAGIC) &&
13478             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13479             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13480                 return;
13481
13482         /*
13483          * Size the chip by reading offsets at increasing powers of two.
13484          * When we encounter our validation signature, we know the addressing
13485          * has wrapped around, and thus have our chip size.
13486          */
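              /* e.g. a 64 KiB part wraps at offset 0x10000: the read there
               * aliases offset 0 and returns the magic value again.
               */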
13487         cursize = 0x10;
13488
13489         while (cursize < tp->nvram_size) {
13490                 if (tg3_nvram_read(tp, cursize, &val) != 0)
13491                         return;
13492
13493                 if (val == magic)
13494                         break;
13495
13496                 cursize <<= 1;
13497         }
13498
13499         tp->nvram_size = cursize;
13500 }
13501
13502 static void tg3_get_nvram_size(struct tg3 *tp)
13503 {
13504         u32 val;
13505
13506         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13507                 return;
13508
13509         /* Selfboot format */
13510         if (val != TG3_EEPROM_MAGIC) {
13511                 tg3_get_eeprom_size(tp);
13512                 return;
13513         }
13514
13515         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13516                 if (val != 0) {
13517                         /* This is confusing.  We want to operate on the
13518                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
13519                          * call will read from NVRAM and byteswap the data
13520                          * according to the byteswapping settings for all
13521                          * other register accesses.  This ensures the data we
13522                          * want will always reside in the lower 16-bits.
13523                          * However, the data in NVRAM is in LE format, which
13524                          * means the data from the NVRAM read will always be
13525                          * opposite the endianness of the CPU.  The 16-bit
13526                          * byteswap then brings the data to CPU endianness.
13527                          */
13528                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13529                         return;
13530                 }
13531         }
13532         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13533 }
13534
13535 static void tg3_get_nvram_info(struct tg3 *tp)
13536 {
13537         u32 nvcfg1;
13538
13539         nvcfg1 = tr32(NVRAM_CFG1);
13540         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13541                 tg3_flag_set(tp, FLASH);
13542         } else {
13543                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13544                 tw32(NVRAM_CFG1, nvcfg1);
13545         }
13546
13547         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13548             tg3_flag(tp, 5780_CLASS)) {
13549                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13550                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13551                         tp->nvram_jedecnum = JEDEC_ATMEL;
13552                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13553                         tg3_flag_set(tp, NVRAM_BUFFERED);
13554                         break;
13555                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13556                         tp->nvram_jedecnum = JEDEC_ATMEL;
13557                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13558                         break;
13559                 case FLASH_VENDOR_ATMEL_EEPROM:
13560                         tp->nvram_jedecnum = JEDEC_ATMEL;
13561                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13562                         tg3_flag_set(tp, NVRAM_BUFFERED);
13563                         break;
13564                 case FLASH_VENDOR_ST:
13565                         tp->nvram_jedecnum = JEDEC_ST;
13566                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13567                         tg3_flag_set(tp, NVRAM_BUFFERED);
13568                         break;
13569                 case FLASH_VENDOR_SAIFUN:
13570                         tp->nvram_jedecnum = JEDEC_SAIFUN;
13571                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13572                         break;
13573                 case FLASH_VENDOR_SST_SMALL:
13574                 case FLASH_VENDOR_SST_LARGE:
13575                         tp->nvram_jedecnum = JEDEC_SST;
13576                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13577                         break;
13578                 }
13579         } else {
13580                 tp->nvram_jedecnum = JEDEC_ATMEL;
13581                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13582                 tg3_flag_set(tp, NVRAM_BUFFERED);
13583         }
13584 }
13585
13586 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13587 {
13588         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13589         case FLASH_5752PAGE_SIZE_256:
13590                 tp->nvram_pagesize = 256;
13591                 break;
13592         case FLASH_5752PAGE_SIZE_512:
13593                 tp->nvram_pagesize = 512;
13594                 break;
13595         case FLASH_5752PAGE_SIZE_1K:
13596                 tp->nvram_pagesize = 1024;
13597                 break;
13598         case FLASH_5752PAGE_SIZE_2K:
13599                 tp->nvram_pagesize = 2048;
13600                 break;
13601         case FLASH_5752PAGE_SIZE_4K:
13602                 tp->nvram_pagesize = 4096;
13603                 break;
13604         case FLASH_5752PAGE_SIZE_264:
13605                 tp->nvram_pagesize = 264;
13606                 break;
13607         case FLASH_5752PAGE_SIZE_528:
13608                 tp->nvram_pagesize = 528;
13609                 break;
13610         }
13611 }
13612
13613 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13614 {
13615         u32 nvcfg1;
13616
13617         nvcfg1 = tr32(NVRAM_CFG1);
13618
13619         /* NVRAM protection for TPM */
13620         if (nvcfg1 & (1 << 27))
13621                 tg3_flag_set(tp, PROTECTED_NVRAM);
13622
13623         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13624         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13625         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13626                 tp->nvram_jedecnum = JEDEC_ATMEL;
13627                 tg3_flag_set(tp, NVRAM_BUFFERED);
13628                 break;
13629         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13630                 tp->nvram_jedecnum = JEDEC_ATMEL;
13631                 tg3_flag_set(tp, NVRAM_BUFFERED);
13632                 tg3_flag_set(tp, FLASH);
13633                 break;
13634         case FLASH_5752VENDOR_ST_M45PE10:
13635         case FLASH_5752VENDOR_ST_M45PE20:
13636         case FLASH_5752VENDOR_ST_M45PE40:
13637                 tp->nvram_jedecnum = JEDEC_ST;
13638                 tg3_flag_set(tp, NVRAM_BUFFERED);
13639                 tg3_flag_set(tp, FLASH);
13640                 break;
13641         }
13642
13643         if (tg3_flag(tp, FLASH)) {
13644                 tg3_nvram_get_pagesize(tp, nvcfg1);
13645         } else {
13646                 /* For EEPROM devices, set the pagesize to the maximum EEPROM size */
13647                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13648
13649                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13650                 tw32(NVRAM_CFG1, nvcfg1);
13651         }
13652 }
13653
13654 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13655 {
13656         u32 nvcfg1, protect = 0;
13657
13658         nvcfg1 = tr32(NVRAM_CFG1);
13659
13660         /* NVRAM protection for TPM */
13661         if (nvcfg1 & (1 << 27)) {
13662                 tg3_flag_set(tp, PROTECTED_NVRAM);
13663                 protect = 1;
13664         }
13665
13666         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13667         switch (nvcfg1) {
13668         case FLASH_5755VENDOR_ATMEL_FLASH_1:
13669         case FLASH_5755VENDOR_ATMEL_FLASH_2:
13670         case FLASH_5755VENDOR_ATMEL_FLASH_3:
13671         case FLASH_5755VENDOR_ATMEL_FLASH_5:
13672                 tp->nvram_jedecnum = JEDEC_ATMEL;
13673                 tg3_flag_set(tp, NVRAM_BUFFERED);
13674                 tg3_flag_set(tp, FLASH);
13675                 tp->nvram_pagesize = 264;
13676                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13677                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13678                         tp->nvram_size = (protect ? 0x3e200 :
13679                                           TG3_NVRAM_SIZE_512KB);
13680                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13681                         tp->nvram_size = (protect ? 0x1f200 :
13682                                           TG3_NVRAM_SIZE_256KB);
13683                 else
13684                         tp->nvram_size = (protect ? 0x1f200 :
13685                                           TG3_NVRAM_SIZE_128KB);
13686                 break;
13687         case FLASH_5752VENDOR_ST_M45PE10:
13688         case FLASH_5752VENDOR_ST_M45PE20:
13689         case FLASH_5752VENDOR_ST_M45PE40:
13690                 tp->nvram_jedecnum = JEDEC_ST;
13691                 tg3_flag_set(tp, NVRAM_BUFFERED);
13692                 tg3_flag_set(tp, FLASH);
13693                 tp->nvram_pagesize = 256;
13694                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13695                         tp->nvram_size = (protect ?
13696                                           TG3_NVRAM_SIZE_64KB :
13697                                           TG3_NVRAM_SIZE_128KB);
13698                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13699                         tp->nvram_size = (protect ?
13700                                           TG3_NVRAM_SIZE_64KB :
13701                                           TG3_NVRAM_SIZE_256KB);
13702                 else
13703                         tp->nvram_size = (protect ?
13704                                           TG3_NVRAM_SIZE_128KB :
13705                                           TG3_NVRAM_SIZE_512KB);
13706                 break;
13707         }
13708 }
13709
13710 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13711 {
13712         u32 nvcfg1;
13713
13714         nvcfg1 = tr32(NVRAM_CFG1);
13715
13716         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13717         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13718         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13719         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13720         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13721                 tp->nvram_jedecnum = JEDEC_ATMEL;
13722                 tg3_flag_set(tp, NVRAM_BUFFERED);
13723                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13724
13725                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13726                 tw32(NVRAM_CFG1, nvcfg1);
13727                 break;
13728         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13729         case FLASH_5755VENDOR_ATMEL_FLASH_1:
13730         case FLASH_5755VENDOR_ATMEL_FLASH_2:
13731         case FLASH_5755VENDOR_ATMEL_FLASH_3:
13732                 tp->nvram_jedecnum = JEDEC_ATMEL;
13733                 tg3_flag_set(tp, NVRAM_BUFFERED);
13734                 tg3_flag_set(tp, FLASH);
13735                 tp->nvram_pagesize = 264;
13736                 break;
13737         case FLASH_5752VENDOR_ST_M45PE10:
13738         case FLASH_5752VENDOR_ST_M45PE20:
13739         case FLASH_5752VENDOR_ST_M45PE40:
13740                 tp->nvram_jedecnum = JEDEC_ST;
13741                 tg3_flag_set(tp, NVRAM_BUFFERED);
13742                 tg3_flag_set(tp, FLASH);
13743                 tp->nvram_pagesize = 256;
13744                 break;
13745         }
13746 }
13747
13748 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13749 {
13750         u32 nvcfg1, protect = 0;
13751
13752         nvcfg1 = tr32(NVRAM_CFG1);
13753
13754         /* NVRAM protection for TPM */
13755         if (nvcfg1 & (1 << 27)) {
13756                 tg3_flag_set(tp, PROTECTED_NVRAM);
13757                 protect = 1;
13758         }
13759
13760         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13761         switch (nvcfg1) {
13762         case FLASH_5761VENDOR_ATMEL_ADB021D:
13763         case FLASH_5761VENDOR_ATMEL_ADB041D:
13764         case FLASH_5761VENDOR_ATMEL_ADB081D:
13765         case FLASH_5761VENDOR_ATMEL_ADB161D:
13766         case FLASH_5761VENDOR_ATMEL_MDB021D:
13767         case FLASH_5761VENDOR_ATMEL_MDB041D:
13768         case FLASH_5761VENDOR_ATMEL_MDB081D:
13769         case FLASH_5761VENDOR_ATMEL_MDB161D:
13770                 tp->nvram_jedecnum = JEDEC_ATMEL;
13771                 tg3_flag_set(tp, NVRAM_BUFFERED);
13772                 tg3_flag_set(tp, FLASH);
13773                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13774                 tp->nvram_pagesize = 256;
13775                 break;
13776         case FLASH_5761VENDOR_ST_A_M45PE20:
13777         case FLASH_5761VENDOR_ST_A_M45PE40:
13778         case FLASH_5761VENDOR_ST_A_M45PE80:
13779         case FLASH_5761VENDOR_ST_A_M45PE16:
13780         case FLASH_5761VENDOR_ST_M_M45PE20:
13781         case FLASH_5761VENDOR_ST_M_M45PE40:
13782         case FLASH_5761VENDOR_ST_M_M45PE80:
13783         case FLASH_5761VENDOR_ST_M_M45PE16:
13784                 tp->nvram_jedecnum = JEDEC_ST;
13785                 tg3_flag_set(tp, NVRAM_BUFFERED);
13786                 tg3_flag_set(tp, FLASH);
13787                 tp->nvram_pagesize = 256;
13788                 break;
13789         }
13790
13791         if (protect) {
13792                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13793         } else {
13794                 switch (nvcfg1) {
13795                 case FLASH_5761VENDOR_ATMEL_ADB161D:
13796                 case FLASH_5761VENDOR_ATMEL_MDB161D:
13797                 case FLASH_5761VENDOR_ST_A_M45PE16:
13798                 case FLASH_5761VENDOR_ST_M_M45PE16:
13799                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13800                         break;
13801                 case FLASH_5761VENDOR_ATMEL_ADB081D:
13802                 case FLASH_5761VENDOR_ATMEL_MDB081D:
13803                 case FLASH_5761VENDOR_ST_A_M45PE80:
13804                 case FLASH_5761VENDOR_ST_M_M45PE80:
13805                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13806                         break;
13807                 case FLASH_5761VENDOR_ATMEL_ADB041D:
13808                 case FLASH_5761VENDOR_ATMEL_MDB041D:
13809                 case FLASH_5761VENDOR_ST_A_M45PE40:
13810                 case FLASH_5761VENDOR_ST_M_M45PE40:
13811                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13812                         break;
13813                 case FLASH_5761VENDOR_ATMEL_ADB021D:
13814                 case FLASH_5761VENDOR_ATMEL_MDB021D:
13815                 case FLASH_5761VENDOR_ST_A_M45PE20:
13816                 case FLASH_5761VENDOR_ST_M_M45PE20:
13817                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13818                         break;
13819                 }
13820         }
13821 }
13822
13823 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13824 {
13825         tp->nvram_jedecnum = JEDEC_ATMEL;
13826         tg3_flag_set(tp, NVRAM_BUFFERED);
13827         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13828 }
13829
13830 static void tg3_get_57780_nvram_info(struct tg3 *tp)
13831 {
13832         u32 nvcfg1;
13833
13834         nvcfg1 = tr32(NVRAM_CFG1);
13835
13836         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13837         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13838         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13839                 tp->nvram_jedecnum = JEDEC_ATMEL;
13840                 tg3_flag_set(tp, NVRAM_BUFFERED);
13841                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13842
13843                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13844                 tw32(NVRAM_CFG1, nvcfg1);
13845                 return;
13846         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13847         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13848         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13849         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13850         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13851         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13852         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13853                 tp->nvram_jedecnum = JEDEC_ATMEL;
13854                 tg3_flag_set(tp, NVRAM_BUFFERED);
13855                 tg3_flag_set(tp, FLASH);
13856
13857                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13858                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13859                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13860                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13861                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13862                         break;
13863                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13864                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13865                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13866                         break;
13867                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13868                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13869                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13870                         break;
13871                 }
13872                 break;
13873         case FLASH_5752VENDOR_ST_M45PE10:
13874         case FLASH_5752VENDOR_ST_M45PE20:
13875         case FLASH_5752VENDOR_ST_M45PE40:
13876                 tp->nvram_jedecnum = JEDEC_ST;
13877                 tg3_flag_set(tp, NVRAM_BUFFERED);
13878                 tg3_flag_set(tp, FLASH);
13879
13880                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13881                 case FLASH_5752VENDOR_ST_M45PE10:
13882                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13883                         break;
13884                 case FLASH_5752VENDOR_ST_M45PE20:
13885                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13886                         break;
13887                 case FLASH_5752VENDOR_ST_M45PE40:
13888                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13889                         break;
13890                 }
13891                 break;
13892         default:
13893                 tg3_flag_set(tp, NO_NVRAM);
13894                 return;
13895         }
13896
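              /* Atmel DataFlash parts use 264- or 528-byte pages and need
               * the NVRAM page-address translation; power-of-two pages
               * address linearly.
               */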
13897         tg3_nvram_get_pagesize(tp, nvcfg1);
13898         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13899                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13900 }
13901
13903 static void tg3_get_5717_nvram_info(struct tg3 *tp)
13904 {
13905         u32 nvcfg1;
13906
13907         nvcfg1 = tr32(NVRAM_CFG1);
13908
13909         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13910         case FLASH_5717VENDOR_ATMEL_EEPROM:
13911         case FLASH_5717VENDOR_MICRO_EEPROM:
13912                 tp->nvram_jedecnum = JEDEC_ATMEL;
13913                 tg3_flag_set(tp, NVRAM_BUFFERED);
13914                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13915
13916                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13917                 tw32(NVRAM_CFG1, nvcfg1);
13918                 return;
13919         case FLASH_5717VENDOR_ATMEL_MDB011D:
13920         case FLASH_5717VENDOR_ATMEL_ADB011B:
13921         case FLASH_5717VENDOR_ATMEL_ADB011D:
13922         case FLASH_5717VENDOR_ATMEL_MDB021D:
13923         case FLASH_5717VENDOR_ATMEL_ADB021B:
13924         case FLASH_5717VENDOR_ATMEL_ADB021D:
13925         case FLASH_5717VENDOR_ATMEL_45USPT:
13926                 tp->nvram_jedecnum = JEDEC_ATMEL;
13927                 tg3_flag_set(tp, NVRAM_BUFFERED);
13928                 tg3_flag_set(tp, FLASH);
13929
13930                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13931                 case FLASH_5717VENDOR_ATMEL_MDB021D:
13932                         /* Detect size with tg3_nvram_get_size() */
13933                         break;
13934                 case FLASH_5717VENDOR_ATMEL_ADB021B:
13935                 case FLASH_5717VENDOR_ATMEL_ADB021D:
13936                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13937                         break;
13938                 default:
13939                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13940                         break;
13941                 }
13942                 break;
13943         case FLASH_5717VENDOR_ST_M_M25PE10:
13944         case FLASH_5717VENDOR_ST_A_M25PE10:
13945         case FLASH_5717VENDOR_ST_M_M45PE10:
13946         case FLASH_5717VENDOR_ST_A_M45PE10:
13947         case FLASH_5717VENDOR_ST_M_M25PE20:
13948         case FLASH_5717VENDOR_ST_A_M25PE20:
13949         case FLASH_5717VENDOR_ST_M_M45PE20:
13950         case FLASH_5717VENDOR_ST_A_M45PE20:
13951         case FLASH_5717VENDOR_ST_25USPT:
13952         case FLASH_5717VENDOR_ST_45USPT:
13953                 tp->nvram_jedecnum = JEDEC_ST;
13954                 tg3_flag_set(tp, NVRAM_BUFFERED);
13955                 tg3_flag_set(tp, FLASH);
13956
13957                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13958                 case FLASH_5717VENDOR_ST_M_M25PE20:
13959                 case FLASH_5717VENDOR_ST_M_M45PE20:
13960                         /* Detect size with tg3_nvram_get_size() */
13961                         break;
13962                 case FLASH_5717VENDOR_ST_A_M25PE20:
13963                 case FLASH_5717VENDOR_ST_A_M45PE20:
13964                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13965                         break;
13966                 default:
13967                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13968                         break;
13969                 }
13970                 break;
13971         default:
13972                 tg3_flag_set(tp, NO_NVRAM);
13973                 return;
13974         }
13975
13976         tg3_nvram_get_pagesize(tp, nvcfg1);
13977         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13978                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13979 }
13980
13981 static void tg3_get_5720_nvram_info(struct tg3 *tp)
13982 {
13983         u32 nvcfg1, nvmpinstrp;
13984
13985         nvcfg1 = tr32(NVRAM_CFG1);
13986         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13987
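              /* A zero 5762 vendor strap means no NVRAM is fitted;
               * otherwise remap 5762-specific pinstraps onto their 5720
               * equivalents for the common switch below.
               */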
13988         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
13989                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
13990                         tg3_flag_set(tp, NO_NVRAM);
13991                         return;
13992                 }
13993
13994                 switch (nvmpinstrp) {
13995                 case FLASH_5762_EEPROM_HD:
13996                         nvmpinstrp = FLASH_5720_EEPROM_HD;
13997                         break;
13998                 case FLASH_5762_EEPROM_LD:
13999                         nvmpinstrp = FLASH_5720_EEPROM_LD;
14000                         break;
14001                 case FLASH_5720VENDOR_M_ST_M45PE20:
14002                         /* This pinstrap supports multiple sizes, so force it
14003                          * to read the actual size from location 0xf0.
14004                          */
14005                         nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14006                         break;
14007                 }
14008         }
14009
14010         switch (nvmpinstrp) {
14011         case FLASH_5720_EEPROM_HD:
14012         case FLASH_5720_EEPROM_LD:
14013                 tp->nvram_jedecnum = JEDEC_ATMEL;
14014                 tg3_flag_set(tp, NVRAM_BUFFERED);
14015
14016                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14017                 tw32(NVRAM_CFG1, nvcfg1);
14018                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14019                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14020                 else
14021                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14022                 return;
14023         case FLASH_5720VENDOR_M_ATMEL_DB011D:
14024         case FLASH_5720VENDOR_A_ATMEL_DB011B:
14025         case FLASH_5720VENDOR_A_ATMEL_DB011D:
14026         case FLASH_5720VENDOR_M_ATMEL_DB021D:
14027         case FLASH_5720VENDOR_A_ATMEL_DB021B:
14028         case FLASH_5720VENDOR_A_ATMEL_DB021D:
14029         case FLASH_5720VENDOR_M_ATMEL_DB041D:
14030         case FLASH_5720VENDOR_A_ATMEL_DB041B:
14031         case FLASH_5720VENDOR_A_ATMEL_DB041D:
14032         case FLASH_5720VENDOR_M_ATMEL_DB081D:
14033         case FLASH_5720VENDOR_A_ATMEL_DB081D:
14034         case FLASH_5720VENDOR_ATMEL_45USPT:
14035                 tp->nvram_jedecnum = JEDEC_ATMEL;
14036                 tg3_flag_set(tp, NVRAM_BUFFERED);
14037                 tg3_flag_set(tp, FLASH);
14038
14039                 switch (nvmpinstrp) {
14040                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14041                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14042                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14043                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14044                         break;
14045                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14046                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14047                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14048                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14049                         break;
14050                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14051                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14052                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14053                         break;
14054                 default:
14055                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14056                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14057                         break;
14058                 }
14059                 break;
14060         case FLASH_5720VENDOR_M_ST_M25PE10:
14061         case FLASH_5720VENDOR_M_ST_M45PE10:
14062         case FLASH_5720VENDOR_A_ST_M25PE10:
14063         case FLASH_5720VENDOR_A_ST_M45PE10:
14064         case FLASH_5720VENDOR_M_ST_M25PE20:
14065         case FLASH_5720VENDOR_M_ST_M45PE20:
14066         case FLASH_5720VENDOR_A_ST_M25PE20:
14067         case FLASH_5720VENDOR_A_ST_M45PE20:
14068         case FLASH_5720VENDOR_M_ST_M25PE40:
14069         case FLASH_5720VENDOR_M_ST_M45PE40:
14070         case FLASH_5720VENDOR_A_ST_M25PE40:
14071         case FLASH_5720VENDOR_A_ST_M45PE40:
14072         case FLASH_5720VENDOR_M_ST_M25PE80:
14073         case FLASH_5720VENDOR_M_ST_M45PE80:
14074         case FLASH_5720VENDOR_A_ST_M25PE80:
14075         case FLASH_5720VENDOR_A_ST_M45PE80:
14076         case FLASH_5720VENDOR_ST_25USPT:
14077         case FLASH_5720VENDOR_ST_45USPT:
14078                 tp->nvram_jedecnum = JEDEC_ST;
14079                 tg3_flag_set(tp, NVRAM_BUFFERED);
14080                 tg3_flag_set(tp, FLASH);
14081
14082                 switch (nvmpinstrp) {
14083                 case FLASH_5720VENDOR_M_ST_M25PE20:
14084                 case FLASH_5720VENDOR_M_ST_M45PE20:
14085                 case FLASH_5720VENDOR_A_ST_M25PE20:
14086                 case FLASH_5720VENDOR_A_ST_M45PE20:
14087                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14088                         break;
14089                 case FLASH_5720VENDOR_M_ST_M25PE40:
14090                 case FLASH_5720VENDOR_M_ST_M45PE40:
14091                 case FLASH_5720VENDOR_A_ST_M25PE40:
14092                 case FLASH_5720VENDOR_A_ST_M45PE40:
14093                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14094                         break;
14095                 case FLASH_5720VENDOR_M_ST_M25PE80:
14096                 case FLASH_5720VENDOR_M_ST_M45PE80:
14097                 case FLASH_5720VENDOR_A_ST_M25PE80:
14098                 case FLASH_5720VENDOR_A_ST_M45PE80:
14099                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14100                         break;
14101                 default:
14102                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14103                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14104                         break;
14105                 }
14106                 break;
14107         default:
14108                 tg3_flag_set(tp, NO_NVRAM);
14109                 return;
14110         }
14111
14112         tg3_nvram_get_pagesize(tp, nvcfg1);
14113         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14114                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14115
14116         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14117                 u32 val;
14118
14119                 if (tg3_nvram_read(tp, 0, &val))
14120                         return;
14121
14122                 if (val != TG3_EEPROM_MAGIC &&
14123                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14124                         tg3_flag_set(tp, NO_NVRAM);
14125         }
14126 }
14127
14128 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14129 static void tg3_nvram_init(struct tg3 *tp)
14130 {
14131         if (tg3_flag(tp, IS_SSB_CORE)) {
14132                 /* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
14133                 tg3_flag_clear(tp, NVRAM);
14134                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14135                 tg3_flag_set(tp, NO_NVRAM);
14136                 return;
14137         }
14138
14139         tw32_f(GRC_EEPROM_ADDR,
14140              (EEPROM_ADDR_FSM_RESET |
14141               (EEPROM_DEFAULT_CLOCK_PERIOD <<
14142                EEPROM_ADDR_CLKPERD_SHIFT)));
14143
14144         msleep(1);
14145
14146         /* Enable seeprom accesses. */
14147         tw32_f(GRC_LOCAL_CTRL,
14148              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14149         udelay(100);
14150
14151         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14152             tg3_asic_rev(tp) != ASIC_REV_5701) {
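                      /* Everything newer than the 5700/5701 has a real
                       * NVRAM interface: grab the arbitration lock,
                       * enable access, and let the per-ASIC helpers
                       * below size the part.
                       */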
14153                 tg3_flag_set(tp, NVRAM);
14154
14155                 if (tg3_nvram_lock(tp)) {
14156                         netdev_warn(tp->dev,
14157                                     "Cannot get nvram lock, %s failed\n",
14158                                     __func__);
14159                         return;
14160                 }
14161                 tg3_enable_nvram_access(tp);
14162
14163                 tp->nvram_size = 0;
14164
14165                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14166                         tg3_get_5752_nvram_info(tp);
14167                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14168                         tg3_get_5755_nvram_info(tp);
14169                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14170                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
14171                          tg3_asic_rev(tp) == ASIC_REV_5785)
14172                         tg3_get_5787_nvram_info(tp);
14173                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14174                         tg3_get_5761_nvram_info(tp);
14175                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14176                         tg3_get_5906_nvram_info(tp);
14177                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14178                          tg3_flag(tp, 57765_CLASS))
14179                         tg3_get_57780_nvram_info(tp);
14180                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14181                          tg3_asic_rev(tp) == ASIC_REV_5719)
14182                         tg3_get_5717_nvram_info(tp);
14183                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14184                          tg3_asic_rev(tp) == ASIC_REV_5762)
14185                         tg3_get_5720_nvram_info(tp);
14186                 else
14187                         tg3_get_nvram_info(tp);
14188
14189                 if (tp->nvram_size == 0)
14190                         tg3_get_nvram_size(tp);
14191
14192                 tg3_disable_nvram_access(tp);
14193                 tg3_nvram_unlock(tp);
14194
14195         } else {
14196                 tg3_flag_clear(tp, NVRAM);
14197                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14198
14199                 tg3_get_eeprom_size(tp);
14200         }
14201 }
14202
14203 struct subsys_tbl_ent {
14204         u16 subsys_vendor, subsys_devid;
14205         u32 phy_id;
14206 };
14207
14208 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14209         /* Broadcom boards. */
14210         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14211           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14212         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14213           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14214         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14215           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14216         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14217           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14218         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14219           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14220         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14221           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14222         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14223           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14224         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14225           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14226         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14227           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14228         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14229           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14230         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14231           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14232
14233         /* 3com boards. */
14234         { TG3PCI_SUBVENDOR_ID_3COM,
14235           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14236         { TG3PCI_SUBVENDOR_ID_3COM,
14237           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14238         { TG3PCI_SUBVENDOR_ID_3COM,
14239           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14240         { TG3PCI_SUBVENDOR_ID_3COM,
14241           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14242         { TG3PCI_SUBVENDOR_ID_3COM,
14243           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14244
14245         /* DELL boards. */
14246         { TG3PCI_SUBVENDOR_ID_DELL,
14247           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14248         { TG3PCI_SUBVENDOR_ID_DELL,
14249           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14250         { TG3PCI_SUBVENDOR_ID_DELL,
14251           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14252         { TG3PCI_SUBVENDOR_ID_DELL,
14253           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14254
14255         /* Compaq boards. */
14256         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14257           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14258         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14259           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14260         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14261           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14262         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14263           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14264         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14265           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14266
14267         /* IBM boards. */
14268         { TG3PCI_SUBVENDOR_ID_IBM,
14269           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14270 };
14271
14272 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14273 {
14274         int i;
14275
14276         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14277                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14278                      tp->pdev->subsystem_vendor) &&
14279                     (subsys_id_to_phy_id[i].subsys_devid ==
14280                      tp->pdev->subsystem_device))
14281                         return &subsys_id_to_phy_id[i];
14282         }
14283         return NULL;
14284 }
14285
14286 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14287 {
14288         u32 val;
14289
14290         tp->phy_id = TG3_PHY_ID_INVALID;
14291         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14292
14293         /* Assume an onboard, WOL-capable device by default. */
14294         tg3_flag_set(tp, EEPROM_WRITE_PROT);
14295         tg3_flag_set(tp, WOL_CAP);
14296
14297         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
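                      /* The 5906 exposes its configuration through the
                       * VCPU shadow register rather than NIC SRAM, so
                       * handle it here and skip the SRAM parsing below.
                       */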
14298                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14299                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14300                         tg3_flag_set(tp, IS_NIC);
14301                 }
14302                 val = tr32(VCPU_CFGSHDW);
14303                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14304                         tg3_flag_set(tp, ASPM_WORKAROUND);
14305                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14306                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14307                         tg3_flag_set(tp, WOL_ENABLE);
14308                         device_set_wakeup_enable(&tp->pdev->dev, true);
14309                 }
14310                 goto done;
14311         }
14312
14313         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14314         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14315                 u32 nic_cfg, led_cfg;
14316                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14317                 int eeprom_phy_serdes = 0;
14318
14319                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14320                 tp->nic_sram_data_cfg = nic_cfg;
14321
14322                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14323                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14324                 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14325                     tg3_asic_rev(tp) != ASIC_REV_5701 &&
14326                     tg3_asic_rev(tp) != ASIC_REV_5703 &&
14327                     (ver > 0) && (ver < 0x100))
14328                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14329
14330                 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14331                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14332
14333                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14334                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14335                         eeprom_phy_serdes = 1;
14336
14337                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14338                 if (nic_phy_id != 0) {
14339                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14340                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14341
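                              /* Fold the two SRAM ID words into the
                               * driver's internal PHY ID format -- the
                               * same packing tg3_phy_probe() applies to
                               * the MII_PHYSID1/MII_PHYSID2 registers.
                               */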
14342                         eeprom_phy_id  = (id1 >> 16) << 10;
14343                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
14344                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
14345                 } else
14346                         eeprom_phy_id = 0;
14347
14348                 tp->phy_id = eeprom_phy_id;
14349                 if (eeprom_phy_serdes) {
14350                         if (!tg3_flag(tp, 5705_PLUS))
14351                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14352                         else
14353                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14354                 }
14355
14356                 if (tg3_flag(tp, 5750_PLUS))
14357                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14358                                     SHASTA_EXT_LED_MODE_MASK);
14359                 else
14360                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14361
14362                 switch (led_cfg) {
14363                 default:
14364                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14365                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14366                         break;
14367
14368                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14369                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14370                         break;
14371
14372                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14373                         tp->led_ctrl = LED_CTRL_MODE_MAC;
14374
14375                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14376                          * read from some older 5700/5701 bootcode.
14377                          */
14378                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14379                             tg3_asic_rev(tp) == ASIC_REV_5701)
14380                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14381
14382                         break;
14383
14384                 case SHASTA_EXT_LED_SHARED:
14385                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
14386                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14387                             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14388                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14389                                                  LED_CTRL_MODE_PHY_2);
14390                         break;
14391
14392                 case SHASTA_EXT_LED_MAC:
14393                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14394                         break;
14395
14396                 case SHASTA_EXT_LED_COMBO:
14397                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
14398                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14399                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14400                                                  LED_CTRL_MODE_PHY_2);
14401                         break;
14402
14403                 }
14404
14405                 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14406                      tg3_asic_rev(tp) == ASIC_REV_5701) &&
14407                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14408                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14409
14410                 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14411                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14412
14413                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14414                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
14415                         if ((tp->pdev->subsystem_vendor ==
14416                              PCI_VENDOR_ID_ARIMA) &&
14417                             (tp->pdev->subsystem_device == 0x205a ||
14418                              tp->pdev->subsystem_device == 0x2063))
14419                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14420                 } else {
14421                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14422                         tg3_flag_set(tp, IS_NIC);
14423                 }
14424
14425                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14426                         tg3_flag_set(tp, ENABLE_ASF);
14427                         if (tg3_flag(tp, 5750_PLUS))
14428                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14429                 }
14430
14431                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14432                     tg3_flag(tp, 5750_PLUS))
14433                         tg3_flag_set(tp, ENABLE_APE);
14434
14435                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14436                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14437                         tg3_flag_clear(tp, WOL_CAP);
14438
14439                 if (tg3_flag(tp, WOL_CAP) &&
14440                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14441                         tg3_flag_set(tp, WOL_ENABLE);
14442                         device_set_wakeup_enable(&tp->pdev->dev, true);
14443                 }
14444
14445                 if (cfg2 & (1 << 17))
14446                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14447
14448                 /* SerDes signal pre-emphasis in register 0x590 is set
14449                  * by the bootcode if bit 18 is set. */
14450                 if (cfg2 & (1 << 18))
14451                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14452
14453                 if ((tg3_flag(tp, 57765_PLUS) ||
14454                      (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14455                       tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14456                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14457                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14458
14459                 if (tg3_flag(tp, PCI_EXPRESS) &&
14460                     tg3_asic_rev(tp) != ASIC_REV_5785 &&
14461                     !tg3_flag(tp, 57765_PLUS)) {
14462                         u32 cfg3;
14463
14464                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14465                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
14466                                 tg3_flag_set(tp, ASPM_WORKAROUND);
14467                 }
14468
14469                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14470                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14471                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14472                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14473                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14474                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14475         }
14476 done:
14477         if (tg3_flag(tp, WOL_CAP))
14478                 device_set_wakeup_enable(&tp->pdev->dev,
14479                                          tg3_flag(tp, WOL_ENABLE));
14480         else
14481                 device_set_wakeup_capable(&tp->pdev->dev, false);
14482 }
14483
14484 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14485 {
14486         int i, err;
14487         u32 val2, off = offset * 8;
14488
14489         err = tg3_nvram_lock(tp);
14490         if (err)
14491                 return err;
14492
14493         tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14494         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14495                         APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14496         tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14497         udelay(10);
14498
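              /* Poll for command completion for up to ~1 ms (100 polls
               * of 10 us each), mirroring tg3_issue_otp_command() below.
               */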
14499         for (i = 0; i < 100; i++) {
14500                 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14501                 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14502                         *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14503                         break;
14504                 }
14505                 udelay(10);
14506         }
14507
14508         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14509
14510         tg3_nvram_unlock(tp);
14511         if (val2 & APE_OTP_STATUS_CMD_DONE)
14512                 return 0;
14513
14514         return -EBUSY;
14515 }
14516
14517 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14518 {
14519         int i;
14520         u32 val;
14521
14522         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14523         tw32(OTP_CTRL, cmd);
14524
14525         /* Wait for up to 1 ms for command to execute. */
14526         for (i = 0; i < 100; i++) {
14527                 val = tr32(OTP_STATUS);
14528                 if (val & OTP_STATUS_CMD_DONE)
14529                         break;
14530                 udelay(10);
14531         }
14532
14533         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14534 }
14535
14536 /* Read the gphy configuration from the OTP region of the chip.  The gphy
14537  * configuration is a 32-bit value that straddles the alignment boundary.
14538  * We do two 32-bit reads and then shift and merge the results.
14539  */
14540 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14541 {
14542         u32 bhalf_otp, thalf_otp;
14543
14544         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14545
14546         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14547                 return 0;
14548
14549         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14550
14551         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14552                 return 0;
14553
14554         thalf_otp = tr32(OTP_READ_DATA);
14555
14556         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14557
14558         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14559                 return 0;
14560
14561         bhalf_otp = tr32(OTP_READ_DATA);
14562
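              /* The 32-bit gphy config straddles the two OTP words read
               * above: its upper half is in the low 16 bits of the first
               * word and its lower half in the high 16 bits of the second.
               */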
14563         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14564 }
14565
14566 static void tg3_phy_init_link_config(struct tg3 *tp)
14567 {
14568         u32 adv = ADVERTISED_Autoneg;
14569
14570         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14571                 adv |= ADVERTISED_1000baseT_Half |
14572                        ADVERTISED_1000baseT_Full;
14573
14574         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14575                 adv |= ADVERTISED_100baseT_Half |
14576                        ADVERTISED_100baseT_Full |
14577                        ADVERTISED_10baseT_Half |
14578                        ADVERTISED_10baseT_Full |
14579                        ADVERTISED_TP;
14580         else
14581                 adv |= ADVERTISED_FIBRE;
14582
14583         tp->link_config.advertising = adv;
14584         tp->link_config.speed = SPEED_UNKNOWN;
14585         tp->link_config.duplex = DUPLEX_UNKNOWN;
14586         tp->link_config.autoneg = AUTONEG_ENABLE;
14587         tp->link_config.active_speed = SPEED_UNKNOWN;
14588         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14589
14590         tp->old_link = -1;
14591 }
14592
14593 static int tg3_phy_probe(struct tg3 *tp)
14594 {
14595         u32 hw_phy_id_1, hw_phy_id_2;
14596         u32 hw_phy_id, hw_phy_id_masked;
14597         int err;
14598
14599         /* flow control autonegotiation is default behavior */
14600         tg3_flag_set(tp, PAUSE_AUTONEG);
14601         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14602
14603         if (tg3_flag(tp, ENABLE_APE)) {
14604                 switch (tp->pci_fn) {
14605                 case 0:
14606                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14607                         break;
14608                 case 1:
14609                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14610                         break;
14611                 case 2:
14612                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14613                         break;
14614                 case 3:
14615                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14616                         break;
14617                 }
14618         }
14619
14620         if (tg3_flag(tp, USE_PHYLIB))
14621                 return tg3_phy_init(tp);
14622
14623         /* Reading the PHY ID register can conflict with ASF
14624          * firmware access to the PHY hardware.
14625          */
14626         err = 0;
14627         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14628                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14629         } else {
14630                 /* Now read the physical PHY_ID from the chip and verify
14631                  * that it is sane.  If it doesn't look good, we fall back
14632                  * to the PHY_ID found in the eeprom area or, failing
14633                  * that, the hard-coded subsys ID table.
14634                  */
14635                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14636                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14637
14638                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
14639                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14640                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
14641
14642                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14643         }
14644
14645         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14646                 tp->phy_id = hw_phy_id;
14647                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14648                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14649                 else
14650                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14651         } else {
14652                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14653                         /* Do nothing, phy ID already set up in
14654                          * tg3_get_eeprom_hw_cfg().
14655                          */
14656                 } else {
14657                         struct subsys_tbl_ent *p;
14658
14659                         /* No eeprom signature?  Try the hardcoded
14660                          * subsys device table.
14661                          */
14662                         p = tg3_lookup_by_subsys(tp);
14663                         if (p) {
14664                                 tp->phy_id = p->phy_id;
14665                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
14666                                 /* So far we have seen the IDs 0xbc050cd0,
14667                                  * 0xbc050f80 and 0xbc050c30 on devices
14668                                  * connected to a BCM4785, and there are
14669                                  * probably more.  For now, just assume
14670                                  * that the phy is supported when it is
14671                                  * connected to an SSB core.
14672                                  */
14673                                 return -ENODEV;
14674                         }
14675
14676                         if (!tp->phy_id ||
14677                             tp->phy_id == TG3_PHY_ID_BCM8002)
14678                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14679                 }
14680         }
14681
14682         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14683             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
14684              tg3_asic_rev(tp) == ASIC_REV_5720 ||
14685              tg3_asic_rev(tp) == ASIC_REV_57766 ||
14686              tg3_asic_rev(tp) == ASIC_REV_5762 ||
14687              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
14688               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
14689              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
14690               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
14691                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14692
14693         tg3_phy_init_link_config(tp);
14694
14695         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14696             !tg3_flag(tp, ENABLE_APE) &&
14697             !tg3_flag(tp, ENABLE_ASF)) {
14698                 u32 bmsr, dummy;
14699
14700                 tg3_readphy(tp, MII_BMSR, &bmsr);
14701                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14702                     (bmsr & BMSR_LSTATUS))
14703                         goto skip_phy_reset;
14704
14705                 err = tg3_phy_reset(tp);
14706                 if (err)
14707                         return err;
14708
14709                 tg3_phy_set_wirespeed(tp);
14710
14711                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14712                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14713                                             tp->link_config.flowctrl);
14714
14715                         tg3_writephy(tp, MII_BMCR,
14716                                      BMCR_ANENABLE | BMCR_ANRESTART);
14717                 }
14718         }
14719
14720 skip_phy_reset:
14721         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
14722                 err = tg3_init_5401phy_dsp(tp);
14723                 if (err)
14724                         return err;
14725
14726                 err = tg3_init_5401phy_dsp(tp);
14727         }
14728
14729         return err;
14730 }
14731
14732 static void tg3_read_vpd(struct tg3 *tp)
14733 {
14734         u8 *vpd_data;
14735         unsigned int block_end, rosize, len;
14736         u32 vpdlen;
14737         int j, i = 0;
14738
14739         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14740         if (!vpd_data)
14741                 goto out_no_vpd;
14742
14743         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14744         if (i < 0)
14745                 goto out_not_found;
14746
14747         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14748         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14749         i += PCI_VPD_LRDT_TAG_SIZE;
14750
14751         if (block_end > vpdlen)
14752                 goto out_not_found;
14753
14754         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14755                                       PCI_VPD_RO_KEYWORD_MFR_ID);
14756         if (j > 0) {
14757                 len = pci_vpd_info_field_size(&vpd_data[j]);
14758
14759                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14760                 if (j + len > block_end || len != 4 ||
14761                     memcmp(&vpd_data[j], "1028", 4))
14762                         goto partno;
14763
14764                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14765                                               PCI_VPD_RO_KEYWORD_VENDOR0);
14766                 if (j < 0)
14767                         goto partno;
14768
14769                 len = pci_vpd_info_field_size(&vpd_data[j]);
14770
14771                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14772                 if (j + len > block_end)
14773                         goto partno;
14774
14775                 if (len >= sizeof(tp->fw_ver))
14776                         len = sizeof(tp->fw_ver) - 1;
14777                 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
14778                 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
14779                          &vpd_data[j]);
14780         }
14781
14782 partno:
14783         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14784                                       PCI_VPD_RO_KEYWORD_PARTNO);
14785         if (i < 0)
14786                 goto out_not_found;
14787
14788         len = pci_vpd_info_field_size(&vpd_data[i]);
14789
14790         i += PCI_VPD_INFO_FLD_HDR_SIZE;
14791         if (len > TG3_BPN_SIZE ||
14792             (len + i) > vpdlen)
14793                 goto out_not_found;
14794
14795         memcpy(tp->board_part_number, &vpd_data[i], len);
14796
14797 out_not_found:
14798         kfree(vpd_data);
14799         if (tp->board_part_number[0])
14800                 return;
14801
14802 out_no_vpd:
14803         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
14804                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14805                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14806                         strcpy(tp->board_part_number, "BCM5717");
14807                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14808                         strcpy(tp->board_part_number, "BCM5718");
14809                 else
14810                         goto nomatch;
14811         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
14812                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14813                         strcpy(tp->board_part_number, "BCM57780");
14814                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14815                         strcpy(tp->board_part_number, "BCM57760");
14816                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14817                         strcpy(tp->board_part_number, "BCM57790");
14818                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14819                         strcpy(tp->board_part_number, "BCM57788");
14820                 else
14821                         goto nomatch;
14822         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
14823                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14824                         strcpy(tp->board_part_number, "BCM57761");
14825                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14826                         strcpy(tp->board_part_number, "BCM57765");
14827                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14828                         strcpy(tp->board_part_number, "BCM57781");
14829                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14830                         strcpy(tp->board_part_number, "BCM57785");
14831                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14832                         strcpy(tp->board_part_number, "BCM57791");
14833                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14834                         strcpy(tp->board_part_number, "BCM57795");
14835                 else
14836                         goto nomatch;
14837         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
14838                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14839                         strcpy(tp->board_part_number, "BCM57762");
14840                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14841                         strcpy(tp->board_part_number, "BCM57766");
14842                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14843                         strcpy(tp->board_part_number, "BCM57782");
14844                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14845                         strcpy(tp->board_part_number, "BCM57786");
14846                 else
14847                         goto nomatch;
14848         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14849                 strcpy(tp->board_part_number, "BCM95906");
14850         } else {
14851 nomatch:
14852                 strcpy(tp->board_part_number, "none");
14853         }
14854 }
14855
14856 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14857 {
14858         u32 val;
14859
14860         if (tg3_nvram_read(tp, offset, &val) ||
14861             (val & 0xfc000000) != 0x0c000000 ||
14862             tg3_nvram_read(tp, offset + 4, &val) ||
14863             val != 0)
14864                 return 0;
14865
14866         return 1;
14867 }
14868
14869 static void tg3_read_bc_ver(struct tg3 *tp)
14870 {
14871         u32 val, offset, start, ver_offset;
14872         int i, dst_off;
14873         bool newver = false;
14874
14875         if (tg3_nvram_read(tp, 0xc, &offset) ||
14876             tg3_nvram_read(tp, 0x4, &start))
14877                 return;
14878
14879         offset = tg3_nvram_logical_addr(tp, offset);
14880
14881         if (tg3_nvram_read(tp, offset, &val))
14882                 return;
14883
14884         if ((val & 0xfc000000) == 0x0c000000) {
14885                 if (tg3_nvram_read(tp, offset + 4, &val))
14886                         return;
14887
14888                 if (val == 0)
14889                         newver = true;
14890         }
14891
14892         dst_off = strlen(tp->fw_ver);
14893
14894         if (newver) {
14895                 if (TG3_VER_SIZE - dst_off < 16 ||
14896                     tg3_nvram_read(tp, offset + 8, &ver_offset))
14897                         return;
14898
14899                 offset = offset + ver_offset - start;
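                      /* The version string starts at ver_offset within
                       * the image; copy out 16 bytes, one big-endian
                       * word at a time, to preserve the NVRAM byte
                       * order.
                       */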
14900                 for (i = 0; i < 16; i += 4) {
14901                         __be32 v;
14902                         if (tg3_nvram_read_be32(tp, offset + i, &v))
14903                                 return;
14904
14905                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14906                 }
14907         } else {
14908                 u32 major, minor;
14909
14910                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14911                         return;
14912
14913                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14914                         TG3_NVM_BCVER_MAJSFT;
14915                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14916                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14917                          "v%d.%02d", major, minor);
14918         }
14919 }
14920
14921 static void tg3_read_hwsb_ver(struct tg3 *tp)
14922 {
14923         u32 val, major, minor;
14924
14925         /* Use native endian representation */
14926         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14927                 return;
14928
14929         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14930                 TG3_NVM_HWSB_CFG1_MAJSFT;
14931         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14932                 TG3_NVM_HWSB_CFG1_MINSFT;
14933
14934         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
14935 }
14936
14937 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
14938 {
14939         u32 offset, major, minor, build;
14940
14941         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14942
14943         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14944                 return;
14945
14946         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14947         case TG3_EEPROM_SB_REVISION_0:
14948                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14949                 break;
14950         case TG3_EEPROM_SB_REVISION_2:
14951                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14952                 break;
14953         case TG3_EEPROM_SB_REVISION_3:
14954                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14955                 break;
14956         case TG3_EEPROM_SB_REVISION_4:
14957                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14958                 break;
14959         case TG3_EEPROM_SB_REVISION_5:
14960                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14961                 break;
14962         case TG3_EEPROM_SB_REVISION_6:
14963                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14964                 break;
14965         default:
14966                 return;
14967         }
14968
14969         if (tg3_nvram_read(tp, offset, &val))
14970                 return;
14971
14972         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14973                 TG3_EEPROM_SB_EDH_BLD_SHFT;
14974         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14975                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14976         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
14977
14978         if (minor > 99 || build > 26)
14979                 return;
14980
14981         offset = strlen(tp->fw_ver);
14982         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14983                  " v%d.%02d", major, minor);
14984
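              /* Non-zero build numbers (1-26, enforced above) are
               * appended as a single suffix letter, 'a' for build 1
               * through 'z' for build 26.
               */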
14985         if (build > 0) {
14986                 offset = strlen(tp->fw_ver);
14987                 if (offset < TG3_VER_SIZE - 1)
14988                         tp->fw_ver[offset] = 'a' + build - 1;
14989         }
14990 }
14991
14992 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
14993 {
14994         u32 val, offset, start;
14995         int i, vlen;
14996
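              /* Walk the NVM directory for the ASF INI entry; each
               * directory entry encodes its type in the high bits of
               * the first word.
               */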
14997         for (offset = TG3_NVM_DIR_START;
14998              offset < TG3_NVM_DIR_END;
14999              offset += TG3_NVM_DIRENT_SIZE) {
15000                 if (tg3_nvram_read(tp, offset, &val))
15001                         return;
15002
15003                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15004                         break;
15005         }
15006
15007         if (offset == TG3_NVM_DIR_END)
15008                 return;
15009
15010         if (!tg3_flag(tp, 5705_PLUS))
15011                 start = 0x08000000;
15012         else if (tg3_nvram_read(tp, offset - 4, &start))
15013                 return;
15014
15015         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15016             !tg3_fw_img_is_valid(tp, offset) ||
15017             tg3_nvram_read(tp, offset + 8, &val))
15018                 return;
15019
15020         offset += val - start;
15021
15022         vlen = strlen(tp->fw_ver);
15023
15024         tp->fw_ver[vlen++] = ',';
15025         tp->fw_ver[vlen++] = ' ';
15026
15027         for (i = 0; i < 4; i++) {
15028                 __be32 v;
15029                 if (tg3_nvram_read_be32(tp, offset, &v))
15030                         return;
15031
15032                 offset += sizeof(v);
15033
15034                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15035                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15036                         break;
15037                 }
15038
15039                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15040                 vlen += sizeof(v);
15041         }
15042 }
15043
15044 static void tg3_probe_ncsi(struct tg3 *tp)
15045 {
15046         u32 apedata;
15047
15048         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15049         if (apedata != APE_SEG_SIG_MAGIC)
15050                 return;
15051
15052         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15053         if (!(apedata & APE_FW_STATUS_READY))
15054                 return;
15055
15056         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15057                 tg3_flag_set(tp, APE_HAS_NCSI);
15058 }
15059
15060 static void tg3_read_dash_ver(struct tg3 *tp)
15061 {
15062         int vlen;
15063         u32 apedata;
15064         char *fwtype;
15065
15066         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15067
15068         if (tg3_flag(tp, APE_HAS_NCSI))
15069                 fwtype = "NCSI";
15070         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15071                 fwtype = "SMASH";
15072         else
15073                 fwtype = "DASH";
15074
15075         vlen = strlen(tp->fw_ver);
15076
15077         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15078                  fwtype,
15079                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15080                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15081                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15082                  (apedata & APE_FW_VERSION_BLDMSK));
15083 }
15084
15085 static void tg3_read_otp_ver(struct tg3 *tp)
15086 {
15087         u32 val, val2;
15088
15089         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15090                 return;
15091
15092         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15093             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15094             TG3_OTP_MAGIC0_VALID(val)) {
15095                 u64 val64 = (u64) val << 32 | val2;
15096                 u32 ver = 0;
15097                 int i, vlen;
15098
15099                 for (i = 0; i < 7; i++) {
15100                         if ((val64 & 0xff) == 0)
15101                                 break;
15102                         ver = val64 & 0xff;
15103                         val64 >>= 8;
15104                 }
15105                 vlen = strlen(tp->fw_ver);
15106                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15107         }
15108 }
15109
15110 static void tg3_read_fw_ver(struct tg3 *tp)
15111 {
15112         u32 val;
15113         bool vpd_vers = false;
15114
15115         if (tp->fw_ver[0] != 0)
15116                 vpd_vers = true;
15117
15118         if (tg3_flag(tp, NO_NVRAM)) {
15119                 strcat(tp->fw_ver, "sb");
15120                 tg3_read_otp_ver(tp);
15121                 return;
15122         }
15123
15124         if (tg3_nvram_read(tp, 0, &val))
15125                 return;
15126
15127         if (val == TG3_EEPROM_MAGIC)
15128                 tg3_read_bc_ver(tp);
15129         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15130                 tg3_read_sb_ver(tp, val);
15131         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15132                 tg3_read_hwsb_ver(tp);
15133
15134         if (tg3_flag(tp, ENABLE_ASF)) {
15135                 if (tg3_flag(tp, ENABLE_APE)) {
15136                         tg3_probe_ncsi(tp);
15137                         if (!vpd_vers)
15138                                 tg3_read_dash_ver(tp);
15139                 } else if (!vpd_vers) {
15140                         tg3_read_mgmtfw_ver(tp);
15141                 }
15142         }
15143
15144         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15145 }
15146
15147 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15148 {
15149         if (tg3_flag(tp, LRG_PROD_RING_CAP))
15150                 return TG3_RX_RET_MAX_SIZE_5717;
15151         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15152                 return TG3_RX_RET_MAX_SIZE_5700;
15153         else
15154                 return TG3_RX_RET_MAX_SIZE_5705;
15155 }
15156
15157 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15158         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15159         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15160         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15161         { },
15162 };
15163
15164 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15165 {
15166         struct pci_dev *peer;
15167         unsigned int func, devnr = tp->pdev->devfn & ~7;
15168
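              /* devfn keeps the PCI function number in its low three
               * bits, so masking those off yields function 0 of this
               * slot; probe all eight functions for a sibling device.
               */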
15169         for (func = 0; func < 8; func++) {
15170                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15171                 if (peer && peer != tp->pdev)
15172                         break;
15173                 pci_dev_put(peer);
15174         }
15175         /* 5704 can be configured in single-port mode; set peer to
15176          * tp->pdev in that case.
15177          */
15178         if (!peer) {
15179                 peer = tp->pdev;
15180                 return peer;
15181         }
15182
15183         /*
15184          * We don't need to keep the refcount elevated; there's no way
15185          * to remove one half of this device without removing the other.
15186          */
15187         pci_dev_put(peer);
15188
15189         return peer;
15190 }
15191
15192 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15193 {
15194         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
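              /* Chips that cannot encode their revision in the misc
               * host control register report ASIC_REV_USE_PROD_ID_REG
               * there and expose the real chip ID via a product ID
               * config register, selected below by PCI device ID.
               */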
15195         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15196                 u32 reg;
15197
15198                 /* All devices that use the alternate
15199                  * ASIC REV location have a CPMU.
15200                  */
15201                 tg3_flag_set(tp, CPMU_PRESENT);
15202
15203                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15204                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15205                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15206                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15207                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15208                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15209                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15210                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15211                         reg = TG3PCI_GEN2_PRODID_ASICREV;
15212                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15213                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15214                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15215                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15216                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15217                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15218                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15219                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15220                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15221                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15222                         reg = TG3PCI_GEN15_PRODID_ASICREV;
15223                 else
15224                         reg = TG3PCI_PRODID_ASICREV;
15225
15226                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15227         }
15228
15229         /* Wrong chip ID in 5752 A0. This code can be removed later
15230          * as A0 is not in production.
15231          */
15232         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15233                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15234
15235         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15236                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15237
15238         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15239             tg3_asic_rev(tp) == ASIC_REV_5719 ||
15240             tg3_asic_rev(tp) == ASIC_REV_5720)
15241                 tg3_flag_set(tp, 5717_PLUS);
15242
15243         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15244             tg3_asic_rev(tp) == ASIC_REV_57766)
15245                 tg3_flag_set(tp, 57765_CLASS);
15246
15247         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15248              tg3_asic_rev(tp) == ASIC_REV_5762)
15249                 tg3_flag_set(tp, 57765_PLUS);
15250
15251         /* Intentionally exclude ASIC_REV_5906 */
15252         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15253             tg3_asic_rev(tp) == ASIC_REV_5787 ||
15254             tg3_asic_rev(tp) == ASIC_REV_5784 ||
15255             tg3_asic_rev(tp) == ASIC_REV_5761 ||
15256             tg3_asic_rev(tp) == ASIC_REV_5785 ||
15257             tg3_asic_rev(tp) == ASIC_REV_57780 ||
15258             tg3_flag(tp, 57765_PLUS))
15259                 tg3_flag_set(tp, 5755_PLUS);
15260
15261         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15262             tg3_asic_rev(tp) == ASIC_REV_5714)
15263                 tg3_flag_set(tp, 5780_CLASS);
15264
15265         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15266             tg3_asic_rev(tp) == ASIC_REV_5752 ||
15267             tg3_asic_rev(tp) == ASIC_REV_5906 ||
15268             tg3_flag(tp, 5755_PLUS) ||
15269             tg3_flag(tp, 5780_CLASS))
15270                 tg3_flag_set(tp, 5750_PLUS);
15271
15272         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15273             tg3_flag(tp, 5750_PLUS))
15274                 tg3_flag_set(tp, 5705_PLUS);
15275 }
15276
15277 static bool tg3_10_100_only_device(struct tg3 *tp,
15278                                    const struct pci_device_id *ent)
15279 {
15280         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15281
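              /* Board IDs 0x8000 and 0x4000 identify 10/100-only 5703
               * variants, and FET PHYs are 10/100 parts, regardless of
               * the PCI ID table flags checked below.
               */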
15282         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15283              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15284             (tp->phy_flags & TG3_PHYFLG_IS_FET))
15285                 return true;
15286
15287         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15288                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15289                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15290                                 return true;
15291                 } else {
15292                         return true;
15293                 }
15294         }
15295
15296         return false;
15297 }
15298
15299 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15300 {
15301         u32 misc_ctrl_reg;
15302         u32 pci_state_reg, grc_misc_cfg;
15303         u32 val;
15304         u16 pci_cmd;
15305         int err;
15306
15307         /* Force memory write invalidate off.  If we leave it on,
15308          * then on 5700_BX chips we have to enable a workaround.
15309          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15310          * to match the cacheline size.  The Broadcom driver has this
15311          * workaround but turns MWI off all the time, so it never
15312          * uses it.  This suggests that the workaround is insufficient.
15313          */
15314         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15315         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15316         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15317
15318         /* Important! -- Make sure register accesses are byteswapped
15319          * correctly.  Also, for those chips that require it, make
15320          * sure that indirect register accesses are enabled before
15321          * the first operation.
15322          */
15323         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15324                               &misc_ctrl_reg);
15325         tp->misc_host_ctrl |= (misc_ctrl_reg &
15326                                MISC_HOST_CTRL_CHIPREV);
15327         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15328                                tp->misc_host_ctrl);
15329
15330         tg3_detect_asic_rev(tp, misc_ctrl_reg);
15331
15332         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15333          * we need to disable memory and use config. cycles
15334          * only to access all registers. The 5702/03 chips
15335          * can mistakenly decode the special cycles from the
15336          * ICH chipsets as memory write cycles, causing corruption
15337          * of register and memory space. Only certain ICH bridges
15338          * will drive special cycles with non-zero data during the
15339          * address phase which can fall within the 5703's address
15340          * range. This is not an ICH bug as the PCI spec allows
15341          * non-zero address during special cycles. However, only
15342          * these ICH bridges are known to drive non-zero addresses
15343          * during special cycles.
15344          *
15345          * Since special cycles do not cross PCI bridges, we only
15346          * enable this workaround if the 5703 is on the secondary
15347          * bus of these ICH bridges.
15348          */
15349         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15350             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15351                 static struct tg3_dev_id {
15352                         u32     vendor;
15353                         u32     device;
15354                         u32     rev;
15355                 } ich_chipsets[] = {
15356                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15357                           PCI_ANY_ID },
15358                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15359                           PCI_ANY_ID },
15360                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15361                           0xa },
15362                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15363                           PCI_ANY_ID },
15364                         { },
15365                 };
15366                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15367                 struct pci_dev *bridge = NULL;
15368
15369                 while (pci_id->vendor != 0) {
15370                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
15371                                                 bridge);
15372                         if (!bridge) {
15373                                 pci_id++;
15374                                 continue;
15375                         }
15376                         if (pci_id->rev != PCI_ANY_ID) {
15377                                 if (bridge->revision > pci_id->rev)
15378                                         continue;
15379                         }
15380                         if (bridge->subordinate &&
15381                             (bridge->subordinate->number ==
15382                              tp->pdev->bus->number)) {
15383                                 tg3_flag_set(tp, ICH_WORKAROUND);
15384                                 pci_dev_put(bridge);
15385                                 break;
15386                         }
15387                 }
15388         }
15389
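        /* Note for the bridge-scan loops in this function: pci_get_device()
         * resumes the search after the device passed in (dropping that
         * device's reference) and returns the next match with an elevated
         * reference count.  Hence the explicit pci_dev_put() only on the
         * early-exit paths, where a bridge reference is still held.
         */
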
15390         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15391                 static struct tg3_dev_id {
15392                         u32     vendor;
15393                         u32     device;
15394                 } bridge_chipsets[] = {
15395                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15396                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15397                         { },
15398                 };
15399                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15400                 struct pci_dev *bridge = NULL;
15401
15402                 while (pci_id->vendor != 0) {
15403                         bridge = pci_get_device(pci_id->vendor,
15404                                                 pci_id->device,
15405                                                 bridge);
15406                         if (!bridge) {
15407                                 pci_id++;
15408                                 continue;
15409                         }
15410                         if (bridge->subordinate &&
15411                             (bridge->subordinate->number <=
15412                              tp->pdev->bus->number) &&
15413                             (bridge->subordinate->busn_res.end >=
15414                              tp->pdev->bus->number)) {
15415                                 tg3_flag_set(tp, 5701_DMA_BUG);
15416                                 pci_dev_put(bridge);
15417                                 break;
15418                         }
15419                 }
15420         }
15421
15422         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15423          * DMA addresses wider than 40 bits. This bridge may have
15424          * additional 57xx devices behind it, for example in some
15425          * 4-port NIC designs. Any tg3 device found behind the bridge
15426          * will also need the 40-bit DMA workaround.
15427          */
15428         if (tg3_flag(tp, 5780_CLASS)) {
15429                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15430                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15431         } else {
15432                 struct pci_dev *bridge = NULL;
15433
15434                 do {
15435                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15436                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
15437                                                 bridge);
15438                         if (bridge && bridge->subordinate &&
15439                             (bridge->subordinate->number <=
15440                              tp->pdev->bus->number) &&
15441                             (bridge->subordinate->busn_res.end >=
15442                              tp->pdev->bus->number)) {
15443                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15444                                 pci_dev_put(bridge);
15445                                 break;
15446                         }
15447                 } while (bridge);
15448         }
15449
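        /* The effect of 40BIT_DMA_BUG shows up later in the probe path,
         * where the DMA mask is capped at 40 bits.  A hedged sketch using
         * the generic DMA API (error handling omitted):
         */
#if 0
        if (tg3_flag(tp, 40BIT_DMA_BUG))
                dma_set_mask(&tp->pdev->dev, DMA_BIT_MASK(40));
#endif
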
15450         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15451             tg3_asic_rev(tp) == ASIC_REV_5714)
15452                 tp->pdev_peer = tg3_find_peer(tp);
15453
15454         /* Determine TSO capabilities */
15455         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15456                 ; /* Do nothing. HW bug. */
15457         else if (tg3_flag(tp, 57765_PLUS))
15458                 tg3_flag_set(tp, HW_TSO_3);
15459         else if (tg3_flag(tp, 5755_PLUS) ||
15460                  tg3_asic_rev(tp) == ASIC_REV_5906)
15461                 tg3_flag_set(tp, HW_TSO_2);
15462         else if (tg3_flag(tp, 5750_PLUS)) {
15463                 tg3_flag_set(tp, HW_TSO_1);
15464                 tg3_flag_set(tp, TSO_BUG);
15465                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15466                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15467                         tg3_flag_clear(tp, TSO_BUG);
15468         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15469                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15470                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15471                 tg3_flag_set(tp, FW_TSO);
15472                 tg3_flag_set(tp, TSO_BUG);
15473                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15474                         tp->fw_needed = FIRMWARE_TG3TSO5;
15475                 else
15476                         tp->fw_needed = FIRMWARE_TG3TSO;
15477         }
15478
15479         /* Selectively allow TSO based on operating conditions */
15480         if (tg3_flag(tp, HW_TSO_1) ||
15481             tg3_flag(tp, HW_TSO_2) ||
15482             tg3_flag(tp, HW_TSO_3) ||
15483             tg3_flag(tp, FW_TSO)) {
15484                 /* For firmware TSO, assume ASF is disabled.
15485                  * We'll disable TSO later if we discover ASF
15486                  * is enabled in tg3_get_eeprom_hw_cfg().
15487                  */
15488                 tg3_flag_set(tp, TSO_CAPABLE);
15489         } else {
15490                 tg3_flag_clear(tp, TSO_CAPABLE);
15491                 tg3_flag_clear(tp, TSO_BUG);
15492                 tp->fw_needed = NULL;
15493         }
15494
15495         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15496                 tp->fw_needed = FIRMWARE_TG3;
15497
15498         if (tg3_asic_rev(tp) == ASIC_REV_57766)
15499                 tp->fw_needed = FIRMWARE_TG357766;
15500
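        /* TSO_CAPABLE is consumed later, when netdev features are set up.
         * A hedged sketch of that gating (not this driver's exact code):
         */
#if 0
        if (tg3_flag(tp, TSO_CAPABLE))
                tp->dev->hw_features |= NETIF_F_TSO;
#endif
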
15501         tp->irq_max = 1;
15502
15503         if (tg3_flag(tp, 5750_PLUS)) {
15504                 tg3_flag_set(tp, SUPPORT_MSI);
15505                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15506                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15507                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15508                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15509                      tp->pdev_peer == tp->pdev))
15510                         tg3_flag_clear(tp, SUPPORT_MSI);
15511
15512                 if (tg3_flag(tp, 5755_PLUS) ||
15513                     tg3_asic_rev(tp) == ASIC_REV_5906) {
15514                         tg3_flag_set(tp, 1SHOT_MSI);
15515                 }
15516
15517                 if (tg3_flag(tp, 57765_PLUS)) {
15518                         tg3_flag_set(tp, SUPPORT_MSIX);
15519                         tp->irq_max = TG3_IRQ_MAX_VECS;
15520                 }
15521         }
15522
15523         tp->txq_max = 1;
15524         tp->rxq_max = 1;
15525         if (tp->irq_max > 1) {
15526                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15527                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15528
15529                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15530                     tg3_asic_rev(tp) == ASIC_REV_5720)
15531                         tp->txq_max = tp->irq_max - 1;
15532         }
15533
15534         if (tg3_flag(tp, 5755_PLUS) ||
15535             tg3_asic_rev(tp) == ASIC_REV_5906)
15536                 tg3_flag_set(tp, SHORT_DMA_BUG);
15537
15538         if (tg3_asic_rev(tp) == ASIC_REV_5719)
15539                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15540
15541         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15542             tg3_asic_rev(tp) == ASIC_REV_5719 ||
15543             tg3_asic_rev(tp) == ASIC_REV_5720 ||
15544             tg3_asic_rev(tp) == ASIC_REV_5762)
15545                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15546
15547         if (tg3_flag(tp, 57765_PLUS) &&
15548             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15549                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15550
15551         if (!tg3_flag(tp, 5705_PLUS) ||
15552             tg3_flag(tp, 5780_CLASS) ||
15553             tg3_flag(tp, USE_JUMBO_BDFLAG))
15554                 tg3_flag_set(tp, JUMBO_CAPABLE);
15555
15556         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15557                               &pci_state_reg);
15558
15559         if (pci_is_pcie(tp->pdev)) {
15560                 u16 lnkctl;
15561
15562                 tg3_flag_set(tp, PCI_EXPRESS);
15563
15564                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15565                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15566                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15567                                 tg3_flag_clear(tp, HW_TSO_2);
15568                                 tg3_flag_clear(tp, TSO_CAPABLE);
15569                         }
15570                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15571                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
15572                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15573                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15574                                 tg3_flag_set(tp, CLKREQ_BUG);
15575                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15576                         tg3_flag_set(tp, L1PLLPD_EN);
15577                 }
15578         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15579                 /* BCM5785 devices are effectively PCIe devices, and should
15580                  * follow PCIe codepaths, but do not have a PCIe capabilities
15581                  * section.
15582                  */
15583                 tg3_flag_set(tp, PCI_EXPRESS);
15584         } else if (!tg3_flag(tp, 5705_PLUS) ||
15585                    tg3_flag(tp, 5780_CLASS)) {
15586                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15587                 if (!tp->pcix_cap) {
15588                         dev_err(&tp->pdev->dev,
15589                                 "Cannot find PCI-X capability, aborting\n");
15590                         return -EIO;
15591                 }
15592
15593                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15594                         tg3_flag_set(tp, PCIX_MODE);
15595         }
15596
15597         /* If we have an AMD 762 or VIA K8T800 chipset, write
15598          * reordering to the mailbox registers done by the host
15599          * controller can cause serious problems.  We read back from
15600          * every mailbox register write to force the writes to be
15601          * posted to the chip in order.
15602          */
15603         if (pci_dev_present(tg3_write_reorder_chipsets) &&
15604             !tg3_flag(tp, PCI_EXPRESS))
15605                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15606
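        /* A "flushed" mailbox write reads the register back so the host
         * bridge cannot reorder the posted write.  Sketch of what
         * tg3_write_flush_reg32() effectively does ("off" stands in for a
         * mailbox register offset):
         */
#if 0
        writel(val, tp->regs + off);
        readl(tp->regs + off);          /* force the posted write out */
#endif
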
15607         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15608                              &tp->pci_cacheline_sz);
15609         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15610                              &tp->pci_lat_timer);
15611         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15612             tp->pci_lat_timer < 64) {
15613                 tp->pci_lat_timer = 64;
15614                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15615                                       tp->pci_lat_timer);
15616         }
15617
15618         /* Important! -- It is critical that the PCI-X hw workaround
15619          * situation is decided before the first MMIO register access.
15620          */
15621         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15622                 /* 5700 BX chips need to have their TX producer index
15623                  * mailboxes written twice to workaround a bug.
15624                  */
15625                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15626
15627                 /* If we are in PCI-X mode, enable register write workaround.
15628                  *
15629                  * The workaround is to use indirect register accesses
15630                  * for all chip writes not to mailbox registers.
15631                  */
15632                 if (tg3_flag(tp, PCIX_MODE)) {
15633                         u32 pm_reg;
15634
15635                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15636
15637                         /* The chip can have its power management PCI config
15638                          * space registers clobbered due to this bug.
15639                          * So explicitly force the chip into D0 here.
15640                          */
15641                         pci_read_config_dword(tp->pdev,
15642                                               tp->pm_cap + PCI_PM_CTRL,
15643                                               &pm_reg);
15644                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15645                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15646                         pci_write_config_dword(tp->pdev,
15647                                                tp->pm_cap + PCI_PM_CTRL,
15648                                                pm_reg);
15649
15650                         /* Also, force SERR#/PERR# in PCI command. */
15651                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15652                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15653                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15654                 }
15655         }
15656
15657         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15658                 tg3_flag_set(tp, PCI_HIGH_SPEED);
15659         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15660                 tg3_flag_set(tp, PCI_32BIT);
15661
15662         /* Chip-specific fixup from the Broadcom driver */
15663         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15664             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15665                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15666                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15667         }
15668
15669         /* Default fast path register access methods */
15670         tp->read32 = tg3_read32;
15671         tp->write32 = tg3_write32;
15672         tp->read32_mbox = tg3_read32;
15673         tp->write32_mbox = tg3_write32;
15674         tp->write32_tx_mbox = tg3_write32;
15675         tp->write32_rx_mbox = tg3_write32;
15676
15677         /* Various workaround register access methods */
15678         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15679                 tp->write32 = tg3_write_indirect_reg32;
15680         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15681                  (tg3_flag(tp, PCI_EXPRESS) &&
15682                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
15683                 /*
15684                  * Back-to-back register writes can cause problems on these
15685                  * chips; the workaround is to read back all reg writes
15686                  * except those to mailbox regs.
15687                  *
15688                  * See tg3_write_indirect_reg32().
15689                  */
15690                 tp->write32 = tg3_write_flush_reg32;
15691         }
15692
15693         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15694                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15695                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15696                         tp->write32_rx_mbox = tg3_write_flush_reg32;
15697         }
15698
15699         if (tg3_flag(tp, ICH_WORKAROUND)) {
15700                 tp->read32 = tg3_read_indirect_reg32;
15701                 tp->write32 = tg3_write_indirect_reg32;
15702                 tp->read32_mbox = tg3_read_indirect_mbox;
15703                 tp->write32_mbox = tg3_write_indirect_mbox;
15704                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15705                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15706
15707                 iounmap(tp->regs);
15708                 tp->regs = NULL;
15709
15710                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15711                 pci_cmd &= ~PCI_COMMAND_MEMORY;
15712                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15713         }
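
        /* With ICH_WORKAROUND active, all register traffic is routed
         * through PCI configuration space.  Sketch of the indirect write
         * pattern (tg3_write_indirect_reg32() adds locking around this):
         */
#if 0
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
#endif
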
15714         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15715                 tp->read32_mbox = tg3_read32_mbox_5906;
15716                 tp->write32_mbox = tg3_write32_mbox_5906;
15717                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15718                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15719         }
15720
15721         if (tp->write32 == tg3_write_indirect_reg32 ||
15722             (tg3_flag(tp, PCIX_MODE) &&
15723              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15724               tg3_asic_rev(tp) == ASIC_REV_5701)))
15725                 tg3_flag_set(tp, SRAM_USE_CONFIG);
15726
15727         /* The memory arbiter has to be enabled in order for SRAM accesses
15728          * to succeed.  Normally on powerup the tg3 chip firmware will make
15729          * sure it is enabled, but other entities such as system netboot
15730          * code might disable it.
15731          */
15732         val = tr32(MEMARB_MODE);
15733         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15734
15735         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15736         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15737             tg3_flag(tp, 5780_CLASS)) {
15738                 if (tg3_flag(tp, PCIX_MODE)) {
15739                         pci_read_config_dword(tp->pdev,
15740                                               tp->pcix_cap + PCI_X_STATUS,
15741                                               &val);
15742                         tp->pci_fn = val & 0x7;
15743                 }
15744         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15745                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15746                    tg3_asic_rev(tp) == ASIC_REV_5720) {
15747                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15748                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
15749                         val = tr32(TG3_CPMU_STATUS);
15750
15751                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
15752                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
15753                 else
15754                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15755                                      TG3_CPMU_STATUS_FSHFT_5719;
15756         }
15757
15758         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
15759                 tp->write32_tx_mbox = tg3_write_flush_reg32;
15760                 tp->write32_rx_mbox = tg3_write_flush_reg32;
15761         }
15762
15763         /* Get eeprom hw config before calling tg3_set_power_state().
15764          * In particular, the TG3_FLAG_IS_NIC flag must be
15765          * determined before calling tg3_set_power_state() so that
15766          * we know whether or not to switch out of Vaux power.
15767          * When the flag is set, it means that GPIO1 is used for eeprom
15768          * write protect and also implies that it is a LOM where GPIOs
15769          * are not used to switch power.
15770          */
15771         tg3_get_eeprom_hw_cfg(tp);
15772
15773         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
15774                 tg3_flag_clear(tp, TSO_CAPABLE);
15775                 tg3_flag_clear(tp, TSO_BUG);
15776                 tp->fw_needed = NULL;
15777         }
15778
15779         if (tg3_flag(tp, ENABLE_APE)) {
15780                 /* Allow reads and writes to the
15781                  * APE register and memory space.
15782                  */
15783                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15784                                  PCISTATE_ALLOW_APE_SHMEM_WR |
15785                                  PCISTATE_ALLOW_APE_PSPACE_WR;
15786                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15787                                        pci_state_reg);
15788
15789                 tg3_ape_lock_init(tp);
15790         }
15791
15792         /* Set up tp->grc_local_ctrl before calling
15793          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
15794          * will bring the 5700's external PHY out of reset.
15795          * It is also used as eeprom write protect on LOMs.
15796          */
15797         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15798         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15799             tg3_flag(tp, EEPROM_WRITE_PROT))
15800                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15801                                        GRC_LCLCTRL_GPIO_OUTPUT1);
15802         /* Unused GPIO3 must be driven as output on 5752 because there
15803          * are no pull-up resistors on unused GPIO pins.
15804          */
15805         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
15806                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15807
15808         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15809             tg3_asic_rev(tp) == ASIC_REV_57780 ||
15810             tg3_flag(tp, 57765_CLASS))
15811                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15812
15813         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15814             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15815                 /* Turn off the debug UART. */
15816                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15817                 if (tg3_flag(tp, IS_NIC))
15818                         /* Keep VMain power. */
15819                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15820                                               GRC_LCLCTRL_GPIO_OUTPUT0;
15821         }
15822
15823         if (tg3_asic_rev(tp) == ASIC_REV_5762)
15824                 tp->grc_local_ctrl |=
15825                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
15826
15827         /* Switch out of Vaux if it is a NIC */
15828         tg3_pwrsrc_switch_to_vmain(tp);
15829
15830         /* Derive initial jumbo mode from MTU assigned in
15831          * ether_setup() via the alloc_etherdev() call
15832          */
15833         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15834                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15835
15836         /* Determine WakeOnLan speed to use. */
15837         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15838             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15839             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15840             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
15841                 tg3_flag_clear(tp, WOL_SPEED_100MB);
15842         } else {
15843                 tg3_flag_set(tp, WOL_SPEED_100MB);
15844         }
15845
15846         if (tg3_asic_rev(tp) == ASIC_REV_5906)
15847                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15848
15849         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
15850         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15851             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15852              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
15853              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
15854             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15855             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15856                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15857
15858         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
15859             tg3_chip_rev(tp) == CHIPREV_5704_AX)
15860                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15861         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
15862                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15863
15864         if (tg3_flag(tp, 5705_PLUS) &&
15865             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15866             tg3_asic_rev(tp) != ASIC_REV_5785 &&
15867             tg3_asic_rev(tp) != ASIC_REV_57780 &&
15868             !tg3_flag(tp, 57765_PLUS)) {
15869                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15870                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
15871                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
15872                     tg3_asic_rev(tp) == ASIC_REV_5761) {
15873                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15874                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15875                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15876                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15877                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15878                 } else
15879                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15880         }
15881
15882         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15883             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
15884                 tp->phy_otp = tg3_read_otp_phycfg(tp);
15885                 if (tp->phy_otp == 0)
15886                         tp->phy_otp = TG3_OTP_DEFAULT;
15887         }
15888
15889         if (tg3_flag(tp, CPMU_PRESENT))
15890                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15891         else
15892                 tp->mi_mode = MAC_MI_MODE_BASE;
15893
15894         tp->coalesce_mode = 0;
15895         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
15896             tg3_chip_rev(tp) != CHIPREV_5700_BX)
15897                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15898
15899         /* Set these bits to enable statistics workaround. */
15900         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15901             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
15902             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
15903                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15904                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15905         }
15906
15907         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
15908             tg3_asic_rev(tp) == ASIC_REV_57780)
15909                 tg3_flag_set(tp, USE_PHYLIB);
15910
15911         err = tg3_mdio_init(tp);
15912         if (err)
15913                 return err;
15914
15915         /* Initialize data/descriptor byte/word swapping. */
15916         val = tr32(GRC_MODE);
15917         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15918             tg3_asic_rev(tp) == ASIC_REV_5762)
15919                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15920                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
15921                         GRC_MODE_B2HRX_ENABLE |
15922                         GRC_MODE_HTX2B_ENABLE |
15923                         GRC_MODE_HOST_STACKUP);
15924         else
15925                 val &= GRC_MODE_HOST_STACKUP;
15926
15927         tw32(GRC_MODE, val | tp->grc_mode);
15928
15929         tg3_switch_clocks(tp);
15930
15931         /* Clear this out for sanity. */
15932         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15933
15934         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15935                               &pci_state_reg);
15936         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15937             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15938                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15939                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15940                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
15941                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
15942                         void __iomem *sram_base;
15943
15944                         /* Write some dummy words into the SRAM status block
15945                          * area and see if they read back correctly.  If the
15946                          * readback is bad, force-enable the PCI-X workaround.
15947                          */
15948                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15949
15950                         writel(0x00000000, sram_base);
15951                         writel(0x00000000, sram_base + 4);
15952                         writel(0xffffffff, sram_base + 4);
15953                         if (readl(sram_base) != 0x00000000)
15954                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15955                 }
15956         }
15957
15958         udelay(50);
15959         tg3_nvram_init(tp);
15960
15961         /* If the device has NVRAM, there is no need to load patch firmware */
15962         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
15963             !tg3_flag(tp, NO_NVRAM))
15964                 tp->fw_needed = NULL;
15965
15966         grc_misc_cfg = tr32(GRC_MISC_CFG);
15967         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15968
15969         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15970             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15971              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15972                 tg3_flag_set(tp, IS_5788);
15973
15974         if (!tg3_flag(tp, IS_5788) &&
15975             tg3_asic_rev(tp) != ASIC_REV_5700)
15976                 tg3_flag_set(tp, TAGGED_STATUS);
15977         if (tg3_flag(tp, TAGGED_STATUS)) {
15978                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15979                                       HOSTCC_MODE_CLRTICK_TXBD);
15980
15981                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15982                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15983                                        tp->misc_host_ctrl);
15984         }
15985
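        /* With tagged status, the hardware stamps each status block update
         * with a tag, and the driver echoes the tag it last processed into
         * the interrupt mailbox when re-enabling interrupts; the chip then
         * re-interrupts only if newer status exists.  A hedged sketch of
         * the re-enable step, using the tnapi names from elsewhere in this
         * driver:
         */
#if 0
        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
#endif
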
15986         /* Preserve the APE MAC_MODE bits */
15987         if (tg3_flag(tp, ENABLE_APE))
15988                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15989         else
15990                 tp->mac_mode = 0;
15991
15992         if (tg3_10_100_only_device(tp, ent))
15993                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15994
15995         err = tg3_phy_probe(tp);
15996         if (err) {
15997                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15998                 /* ... but do not return immediately ... */
15999                 tg3_mdio_fini(tp);
16000         }
16001
16002         tg3_read_vpd(tp);
16003         tg3_read_fw_ver(tp);
16004
16005         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16006                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16007         } else {
16008                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16009                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16010                 else
16011                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16012         }
16013
16014         /* 5700 {AX,BX} chips have a broken status block link
16015          * change bit implementation, so we must use the
16016          * status register in those cases.
16017          */
16018         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16019                 tg3_flag_set(tp, USE_LINKCHG_REG);
16020         else
16021                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16022
16023         /* The led_ctrl is set during tg3_phy_probe; here we might
16024          * have to force the link status polling mechanism based
16025          * upon subsystem IDs.
16026          */
16027         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16028             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16029             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16030                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16031                 tg3_flag_set(tp, USE_LINKCHG_REG);
16032         }
16033
16034         /* For all SERDES we poll the MAC status register. */
16035         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16036                 tg3_flag_set(tp, POLL_SERDES);
16037         else
16038                 tg3_flag_clear(tp, POLL_SERDES);
16039
16040         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16041         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16042         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16043             tg3_flag(tp, PCIX_MODE)) {
16044                 tp->rx_offset = NET_SKB_PAD;
16045 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16046                 tp->rx_copy_thresh = ~(u16)0;
16047 #endif
16048         }
16049
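        /* These ring sizes are powers of two, so "size - 1" is a wrap
         * mask: an index advances as (idx + 1) & mask.  For example, a
         * 512-entry ring yields mask 0x1ff.
         */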
16050         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16051         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16052         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16053
16054         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16055
16056         /* Increment the rx prod index on the rx std ring by at most
16057          * 8 on these chips, to work around a hw erratum.
16058          */
16059         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16060             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16061             tg3_asic_rev(tp) == ASIC_REV_5755)
16062                 tp->rx_std_max_post = 8;
16063
16064         if (tg3_flag(tp, ASPM_WORKAROUND))
16065                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16066                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16067
16068         return err;
16069 }
16070
16071 #ifdef CONFIG_SPARC
16072 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16073 {
16074         struct net_device *dev = tp->dev;
16075         struct pci_dev *pdev = tp->pdev;
16076         struct device_node *dp = pci_device_to_OF_node(pdev);
16077         const unsigned char *addr;
16078         int len;
16079
16080         addr = of_get_property(dp, "local-mac-address", &len);
16081         if (addr && len == 6) {
16082                 memcpy(dev->dev_addr, addr, 6);
16083                 return 0;
16084         }
16085         return -ENODEV;
16086 }
16087
16088 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16089 {
16090         struct net_device *dev = tp->dev;
16091
16092         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16093         return 0;
16094 }
16095 #endif
16096
16097 static int tg3_get_device_address(struct tg3 *tp)
16098 {
16099         struct net_device *dev = tp->dev;
16100         u32 hi, lo, mac_offset;
16101         int addr_ok = 0;
16102         int err;
16103
16104 #ifdef CONFIG_SPARC
16105         if (!tg3_get_macaddr_sparc(tp))
16106                 return 0;
16107 #endif
16108
16109         if (tg3_flag(tp, IS_SSB_CORE)) {
16110                 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16111                 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16112                         return 0;
16113         }
16114
16115         mac_offset = 0x7c;
16116         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16117             tg3_flag(tp, 5780_CLASS)) {
16118                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16119                         mac_offset = 0xcc;
16120                 if (tg3_nvram_lock(tp))
16121                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16122                 else
16123                         tg3_nvram_unlock(tp);
16124         } else if (tg3_flag(tp, 5717_PLUS)) {
16125                 if (tp->pci_fn & 1)
16126                         mac_offset = 0xcc;
16127                 if (tp->pci_fn > 1)
16128                         mac_offset += 0x18c;
16129         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16130                 mac_offset = 0x10;
16131
16132         /* First try to get it from the MAC address mailbox. */
16133         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
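        /* 0x484b is ASCII "HK"; the bootcode appears to use it as a
         * "MAC address present" signature in the high mailbox word.
         */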
16134         if ((hi >> 16) == 0x484b) {
16135                 dev->dev_addr[0] = (hi >>  8) & 0xff;
16136                 dev->dev_addr[1] = (hi >>  0) & 0xff;
16137
16138                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16139                 dev->dev_addr[2] = (lo >> 24) & 0xff;
16140                 dev->dev_addr[3] = (lo >> 16) & 0xff;
16141                 dev->dev_addr[4] = (lo >>  8) & 0xff;
16142                 dev->dev_addr[5] = (lo >>  0) & 0xff;
16143
16144                 /* Some old bootcode may report a 0 MAC address in SRAM */
16145                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16146         }
16147         if (!addr_ok) {
16148                 /* Next, try NVRAM. */
16149                 if (!tg3_flag(tp, NO_NVRAM) &&
16150                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16151                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16152                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16153                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16154                 }
16155                 /* Finally just fetch it out of the MAC control regs. */
16156                 else {
16157                         hi = tr32(MAC_ADDR_0_HIGH);
16158                         lo = tr32(MAC_ADDR_0_LOW);
16159
16160                         dev->dev_addr[5] = lo & 0xff;
16161                         dev->dev_addr[4] = (lo >> 8) & 0xff;
16162                         dev->dev_addr[3] = (lo >> 16) & 0xff;
16163                         dev->dev_addr[2] = (lo >> 24) & 0xff;
16164                         dev->dev_addr[1] = hi & 0xff;
16165                         dev->dev_addr[0] = (hi >> 8) & 0xff;
16166                 }
16167         }
16168
16169         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16170 #ifdef CONFIG_SPARC
16171                 if (!tg3_get_default_macaddr_sparc(tp))
16172                         return 0;
16173 #endif
16174                 return -EINVAL;
16175         }
16176         return 0;
16177 }
16178
16179 #define BOUNDARY_SINGLE_CACHELINE       1
16180 #define BOUNDARY_MULTI_CACHELINE        2
16181
16182 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16183 {
16184         int cacheline_size;
16185         u8 byte;
16186         int goal;
16187
16188         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
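        /* PCI_CACHE_LINE_SIZE is specified in 32-bit dwords, hence the
         * "* 4" conversion below.  A value of zero means firmware never
         * programmed it, so assume the 1024-byte worst case.
         */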
16189         if (byte == 0)
16190                 cacheline_size = 1024;
16191         else
16192                 cacheline_size = (int) byte * 4;
16193
16194         /* On 5703 and later chips, the boundary bits have no
16195          * effect.
16196          */
16197         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16198             tg3_asic_rev(tp) != ASIC_REV_5701 &&
16199             !tg3_flag(tp, PCI_EXPRESS))
16200                 goto out;
16201
16202 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16203         goal = BOUNDARY_MULTI_CACHELINE;
16204 #else
16205 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16206         goal = BOUNDARY_SINGLE_CACHELINE;
16207 #else
16208         goal = 0;
16209 #endif
16210 #endif
16211
16212         if (tg3_flag(tp, 57765_PLUS)) {
16213                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16214                 goto out;
16215         }
16216
16217         if (!goal)
16218                 goto out;
16219
16220         /* PCI controllers on most RISC systems tend to disconnect
16221          * when a device tries to burst across a cache-line boundary.
16222          * Therefore, letting tg3 do so just wastes PCI bandwidth.
16223          *
16224          * Unfortunately, for PCI-E there are only limited
16225          * write-side controls for this, and thus for reads
16226          * we will still get the disconnects.  We'll also waste
16227          * these PCI cycles for both read and write for chips
16228          * other than 5700 and 5701, which do not implement the
16229          * boundary bits.
16230          */
16231         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16232                 switch (cacheline_size) {
16233                 case 16:
16234                 case 32:
16235                 case 64:
16236                 case 128:
16237                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16238                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16239                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16240                         } else {
16241                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16242                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16243                         }
16244                         break;
16245
16246                 case 256:
16247                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16248                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16249                         break;
16250
16251                 default:
16252                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16253                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16254                         break;
16255                 }
16256         } else if (tg3_flag(tp, PCI_EXPRESS)) {
16257                 switch (cacheline_size) {
16258                 case 16:
16259                 case 32:
16260                 case 64:
16261                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16262                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16263                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16264                                 break;
16265                         }
16266                         /* fallthrough */
16267                 case 128:
16268                 default:
16269                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16270                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16271                         break;
16272                 }
16273         } else {
16274                 switch (cacheline_size) {
16275                 case 16:
16276                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16277                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16278                                         DMA_RWCTRL_WRITE_BNDRY_16);
16279                                 break;
16280                         }
16281                         /* fallthrough */
16282                 case 32:
16283                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16284                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16285                                         DMA_RWCTRL_WRITE_BNDRY_32);
16286                                 break;
16287                         }
16288                         /* fallthrough */
16289                 case 64:
16290                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16291                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16292                                         DMA_RWCTRL_WRITE_BNDRY_64);
16293                                 break;
16294                         }
16295                         /* fallthrough */
16296                 case 128:
16297                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16298                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16299                                         DMA_RWCTRL_WRITE_BNDRY_128);
16300                                 break;
16301                         }
16302                         /* fallthrough */
16303                 case 256:
16304                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
16305                                 DMA_RWCTRL_WRITE_BNDRY_256);
16306                         break;
16307                 case 512:
16308                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
16309                                 DMA_RWCTRL_WRITE_BNDRY_512);
16310                         break;
16311                 case 1024:
16312                 default:
16313                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16314                                 DMA_RWCTRL_WRITE_BNDRY_1024);
16315                         break;
16316                 }
16317         }
16318
16319 out:
16320         return val;
16321 }
16322
16323 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16324                            int size, int to_device)
16325 {
16326         struct tg3_internal_buffer_desc test_desc;
16327         u32 sram_dma_descs;
16328         int i, ret;
16329
16330         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16331
16332         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16333         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16334         tw32(RDMAC_STATUS, 0);
16335         tw32(WDMAC_STATUS, 0);
16336
16337         tw32(BUFMGR_MODE, 0);
16338         tw32(FTQ_RESET, 0);
16339
16340         test_desc.addr_hi = ((u64) buf_dma) >> 32;
16341         test_desc.addr_lo = buf_dma & 0xffffffff;
16342         test_desc.nic_mbuf = 0x00002100;
16343         test_desc.len = size;
16344
16345         /*
16346          * HP ZX1 was seeing test failures for 5701 cards running at
16347          * 33 MHz the *second* time the tg3 driver was loaded after an
16348          * initial scan.
16349          *
16350          * Broadcom tells me:
16351          *   ...the DMA engine is connected to the GRC block and a DMA
16352          *   reset may affect the GRC block in some unpredictable way...
16353          *   The behavior of resets to individual blocks has not been tested.
16354          *
16355          * Broadcom noted the GRC reset will also reset all sub-components.
16356          */
16357         if (to_device) {
16358                 test_desc.cqid_sqid = (13 << 8) | 2;
16359
16360                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16361                 udelay(40);
16362         } else {
16363                 test_desc.cqid_sqid = (16 << 8) | 7;
16364
16365                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16366                 udelay(40);
16367         }
16368         test_desc.flags = 0x00000005;
16369
16370         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16371                 u32 val;
16372
16373                 val = *(((u32 *)&test_desc) + i);
16374                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16375                                        sram_dma_descs + (i * sizeof(u32)));
16376                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16377         }
16378         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16379
16380         if (to_device)
16381                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16382         else
16383                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16384
16385         ret = -ENODEV;
16386         for (i = 0; i < 40; i++) {
16387                 u32 val;
16388
16389                 if (to_device)
16390                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16391                 else
16392                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16393                 if ((val & 0xffff) == sram_dma_descs) {
16394                         ret = 0;
16395                         break;
16396                 }
16397
16398                 udelay(100);
16399         }
16400
16401         return ret;
16402 }
16403
16404 #define TEST_BUFFER_SIZE        0x2000
16405
16406 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16407         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16408         { },
16409 };
16410
16411 static int tg3_test_dma(struct tg3 *tp)
16412 {
16413         dma_addr_t buf_dma;
16414         u32 *buf, saved_dma_rwctrl;
16415         int ret = 0;
16416
16417         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16418                                  &buf_dma, GFP_KERNEL);
16419         if (!buf) {
16420                 ret = -ENOMEM;
16421                 goto out_nofree;
16422         }
16423
16424         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16425                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16426
16427         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16428
16429         if (tg3_flag(tp, 57765_PLUS))
16430                 goto out;
16431
16432         if (tg3_flag(tp, PCI_EXPRESS)) {
16433                 /* DMA read watermark not used on PCIE */
16434                 tp->dma_rwctrl |= 0x00180000;
16435         } else if (!tg3_flag(tp, PCIX_MODE)) {
16436                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16437                     tg3_asic_rev(tp) == ASIC_REV_5750)
16438                         tp->dma_rwctrl |= 0x003f0000;
16439                 else
16440                         tp->dma_rwctrl |= 0x003f000f;
16441         } else {
16442                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16443                     tg3_asic_rev(tp) == ASIC_REV_5704) {
16444                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16445                         u32 read_water = 0x7;
16446
16447                         /* If the 5704 is behind the EPB bridge, we can
16448                          * do the less restrictive ONE_DMA workaround for
16449                          * better performance.
16450                          */
16451                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16452                             tg3_asic_rev(tp) == ASIC_REV_5704)
16453                                 tp->dma_rwctrl |= 0x8000;
16454                         else if (ccval == 0x6 || ccval == 0x7)
16455                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16456
16457                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
16458                                 read_water = 4;
16459                         /* Set bit 23 to enable PCIX hw bug fix */
16460                         tp->dma_rwctrl |=
16461                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16462                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16463                                 (1 << 23);
16464                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16465                         /* 5780 always in PCIX mode */
16466                         tp->dma_rwctrl |= 0x00144000;
16467                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16468                         /* 5714 always in PCIX mode */
16469                         tp->dma_rwctrl |= 0x00148000;
16470                 } else {
16471                         tp->dma_rwctrl |= 0x001b000f;
16472                 }
16473         }
16474         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16475                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16476
16477         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16478             tg3_asic_rev(tp) == ASIC_REV_5704)
16479                 tp->dma_rwctrl &= 0xfffffff0;
16480
16481         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16482             tg3_asic_rev(tp) == ASIC_REV_5701) {
16483                 /* Remove this if it causes problems for some boards. */
16484                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16485
16486                 /* On 5700/5701 chips, we need to set this bit.
16487                  * Otherwise the chip will issue cacheline transactions
16488                  * to streamable DMA memory with not all the byte
16489                  * enables turned on.  This is an error on several
16490                  * RISC PCI controllers, in particular sparc64.
16491                  *
16492                  * On 5703/5704 chips, this bit has been reassigned
16493                  * a different meaning.  In particular, it is used
16494                  * on those chips to enable a PCI-X workaround.
16495                  */
16496                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16497         }
16498
16499         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16500
16501 #if 0
16502         /* Unneeded, already done by tg3_get_invariants.  */
16503         tg3_switch_clocks(tp);
16504 #endif
16505
16506         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16507             tg3_asic_rev(tp) != ASIC_REV_5701)
16508                 goto out;
16509
16510         /* It is best to perform the DMA test with the maximum write
16511          * burst size to expose the 5700/5701 write DMA bug.
16512          */
16513         saved_dma_rwctrl = tp->dma_rwctrl;
16514         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16515         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16516
16517         while (1) {
16518                 u32 *p = buf, i;
16519
16520                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16521                         p[i] = i;
16522
16523                 /* Send the buffer to the chip. */
16524                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16525                 if (ret) {
16526                         dev_err(&tp->pdev->dev,
16527                                 "%s: Buffer write failed. err = %d\n",
16528                                 __func__, ret);
16529                         break;
16530                 }
16531
16532 #if 0
16533                 /* validate data reached card RAM correctly. */
16534                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16535                         u32 val;
16536                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
16537                         if (le32_to_cpu(val) != p[i]) {
16538                                 dev_err(&tp->pdev->dev,
16539                                         "%s: Buffer corrupted on device! "
16540                                         "(%u != %u)\n", __func__, le32_to_cpu(val), i);
16541                                 /* ret = -ENODEV here? */
16542                         }
16543                         p[i] = 0;
16544                 }
16545 #endif
16546                 /* Now read it back. */
16547                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16548                 if (ret) {
16549                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16550                                 "err = %d\n", __func__, ret);
16551                         break;
16552                 }
16553
16554                 /* Verify it. */
16555                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16556                         if (p[i] == i)
16557                                 continue;
16558
16559                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16560                             DMA_RWCTRL_WRITE_BNDRY_16) {
16561                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16562                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16563                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16564                                 break;
16565                         } else {
16566                                 dev_err(&tp->pdev->dev,
16567                                         "%s: Buffer corrupted on read back! "
16568                                         "(%d != %d)\n", __func__, p[i], i);
16569                                 ret = -ENODEV;
16570                                 goto out;
16571                         }
16572                 }
16573
16574                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16575                         /* Success. */
16576                         ret = 0;
16577                         break;
16578                 }
16579         }
16580         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16581             DMA_RWCTRL_WRITE_BNDRY_16) {
16582                 /* DMA test passed without adjusting the DMA boundary;
16583                  * now look for chipsets that are known to expose the
16584                  * DMA bug without failing the test.
16585                  */
16586                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16587                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16588                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16589                 } else {
16590                         /* Safe to use the calculated DMA boundary. */
16591                         tp->dma_rwctrl = saved_dma_rwctrl;
16592                 }
16593
16594                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16595         }
16596
16597 out:
16598         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16599 out_nofree:
16600         return ret;
16601 }
16602
16603 static void tg3_init_bufmgr_config(struct tg3 *tp)
16604 {
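       /* The values below are buffer-manager watermarks.  As an assumption
        * (the exact semantics are a hardware detail, not spelled out here):
        * the "low water" thresholds are where the chip starts asserting
        * flow control as free mbufs run short, and "high water" is where it
        * resumes normal operation.
        */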
16605         if (tg3_flag(tp, 57765_PLUS)) {
16606                 tp->bufmgr_config.mbuf_read_dma_low_water =
16607                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16608                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16609                         DEFAULT_MB_MACRX_LOW_WATER_57765;
16610                 tp->bufmgr_config.mbuf_high_water =
16611                         DEFAULT_MB_HIGH_WATER_57765;
16612
16613                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16614                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16615                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16616                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16617                 tp->bufmgr_config.mbuf_high_water_jumbo =
16618                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16619         } else if (tg3_flag(tp, 5705_PLUS)) {
16620                 tp->bufmgr_config.mbuf_read_dma_low_water =
16621                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16622                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16623                         DEFAULT_MB_MACRX_LOW_WATER_5705;
16624                 tp->bufmgr_config.mbuf_high_water =
16625                         DEFAULT_MB_HIGH_WATER_5705;
16626                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16627                         tp->bufmgr_config.mbuf_mac_rx_low_water =
16628                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
16629                         tp->bufmgr_config.mbuf_high_water =
16630                                 DEFAULT_MB_HIGH_WATER_5906;
16631                 }
16632
16633                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16634                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16635                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16636                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16637                 tp->bufmgr_config.mbuf_high_water_jumbo =
16638                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16639         } else {
16640                 tp->bufmgr_config.mbuf_read_dma_low_water =
16641                         DEFAULT_MB_RDMA_LOW_WATER;
16642                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16643                         DEFAULT_MB_MACRX_LOW_WATER;
16644                 tp->bufmgr_config.mbuf_high_water =
16645                         DEFAULT_MB_HIGH_WATER;
16646
16647                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16648                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16649                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16650                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16651                 tp->bufmgr_config.mbuf_high_water_jumbo =
16652                         DEFAULT_MB_HIGH_WATER_JUMBO;
16653         }
16654
16655         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16656         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16657 }
16658
16659 static char *tg3_phy_string(struct tg3 *tp)
16660 {
16661         switch (tp->phy_id & TG3_PHY_ID_MASK) {
16662         case TG3_PHY_ID_BCM5400:        return "5400";
16663         case TG3_PHY_ID_BCM5401:        return "5401";
16664         case TG3_PHY_ID_BCM5411:        return "5411";
16665         case TG3_PHY_ID_BCM5701:        return "5701";
16666         case TG3_PHY_ID_BCM5703:        return "5703";
16667         case TG3_PHY_ID_BCM5704:        return "5704";
16668         case TG3_PHY_ID_BCM5705:        return "5705";
16669         case TG3_PHY_ID_BCM5750:        return "5750";
16670         case TG3_PHY_ID_BCM5752:        return "5752";
16671         case TG3_PHY_ID_BCM5714:        return "5714";
16672         case TG3_PHY_ID_BCM5780:        return "5780";
16673         case TG3_PHY_ID_BCM5755:        return "5755";
16674         case TG3_PHY_ID_BCM5787:        return "5787";
16675         case TG3_PHY_ID_BCM5784:        return "5784";
16676         case TG3_PHY_ID_BCM5756:        return "5722/5756";
16677         case TG3_PHY_ID_BCM5906:        return "5906";
16678         case TG3_PHY_ID_BCM5761:        return "5761";
16679         case TG3_PHY_ID_BCM5718C:       return "5718C";
16680         case TG3_PHY_ID_BCM5718S:       return "5718S";
16681         case TG3_PHY_ID_BCM57765:       return "57765";
16682         case TG3_PHY_ID_BCM5719C:       return "5719C";
16683         case TG3_PHY_ID_BCM5720C:       return "5720C";
16684         case TG3_PHY_ID_BCM5762:        return "5762C";
16685         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
16686         case 0:                 return "serdes";
16687         default:                return "unknown";
16688         }
16689 }
16690
16691 static char *tg3_bus_string(struct tg3 *tp, char *str)
16692 {
16693         if (tg3_flag(tp, PCI_EXPRESS)) {
16694                 strcpy(str, "PCI Express");
16695                 return str;
16696         } else if (tg3_flag(tp, PCIX_MODE)) {
16697                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16698
16699                 strcpy(str, "PCIX:");
16700
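                      /* The low five bits of TG3PCI_CLOCK_CTRL encode the
                       * PCI-X bus speed, decoded below: 0 -> 33MHz,
                       * 2 -> 50MHz, 4 -> 66MHz, 6 -> 100MHz, 7 -> 133MHz;
                       * 5704CIOBE boards are always reported as 133MHz.
                       */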
16701                 if ((clock_ctrl == 7) ||
16702                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16703                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16704                         strcat(str, "133MHz");
16705                 else if (clock_ctrl == 0)
16706                         strcat(str, "33MHz");
16707                 else if (clock_ctrl == 2)
16708                         strcat(str, "50MHz");
16709                 else if (clock_ctrl == 4)
16710                         strcat(str, "66MHz");
16711                 else if (clock_ctrl == 6)
16712                         strcat(str, "100MHz");
16713         } else {
16714                 strcpy(str, "PCI:");
16715                 if (tg3_flag(tp, PCI_HIGH_SPEED))
16716                         strcat(str, "66MHz");
16717                 else
16718                         strcat(str, "33MHz");
16719         }
16720         if (tg3_flag(tp, PCI_32BIT))
16721                 strcat(str, ":32-bit");
16722         else
16723                 strcat(str, ":64-bit");
16724         return str;
16725 }
16726
16727 static void tg3_init_coal(struct tg3 *tp)
16728 {
16729         struct ethtool_coalesce *ec = &tp->coal;
16730
16731         memset(ec, 0, sizeof(*ec));
16732         ec->cmd = ETHTOOL_GCOALESCE;
16733         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16734         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16735         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16736         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16737         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16738         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16739         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16740         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16741         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16742
16743         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16744                                  HOSTCC_MODE_CLRTICK_TXBD)) {
16745                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16746                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16747                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16748                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16749         }
16750
16751         if (tg3_flag(tp, 5705_PLUS)) {
16752                 ec->rx_coalesce_usecs_irq = 0;
16753                 ec->tx_coalesce_usecs_irq = 0;
16754                 ec->stats_block_coalesce_usecs = 0;
16755         }
16756 }
16757
16758 static int tg3_init_one(struct pci_dev *pdev,
16759                         const struct pci_device_id *ent)
16760 {
16761         struct net_device *dev;
16762         struct tg3 *tp;
16763         int i, err, pm_cap;
16764         u32 sndmbx, rcvmbx, intmbx;
16765         char str[40];
16766         u64 dma_mask, persist_dma_mask;
16767         netdev_features_t features = 0;
16768
16769         printk_once(KERN_INFO "%s\n", version);
16770
16771         err = pci_enable_device(pdev);
16772         if (err) {
16773                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16774                 return err;
16775         }
16776
16777         err = pci_request_regions(pdev, DRV_MODULE_NAME);
16778         if (err) {
16779                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16780                 goto err_out_disable_pdev;
16781         }
16782
16783         pci_set_master(pdev);
16784
16785         /* Find power-management capability. */
16786         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16787         if (pm_cap == 0) {
16788                 dev_err(&pdev->dev,
16789                         "Cannot find Power Management capability, aborting\n");
16790                 err = -EIO;
16791                 goto err_out_free_res;
16792         }
16793
16794         err = pci_set_power_state(pdev, PCI_D0);
16795         if (err) {
16796                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16797                 goto err_out_free_res;
16798         }
16799
16800         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16801         if (!dev) {
16802                 err = -ENOMEM;
16803                 goto err_out_power_down;
16804         }
16805
16806         SET_NETDEV_DEV(dev, &pdev->dev);
16807
16808         tp = netdev_priv(dev);
16809         tp->pdev = pdev;
16810         tp->dev = dev;
16811         tp->pm_cap = pm_cap;
16812         tp->rx_mode = TG3_DEF_RX_MODE;
16813         tp->tx_mode = TG3_DEF_TX_MODE;
16814         tp->irq_sync = 1;
16815
16816         if (tg3_debug > 0)
16817                 tp->msg_enable = tg3_debug;
16818         else
16819                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16820
16821         if (pdev_is_ssb_gige_core(pdev)) {
16822                 tg3_flag_set(tp, IS_SSB_CORE);
16823                 if (ssb_gige_must_flush_posted_writes(pdev))
16824                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
16825                 if (ssb_gige_one_dma_at_once(pdev))
16826                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
16827                 if (ssb_gige_have_roboswitch(pdev))
16828                         tg3_flag_set(tp, ROBOSWITCH);
16829                 if (ssb_gige_is_rgmii(pdev))
16830                         tg3_flag_set(tp, RGMII_MODE);
16831         }
16832
16833         /* The word/byte swap controls here control register access byte
16834          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
16835          * setting below.
16836          */
16837         tp->misc_host_ctrl =
16838                 MISC_HOST_CTRL_MASK_PCI_INT |
16839                 MISC_HOST_CTRL_WORD_SWAP |
16840                 MISC_HOST_CTRL_INDIR_ACCESS |
16841                 MISC_HOST_CTRL_PCISTATE_RW;
16842
16843         /* The NONFRM (non-frame) byte/word swap controls take effect
16844          * on descriptor entries, anything which isn't packet data.
16845          *
16846          * The StrongARM chips on the board (one for tx, one for rx)
16847          * are running in big-endian mode.
16848          */
16849         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16850                         GRC_MODE_WSWAP_NONFRM_DATA);
16851 #ifdef __BIG_ENDIAN
16852         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16853 #endif
16854         spin_lock_init(&tp->lock);
16855         spin_lock_init(&tp->indirect_lock);
16856         INIT_WORK(&tp->reset_task, tg3_reset_task);
16857
16858         tp->regs = pci_ioremap_bar(pdev, BAR_0);
16859         if (!tp->regs) {
16860                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16861                 err = -ENOMEM;
16862                 goto err_out_free_dev;
16863         }
16864
16865         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16866             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16867             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16868             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16869             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16870             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16871             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16872             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16873             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16874             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16875             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16876             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
16877                 tg3_flag_set(tp, ENABLE_APE);
16878                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16879                 if (!tp->aperegs) {
16880                         dev_err(&pdev->dev,
16881                                 "Cannot map APE registers, aborting\n");
16882                         err = -ENOMEM;
16883                         goto err_out_iounmap;
16884                 }
16885         }
16886
16887         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16888         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16889
16890         dev->ethtool_ops = &tg3_ethtool_ops;
16891         dev->watchdog_timeo = TG3_TX_TIMEOUT;
16892         dev->netdev_ops = &tg3_netdev_ops;
16893         dev->irq = pdev->irq;
16894
16895         err = tg3_get_invariants(tp, ent);
16896         if (err) {
16897                 dev_err(&pdev->dev,
16898                         "Problem fetching invariants of chip, aborting\n");
16899                 goto err_out_apeunmap;
16900         }
16901
16902         /* The EPB bridge inside 5714, 5715, and 5780 and any
16903          * device behind the EPB cannot support DMA addresses > 40-bit.
16904          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16905          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16906          * do DMA address check in tg3_start_xmit().
16907          */
16908         if (tg3_flag(tp, IS_5788))
16909                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16910         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16911                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16912 #ifdef CONFIG_HIGHMEM
16913                 dma_mask = DMA_BIT_MASK(64);
16914 #endif
16915         } else
16916                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
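              /* Summarizing the selection above: IS_5788 parts get 32-bit
               * streaming and coherent masks; 40BIT_DMA_BUG parts get
               * 40-bit masks (with a 64-bit streaming mask on HIGHMEM
               * kernels, relying on the per-packet address check in
               * tg3_start_xmit()); everything else gets 64-bit masks.
               */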
16917
16918         /* Configure DMA attributes. */
16919         if (dma_mask > DMA_BIT_MASK(32)) {
16920                 err = pci_set_dma_mask(pdev, dma_mask);
16921                 if (!err) {
16922                         features |= NETIF_F_HIGHDMA;
16923                         err = pci_set_consistent_dma_mask(pdev,
16924                                                           persist_dma_mask);
16925                         if (err < 0) {
16926                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16927                                         "DMA for consistent allocations\n");
16928                                 goto err_out_apeunmap;
16929                         }
16930                 }
16931         }
16932         if (err || dma_mask == DMA_BIT_MASK(32)) {
16933                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16934                 if (err) {
16935                         dev_err(&pdev->dev,
16936                                 "No usable DMA configuration, aborting\n");
16937                         goto err_out_apeunmap;
16938                 }
16939         }
16940
16941         tg3_init_bufmgr_config(tp);
16942
16943         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16944
16945         /* 5700 B0 chips do not support checksumming correctly due
16946          * to hardware bugs.
16947          */
16948         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
16949                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16950
16951                 if (tg3_flag(tp, 5755_PLUS))
16952                         features |= NETIF_F_IPV6_CSUM;
16953         }
16954
16955         /* TSO is on by default on chips that support hardware TSO.
16956          * Firmware TSO on older chips gives lower performance, so it
16957          * is off by default, but can be enabled using ethtool.
16958          */
16959         if ((tg3_flag(tp, HW_TSO_1) ||
16960              tg3_flag(tp, HW_TSO_2) ||
16961              tg3_flag(tp, HW_TSO_3)) &&
16962             (features & NETIF_F_IP_CSUM))
16963                 features |= NETIF_F_TSO;
16964         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16965                 if (features & NETIF_F_IPV6_CSUM)
16966                         features |= NETIF_F_TSO6;
16967                 if (tg3_flag(tp, HW_TSO_3) ||
16968                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
16969                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16970                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
16971                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
16972                     tg3_asic_rev(tp) == ASIC_REV_57780)
16973                         features |= NETIF_F_TSO_ECN;
16974         }
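              /* Net effect of the block above: any hardware TSO generation
               * enables NETIF_F_TSO (given IP checksum offload); HW_TSO_2/3
               * parts also get TSO6 when IPv6 checksumming is available; and
               * TSO_ECN is limited to HW_TSO_3 plus the listed 5761, 5784
               * (non-AX), 5785 and 57780 ASICs.
               */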
16975
16976         dev->features |= features;
16977         dev->vlan_features |= features;
16978
16979         /*
16980          * Add loopback capability only for a subset of devices that support
16981          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
16982          * loopback for the remaining devices.
16983          */
16984         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
16985             !tg3_flag(tp, CPMU_PRESENT))
16986                 /* Add the loopback capability */
16987                 features |= NETIF_F_LOOPBACK;
16988
16989         dev->hw_features |= features;
16990
16991         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
16992             !tg3_flag(tp, TSO_CAPABLE) &&
16993             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16994                 tg3_flag_set(tp, MAX_RXPEND_64);
16995                 tp->rx_pending = 63;
16996         }
16997
16998         err = tg3_get_device_address(tp);
16999         if (err) {
17000                 dev_err(&pdev->dev,
17001                         "Could not obtain valid ethernet address, aborting\n");
17002                 goto err_out_apeunmap;
17003         }
17004
17005         /*
17006          * Reset the chip in case the UNDI or EFI driver did not shut it
17007          * down cleanly; otherwise the DMA self test below will enable
17008          * WDMAC and we'll see (spurious) pending DMA on the PCI bus.
17009          */
17010         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17011             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17012                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17013                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17014         }
17015
17016         err = tg3_test_dma(tp);
17017         if (err) {
17018                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17019                 goto err_out_apeunmap;
17020         }
17021
17022         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17023         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17024         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17025         for (i = 0; i < tp->irq_max; i++) {
17026                 struct tg3_napi *tnapi = &tp->napi[i];
17027
17028                 tnapi->tp = tp;
17029                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17030
17031                 tnapi->int_mbox = intmbx;
17032                 if (i <= 4)
17033                         intmbx += 0x8;
17034                 else
17035                         intmbx += 0x4;
17036
17037                 tnapi->consmbox = rcvmbx;
17038                 tnapi->prodmbox = sndmbx;
17039
17040                 if (i)
17041                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17042                 else
17043                         tnapi->coal_now = HOSTCC_MODE_NOW;
17044
17045                 if (!tg3_flag(tp, SUPPORT_MSIX))
17046                         break;
17047
17048                 /*
17049                  * If we support MSIX, we'll be using RSS.  If we're using
17050                  * RSS, the first vector only handles link interrupts and the
17051                  * remaining vectors handle rx and tx interrupts.  Reuse the
17052                  * mailbox values for the next iteration.  The values we set up
17053                  * above are still useful for the single vectored mode.
17054                  */
17055                 if (!i)
17056                         continue;
17057
17058                 rcvmbx += 0x8;
17059
17060                 if (sndmbx & 0x4)
17061                         sndmbx -= 0x4;
17062                 else
17063                         sndmbx += 0xc;
17064         }
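              /* A worked example of the mailbox layout built above, assuming
               * TG3_64BIT_REG_LOW == 0x4 and an 8-byte-aligned mailbox base:
               * vectors 0 and 1 share the initial rx/tx mailboxes (under RSS,
               * vector 0 only handles link interrupts); from vector 2 on,
               * rcvmbx advances by 0x8 per vector, while the -0x4/+0xc toggle
               * walks sndmbx through the high then low word of successive
               * 64-bit send mailboxes (offsets +0x0, +0x0, -0x4, +0x8, +0x4,
               * +0x10, ... from the initial value).  The interrupt mailboxes
               * are spaced 0x8 apart up through vector 5 and 0x4 apart
               * beyond that.
               */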
17065
17066         tg3_init_coal(tp);
17067
17068         pci_set_drvdata(pdev, dev);
17069
17070         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17071             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17072             tg3_asic_rev(tp) == ASIC_REV_5762)
17073                 tg3_flag_set(tp, PTP_CAPABLE);
17074
17075         if (tg3_flag(tp, 5717_PLUS)) {
17076                 /* Resume a low-power mode */
17077                 tg3_frob_aux_power(tp, false);
17078         }
17079
17080         tg3_timer_init(tp);
17081
17082         tg3_carrier_off(tp);
17083
17084         err = register_netdev(dev);
17085         if (err) {
17086                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17087                 goto err_out_apeunmap;
17088         }
17089
17090         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17091                     tp->board_part_number,
17092                     tg3_chip_rev_id(tp),
17093                     tg3_bus_string(tp, str),
17094                     dev->dev_addr);
17095
17096         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17097                 struct phy_device *phydev;
17098                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17099                 netdev_info(dev,
17100                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17101                             phydev->drv->name, dev_name(&phydev->dev));
17102         } else {
17103                 char *ethtype;
17104
17105                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17106                         ethtype = "10/100Base-TX";
17107                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17108                         ethtype = "1000Base-SX";
17109                 else
17110                         ethtype = "10/100/1000Base-T";
17111
17112                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17113                             "(WireSpeed[%d], EEE[%d])\n",
17114                             tg3_phy_string(tp), ethtype,
17115                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17116                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17117         }
17118
17119         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17120                     (dev->features & NETIF_F_RXCSUM) != 0,
17121                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
17122                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17123                     tg3_flag(tp, ENABLE_ASF) != 0,
17124                     tg3_flag(tp, TSO_CAPABLE) != 0);
17125         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17126                     tp->dma_rwctrl,
17127                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17128                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17129
17130         pci_save_state(pdev);
17131
17132         return 0;
17133
17134 err_out_apeunmap:
17135         if (tp->aperegs) {
17136                 iounmap(tp->aperegs);
17137                 tp->aperegs = NULL;
17138         }
17139
17140 err_out_iounmap:
17141         if (tp->regs) {
17142                 iounmap(tp->regs);
17143                 tp->regs = NULL;
17144         }
17145
17146 err_out_free_dev:
17147         free_netdev(dev);
17148
17149 err_out_power_down:
17150         pci_set_power_state(pdev, PCI_D3hot);
17151
17152 err_out_free_res:
17153         pci_release_regions(pdev);
17154
17155 err_out_disable_pdev:
17156         pci_disable_device(pdev);
17157         pci_set_drvdata(pdev, NULL);
17158         return err;
17159 }
17160
17161 static void tg3_remove_one(struct pci_dev *pdev)
17162 {
17163         struct net_device *dev = pci_get_drvdata(pdev);
17164
17165         if (dev) {
17166                 struct tg3 *tp = netdev_priv(dev);
17167
17168                 release_firmware(tp->fw);
17169
17170                 tg3_reset_task_cancel(tp);
17171
17172                 if (tg3_flag(tp, USE_PHYLIB)) {
17173                         tg3_phy_fini(tp);
17174                         tg3_mdio_fini(tp);
17175                 }
17176
17177                 unregister_netdev(dev);
17178                 if (tp->aperegs) {
17179                         iounmap(tp->aperegs);
17180                         tp->aperegs = NULL;
17181                 }
17182                 if (tp->regs) {
17183                         iounmap(tp->regs);
17184                         tp->regs = NULL;
17185                 }
17186                 free_netdev(dev);
17187                 pci_release_regions(pdev);
17188                 pci_disable_device(pdev);
17189                 pci_set_drvdata(pdev, NULL);
17190         }
17191 }
17192
17193 #ifdef CONFIG_PM_SLEEP
17194 static int tg3_suspend(struct device *device)
17195 {
17196         struct pci_dev *pdev = to_pci_dev(device);
17197         struct net_device *dev = pci_get_drvdata(pdev);
17198         struct tg3 *tp = netdev_priv(dev);
17199         int err;
17200
17201         if (!netif_running(dev))
17202                 return 0;
17203
17204         tg3_reset_task_cancel(tp);
17205         tg3_phy_stop(tp);
17206         tg3_netif_stop(tp);
17207
17208         tg3_timer_stop(tp);
17209
17210         tg3_full_lock(tp, 1);
17211         tg3_disable_ints(tp);
17212         tg3_full_unlock(tp);
17213
17214         netif_device_detach(dev);
17215
17216         tg3_full_lock(tp, 0);
17217         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17218         tg3_flag_clear(tp, INIT_COMPLETE);
17219         tg3_full_unlock(tp);
17220
17221         err = tg3_power_down_prepare(tp);
17222         if (err) {
17223                 int err2;
17224
17225                 tg3_full_lock(tp, 0);
17226
17227                 tg3_flag_set(tp, INIT_COMPLETE);
17228                 err2 = tg3_restart_hw(tp, 1);
17229                 if (err2)
17230                         goto out;
17231
17232                 tg3_timer_start(tp);
17233
17234                 netif_device_attach(dev);
17235                 tg3_netif_start(tp);
17236
17237 out:
17238                 tg3_full_unlock(tp);
17239
17240                 if (!err2)
17241                         tg3_phy_start(tp);
17242         }
17243
17244         return err;
17245 }
17246
17247 static int tg3_resume(struct device *device)
17248 {
17249         struct pci_dev *pdev = to_pci_dev(device);
17250         struct net_device *dev = pci_get_drvdata(pdev);
17251         struct tg3 *tp = netdev_priv(dev);
17252         int err;
17253
17254         if (!netif_running(dev))
17255                 return 0;
17256
17257         netif_device_attach(dev);
17258
17259         tg3_full_lock(tp, 0);
17260
17261         tg3_flag_set(tp, INIT_COMPLETE);
17262         err = tg3_restart_hw(tp, 1);
17263         if (err)
17264                 goto out;
17265
17266         tg3_timer_start(tp);
17267
17268         tg3_netif_start(tp);
17269
17270 out:
17271         tg3_full_unlock(tp);
17272
17273         if (!err)
17274                 tg3_phy_start(tp);
17275
17276         return err;
17277 }
17278
17279 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17280 #define TG3_PM_OPS (&tg3_pm_ops)
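      /* SIMPLE_DEV_PM_OPS expands to a struct dev_pm_ops that routes all of
       * the system sleep transitions (suspend/resume, freeze/thaw,
       * poweroff/restore) to tg3_suspend and tg3_resume; runtime PM
       * callbacks are left unset.
       */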
17281
17282 #else
17283
17284 #define TG3_PM_OPS NULL
17285
17286 #endif /* CONFIG_PM_SLEEP */
17287
17288 /**
17289  * tg3_io_error_detected - called when PCI error is detected
17290  * @pdev: Pointer to PCI device
17291  * @state: The current pci connection state
17292  *
17293  * This function is called after a PCI bus error affecting
17294  * this device has been detected.
17295  */
17296 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17297                                               pci_channel_state_t state)
17298 {
17299         struct net_device *netdev = pci_get_drvdata(pdev);
17300         struct tg3 *tp = netdev_priv(netdev);
17301         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17302
17303         netdev_info(netdev, "PCI I/O error detected\n");
17304
17305         rtnl_lock();
17306
17307         if (!netif_running(netdev))
17308                 goto done;
17309
17310         tg3_phy_stop(tp);
17311
17312         tg3_netif_stop(tp);
17313
17314         tg3_timer_stop(tp);
17315
17316         /* Want to make sure that the reset task doesn't run */
17317         tg3_reset_task_cancel(tp);
17318
17319         netif_device_detach(netdev);
17320
17321         /* Clean up software state, even if MMIO is blocked */
17322         tg3_full_lock(tp, 0);
17323         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17324         tg3_full_unlock(tp);
17325
17326 done:
17327         if (state == pci_channel_io_perm_failure)
17328                 err = PCI_ERS_RESULT_DISCONNECT;
17329         else
17330                 pci_disable_device(pdev);
17331
17332         rtnl_unlock();
17333
17334         return err;
17335 }
17336
17337 /**
17338  * tg3_io_slot_reset - called after the pci bus has been reset.
17339  * @pdev: Pointer to PCI device
17340  *
17341  * Restart the card from scratch, as if from a cold-boot.
17342  * At this point, the card has experienced a hard reset,
17343  * followed by fixups by BIOS, and has its config space
17344  * set up identically to what it was at cold boot.
17345  */
17346 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17347 {
17348         struct net_device *netdev = pci_get_drvdata(pdev);
17349         struct tg3 *tp = netdev_priv(netdev);
17350         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17351         int err;
17352
17353         rtnl_lock();
17354
17355         if (pci_enable_device(pdev)) {
17356                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17357                 goto done;
17358         }
17359
17360         pci_set_master(pdev);
17361         pci_restore_state(pdev);
17362         pci_save_state(pdev);
17363
17364         if (!netif_running(netdev)) {
17365                 rc = PCI_ERS_RESULT_RECOVERED;
17366                 goto done;
17367         }
17368
17369         err = tg3_power_up(tp);
17370         if (err)
17371                 goto done;
17372
17373         rc = PCI_ERS_RESULT_RECOVERED;
17374
17375 done:
17376         rtnl_unlock();
17377
17378         return rc;
17379 }
17380
17381 /**
17382  * tg3_io_resume - called when traffic can start flowing again.
17383  * @pdev: Pointer to PCI device
17384  *
17385  * This callback is called when the error recovery driver tells
17386  * us that it's OK to resume normal operation.
17387  */
17388 static void tg3_io_resume(struct pci_dev *pdev)
17389 {
17390         struct net_device *netdev = pci_get_drvdata(pdev);
17391         struct tg3 *tp = netdev_priv(netdev);
17392         int err;
17393
17394         rtnl_lock();
17395
17396         if (!netif_running(netdev))
17397                 goto done;
17398
17399         tg3_full_lock(tp, 0);
17400         tg3_flag_set(tp, INIT_COMPLETE);
17401         err = tg3_restart_hw(tp, 1);
17402         if (err) {
17403                 tg3_full_unlock(tp);
17404                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17405                 goto done;
17406         }
17407
17408         netif_device_attach(netdev);
17409
17410         tg3_timer_start(tp);
17411
17412         tg3_netif_start(tp);
17413
17414         tg3_full_unlock(tp);
17415
17416         tg3_phy_start(tp);
17417
17418 done:
17419         rtnl_unlock();
17420 }
17421
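      /* These hooks follow the PCI error recovery sequence described in
       * Documentation/PCI/pci-error-recovery.txt: the core reports the error
       * via .error_detected, then (unless told to disconnect) resets the
       * slot and calls .slot_reset, and finally calls .resume once traffic
       * may flow again.
       */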
17422 static const struct pci_error_handlers tg3_err_handler = {
17423         .error_detected = tg3_io_error_detected,
17424         .slot_reset     = tg3_io_slot_reset,
17425         .resume         = tg3_io_resume
17426 };
17427
17428 static struct pci_driver tg3_driver = {
17429         .name           = DRV_MODULE_NAME,
17430         .id_table       = tg3_pci_tbl,
17431         .probe          = tg3_init_one,
17432         .remove         = tg3_remove_one,
17433         .err_handler    = &tg3_err_handler,
17434         .driver.pm      = TG3_PM_OPS,
17435 };
17436
17437 static int __init tg3_init(void)
17438 {
17439         return pci_register_driver(&tg3_driver);
17440 }
17441
17442 static void __exit tg3_cleanup(void)
17443 {
17444         pci_unregister_driver(&tg3_driver);
17445 }
17446
17447 module_init(tg3_init);
17448 module_exit(tg3_cleanup);