/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
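
/* For example, tg3_flag(tp, TAGGED_STATUS) token-pastes to
 * _tg3_flag(TG3_FLAG_TAGGED_STATUS, (tp)->tg3_flags): callers name flags
 * by their short form and the ## paste supplies the TG3_FLAG_ prefix,
 * while the inline helpers type-check the enum argument.
 */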

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     130
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "February 14, 2013"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
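
/* NEXT_TX shows the '& (foo - 1)' trick in action: TG3_TX_RING_SIZE is a
 * power of two (512), so ((N) + 1) & 511 equals ((N) + 1) % 512 but
 * compiles to a single AND instead of a hardware divide.
 */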

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
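
/* On such architectures (e.g. x86, where NET_IP_ALIGN is 0),
 * TG3_RX_COPY_THRESH(tp) folds to the constant 256, so the rx fast path
 * compares against an immediate instead of loading tp->rx_copy_thresh.
 */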

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG357766       "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
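
/* Typical usage: tw32(reg, val) is a plain (possibly posted) write,
 * tw32_f() adds a read-back to flush the posted write, and
 * tw32_wait_f(reg, val, 40) additionally delays 40 usec before and after
 * the flushing read, for registers like TG3PCI_CLOCK_CTRL where an
 * immediate read-back is unsafe.
 */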

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver isn't holding any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
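                /* fall through */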
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
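                /* fall through */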
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
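
/* The request/grant protocol above: the caller sets its function's bit in
 * the REQ register and polls the GRANT register until exactly that bit is
 * reflected back; unlocking writes the bit to the GRANT register to clear
 * it.  Callers bracket accesses to shared resources, e.g.
 *
 *      if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *              return -EBUSY;
 *      ...access APE shared memory...
 *      tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */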

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                        TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}
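
/* An MI (MDIO) transaction is a single MAC_MI_COM write: the PHY and
 * register addresses and the command are packed into frame_val,
 * MI_COM_START kicks off the frame, and completion is detected by polling
 * until MI_COM_BUSY clears.  For a read, the result comes back in the
 * data field of the same register (MI_COM_DATA_MASK).
 */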

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}
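
/* The cl45 helpers above follow the usual Clause 22 indirect access to
 * Clause 45 MMD registers: select the devad in the MMD control register,
 * write the target register address, switch the control register to
 * data/no-post-increment mode, then transfer the data word through the
 * address register.
 */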

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
        u32 val;
        int err;

        err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
        if (err)
                return err;

        if (enable)
                val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
        else
                val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

        err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

        return err;
}
1328
1329 static int tg3_bmcr_reset(struct tg3 *tp)
1330 {
1331         u32 phy_control;
1332         int limit, err;
1333
1334         /* OK, reset it, and poll the BMCR_RESET bit until it
1335          * clears or we time out.
1336          */
1337         phy_control = BMCR_RESET;
1338         err = tg3_writephy(tp, MII_BMCR, phy_control);
1339         if (err != 0)
1340                 return -EBUSY;
1341
1342         limit = 5000;
1343         while (limit--) {
1344                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1345                 if (err != 0)
1346                         return -EBUSY;
1347
1348                 if ((phy_control & BMCR_RESET) == 0) {
1349                         udelay(40);
1350                         break;
1351                 }
1352                 udelay(10);
1353         }
1354         if (limit < 0)
1355                 return -EBUSY;
1356
1357         return 0;
1358 }
1359
1360 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1361 {
1362         struct tg3 *tp = bp->priv;
1363         u32 val;
1364
1365         spin_lock_bh(&tp->lock);
1366
1367         if (tg3_readphy(tp, reg, &val))
1368                 val = -EIO;
1369
1370         spin_unlock_bh(&tp->lock);
1371
1372         return val;
1373 }
1374
1375 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1376 {
1377         struct tg3 *tp = bp->priv;
1378         u32 ret = 0;
1379
1380         spin_lock_bh(&tp->lock);
1381
1382         if (tg3_writephy(tp, reg, val))
1383                 ret = -EIO;
1384
1385         spin_unlock_bh(&tp->lock);
1386
1387         return ret;
1388 }
1389
1390 static int tg3_mdio_reset(struct mii_bus *bp)
1391 {
1392         return 0;
1393 }
1394
1395 static void tg3_mdio_config_5785(struct tg3 *tp)
1396 {
1397         u32 val;
1398         struct phy_device *phydev;
1399
1400         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1401         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1402         case PHY_ID_BCM50610:
1403         case PHY_ID_BCM50610M:
1404                 val = MAC_PHYCFG2_50610_LED_MODES;
1405                 break;
1406         case PHY_ID_BCMAC131:
1407                 val = MAC_PHYCFG2_AC131_LED_MODES;
1408                 break;
1409         case PHY_ID_RTL8211C:
1410                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1411                 break;
1412         case PHY_ID_RTL8201E:
1413                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1414                 break;
1415         default:
1416                 return;
1417         }
1418
1419         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1420                 tw32(MAC_PHYCFG2, val);
1421
1422                 val = tr32(MAC_PHYCFG1);
1423                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1424                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1425                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1426                 tw32(MAC_PHYCFG1, val);
1427
1428                 return;
1429         }
1430
1431         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1432                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1433                        MAC_PHYCFG2_FMODE_MASK_MASK |
1434                        MAC_PHYCFG2_GMODE_MASK_MASK |
1435                        MAC_PHYCFG2_ACT_MASK_MASK   |
1436                        MAC_PHYCFG2_QUAL_MASK_MASK |
1437                        MAC_PHYCFG2_INBAND_ENABLE;
1438
1439         tw32(MAC_PHYCFG2, val);
1440
1441         val = tr32(MAC_PHYCFG1);
1442         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1443                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1444         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1445                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1446                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1447                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1448                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1449         }
1450         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1451                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1452         tw32(MAC_PHYCFG1, val);
1453
1454         val = tr32(MAC_EXT_RGMII_MODE);
1455         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1456                  MAC_RGMII_MODE_RX_QUALITY |
1457                  MAC_RGMII_MODE_RX_ACTIVITY |
1458                  MAC_RGMII_MODE_RX_ENG_DET |
1459                  MAC_RGMII_MODE_TX_ENABLE |
1460                  MAC_RGMII_MODE_TX_LOWPWR |
1461                  MAC_RGMII_MODE_TX_RESET);
1462         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1463                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1464                         val |= MAC_RGMII_MODE_RX_INT_B |
1465                                MAC_RGMII_MODE_RX_QUALITY |
1466                                MAC_RGMII_MODE_RX_ACTIVITY |
1467                                MAC_RGMII_MODE_RX_ENG_DET;
1468                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1469                         val |= MAC_RGMII_MODE_TX_ENABLE |
1470                                MAC_RGMII_MODE_TX_LOWPWR |
1471                                MAC_RGMII_MODE_TX_RESET;
1472         }
1473         tw32(MAC_EXT_RGMII_MODE, val);
1474 }
1475
1476 static void tg3_mdio_start(struct tg3 *tp)
1477 {
1478         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1479         tw32_f(MAC_MI_MODE, tp->mi_mode);
1480         udelay(80);
1481
1482         if (tg3_flag(tp, MDIOBUS_INITED) &&
1483             tg3_asic_rev(tp) == ASIC_REV_5785)
1484                 tg3_mdio_config_5785(tp);
1485 }
1486
1487 static int tg3_mdio_init(struct tg3 *tp)
1488 {
1489         int i;
1490         u32 reg;
1491         struct phy_device *phydev;
1492
1493         if (tg3_flag(tp, 5717_PLUS)) {
1494                 u32 is_serdes;
1495
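                /* On 5717-class parts each PCI function drives its own PHY:
                 * the MII address is pci_fn + 1, and serdes PHYs sit a
                 * further 7 addresses up.
                 */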
1496                 tp->phy_addr = tp->pci_fn + 1;
1497
1498                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1499                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1500                 else
1501                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1502                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1503                 if (is_serdes)
1504                         tp->phy_addr += 7;
1505         } else
1506                 tp->phy_addr = TG3_PHY_MII_ADDR;
1507
1508         tg3_mdio_start(tp);
1509
1510         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1511                 return 0;
1512
1513         tp->mdio_bus = mdiobus_alloc();
1514         if (tp->mdio_bus == NULL)
1515                 return -ENOMEM;
1516
1517         tp->mdio_bus->name     = "tg3 mdio bus";
1518         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1519                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1520         tp->mdio_bus->priv     = tp;
1521         tp->mdio_bus->parent   = &tp->pdev->dev;
1522         tp->mdio_bus->read     = &tg3_mdio_read;
1523         tp->mdio_bus->write    = &tg3_mdio_write;
1524         tp->mdio_bus->reset    = &tg3_mdio_reset;
1525         tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1526         tp->mdio_bus->irq      = &tp->mdio_irq[0];
1527
1528         for (i = 0; i < PHY_MAX_ADDR; i++)
1529                 tp->mdio_bus->irq[i] = PHY_POLL;
1530
1531         /* The bus registration will look for all the PHYs on the mdio bus.
1532          * Unfortunately, it does not ensure the PHY is powered up before
1533          * accessing the PHY ID registers.  A chip reset is the
1534          * quickest way to bring the device back to an operational state.
1535          */
1536         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1537                 tg3_bmcr_reset(tp);
1538
1539         i = mdiobus_register(tp->mdio_bus);
1540         if (i) {
1541                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1542                 mdiobus_free(tp->mdio_bus);
1543                 return i;
1544         }
1545
1546         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1547
1548         if (!phydev || !phydev->drv) {
1549                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1550                 mdiobus_unregister(tp->mdio_bus);
1551                 mdiobus_free(tp->mdio_bus);
1552                 return -ENODEV;
1553         }
1554
1555         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1556         case PHY_ID_BCM57780:
1557                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1558                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1559                 break;
1560         case PHY_ID_BCM50610:
1561         case PHY_ID_BCM50610M:
1562                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1563                                      PHY_BRCM_RX_REFCLK_UNUSED |
1564                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1565                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1566                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1567                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1568                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1569                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1570                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1571                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1572                 /* fallthru */
1573         case PHY_ID_RTL8211C:
1574                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1575                 break;
1576         case PHY_ID_RTL8201E:
1577         case PHY_ID_BCMAC131:
1578                 phydev->interface = PHY_INTERFACE_MODE_MII;
1579                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1580                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1581                 break;
1582         }
1583
1584         tg3_flag_set(tp, MDIOBUS_INITED);
1585
1586         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1587                 tg3_mdio_config_5785(tp);
1588
1589         return 0;
1590 }
1591
1592 static void tg3_mdio_fini(struct tg3 *tp)
1593 {
1594         if (tg3_flag(tp, MDIOBUS_INITED)) {
1595                 tg3_flag_clear(tp, MDIOBUS_INITED);
1596                 mdiobus_unregister(tp->mdio_bus);
1597                 mdiobus_free(tp->mdio_bus);
1598         }
1599 }
1600
1601 /* tp->lock is held. */
1602 static inline void tg3_generate_fw_event(struct tg3 *tp)
1603 {
1604         u32 val;
1605
1606         val = tr32(GRC_RX_CPU_EVENT);
1607         val |= GRC_RX_CPU_DRIVER_EVENT;
1608         tw32_f(GRC_RX_CPU_EVENT, val);
1609
1610         tp->last_event_jiffies = jiffies;
1611 }
1612
1613 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1614
1615 /* tp->lock is held. */
1616 static void tg3_wait_for_event_ack(struct tg3 *tp)
1617 {
1618         int i;
1619         unsigned int delay_cnt;
1620         long time_remain;
1621
1622         /* If enough time has passed, no wait is necessary. */
1623         time_remain = (long)(tp->last_event_jiffies + 1 +
1624                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1625                       (long)jiffies;
1626         if (time_remain < 0)
1627                 return;
1628
1629         /* Check if we can shorten the wait time. */
1630         delay_cnt = jiffies_to_usecs(time_remain);
1631         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1632                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1633         delay_cnt = (delay_cnt >> 3) + 1;
1634
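        /* Poll in ~8 usec steps; delay_cnt was sized above so the loop
         * spans roughly the remaining timeout window.
         */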
1635         for (i = 0; i < delay_cnt; i++) {
1636                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1637                         break;
1638                 udelay(8);
1639         }
1640 }
1641
1642 /* tp->lock is held. */
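/* Each output word packs two 16-bit MII registers: BMCR:BMSR,
 * ADVERTISE:LPA, CTRL1000:STAT1000, and PHYADDR in the high half of
 * the final word.
 */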
1643 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1644 {
1645         u32 reg, val;
1646
1647         val = 0;
1648         if (!tg3_readphy(tp, MII_BMCR, &reg))
1649                 val = reg << 16;
1650         if (!tg3_readphy(tp, MII_BMSR, &reg))
1651                 val |= (reg & 0xffff);
1652         *data++ = val;
1653
1654         val = 0;
1655         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1656                 val = reg << 16;
1657         if (!tg3_readphy(tp, MII_LPA, &reg))
1658                 val |= (reg & 0xffff);
1659         *data++ = val;
1660
1661         val = 0;
1662         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1663                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1664                         val = reg << 16;
1665                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1666                         val |= (reg & 0xffff);
1667         }
1668         *data++ = val;
1669
1670         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1671                 val = reg << 16;
1672         else
1673                 val = 0;
1674         *data++ = val;
1675 }
1676
1677 /* tp->lock is held. */
1678 static void tg3_ump_link_report(struct tg3 *tp)
1679 {
1680         u32 data[4];
1681
1682         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1683                 return;
1684
1685         tg3_phy_gather_ump_data(tp, data);
1686
1687         tg3_wait_for_event_ack(tp);
1688
1689         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1690         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1691         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1692         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1693         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1694         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1695
1696         tg3_generate_fw_event(tp);
1697 }
1698
1699 /* tp->lock is held. */
1700 static void tg3_stop_fw(struct tg3 *tp)
1701 {
1702         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1703                 /* Wait for RX cpu to ACK the previous event. */
1704                 tg3_wait_for_event_ack(tp);
1705
1706                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1707
1708                 tg3_generate_fw_event(tp);
1709
1710                 /* Wait for RX cpu to ACK this event. */
1711                 tg3_wait_for_event_ack(tp);
1712         }
1713 }
1714
1715 /* tp->lock is held. */
1716 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1717 {
1718         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1719                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1720
1721         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1722                 switch (kind) {
1723                 case RESET_KIND_INIT:
1724                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1725                                       DRV_STATE_START);
1726                         break;
1727
1728                 case RESET_KIND_SHUTDOWN:
1729                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1730                                       DRV_STATE_UNLOAD);
1731                         break;
1732
1733                 case RESET_KIND_SUSPEND:
1734                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1735                                       DRV_STATE_SUSPEND);
1736                         break;
1737
1738                 default:
1739                         break;
1740                 }
1741         }
1742
1743         if (kind == RESET_KIND_INIT ||
1744             kind == RESET_KIND_SUSPEND)
1745                 tg3_ape_driver_state_change(tp, kind);
1746 }
1747
1748 /* tp->lock is held. */
1749 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1750 {
1751         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1752                 switch (kind) {
1753                 case RESET_KIND_INIT:
1754                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755                                       DRV_STATE_START_DONE);
1756                         break;
1757
1758                 case RESET_KIND_SHUTDOWN:
1759                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1760                                       DRV_STATE_UNLOAD_DONE);
1761                         break;
1762
1763                 default:
1764                         break;
1765                 }
1766         }
1767
1768         if (kind == RESET_KIND_SHUTDOWN)
1769                 tg3_ape_driver_state_change(tp, kind);
1770 }
1771
1772 /* tp->lock is held. */
1773 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1774 {
1775         if (tg3_flag(tp, ENABLE_ASF)) {
1776                 switch (kind) {
1777                 case RESET_KIND_INIT:
1778                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1779                                       DRV_STATE_START);
1780                         break;
1781
1782                 case RESET_KIND_SHUTDOWN:
1783                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1784                                       DRV_STATE_UNLOAD);
1785                         break;
1786
1787                 case RESET_KIND_SUSPEND:
1788                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1789                                       DRV_STATE_SUSPEND);
1790                         break;
1791
1792                 default:
1793                         break;
1794                 }
1795         }
1796 }
1797
1798 static int tg3_poll_fw(struct tg3 *tp)
1799 {
1800         int i;
1801         u32 val;
1802
1803         if (tg3_flag(tp, IS_SSB_CORE)) {
1804                 /* We don't use firmware. */
1805                 return 0;
1806         }
1807
1808         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1809                 /* Wait up to 20ms for init done. */
1810                 for (i = 0; i < 200; i++) {
1811                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1812                                 return 0;
1813                         udelay(100);
1814                 }
1815                 return -ENODEV;
1816         }
1817
1818         /* Wait for firmware initialization to complete. */
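        /* The boot firmware signals completion by posting the one's
         * complement of the magic value written to the mailbox before
         * the reset.
         */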
1819         for (i = 0; i < 100000; i++) {
1820                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1821                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1822                         break;
1823                 udelay(10);
1824         }
1825
1826         /* Chip might not be fitted with firmware.  Some Sun onboard
1827          * parts are configured like that.  So don't signal the timeout
1828          * of the above loop as an error, but do report the lack of
1829          * running firmware once.
1830          */
1831         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1832                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1833
1834                 netdev_info(tp->dev, "No firmware running\n");
1835         }
1836
1837         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1838                 /* The 57765 A0 needs a little more
1839                  * time to do some important work.
1840                  */
1841                 mdelay(10);
1842         }
1843
1844         return 0;
1845 }
1846
1847 static void tg3_link_report(struct tg3 *tp)
1848 {
1849         if (!netif_carrier_ok(tp->dev)) {
1850                 netif_info(tp, link, tp->dev, "Link is down\n");
1851                 tg3_ump_link_report(tp);
1852         } else if (netif_msg_link(tp)) {
1853                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1854                             (tp->link_config.active_speed == SPEED_1000 ?
1855                              1000 :
1856                              (tp->link_config.active_speed == SPEED_100 ?
1857                               100 : 10)),
1858                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1859                              "full" : "half"));
1860
1861                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1862                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1863                             "on" : "off",
1864                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1865                             "on" : "off");
1866
1867                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1868                         netdev_info(tp->dev, "EEE is %s\n",
1869                                     tp->setlpicnt ? "enabled" : "disabled");
1870
1871                 tg3_ump_link_report(tp);
1872         }
1873
1874         tp->link_up = netif_carrier_ok(tp->dev);
1875 }
1876
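/* Map the driver's FLOW_CTRL_TX/RX bits onto the 1000BASE-X pause
 * advertisement bits (symmetric and/or asymmetric pause).
 */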
1877 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1878 {
1879         u16 miireg;
1880
1881         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1882                 miireg = ADVERTISE_1000XPAUSE;
1883         else if (flow_ctrl & FLOW_CTRL_TX)
1884                 miireg = ADVERTISE_1000XPSE_ASYM;
1885         else if (flow_ctrl & FLOW_CTRL_RX)
1886                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1887         else
1888                 miireg = 0;
1889
1890         return miireg;
1891 }
1892
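/* Resolve the negotiated pause configuration: symmetric pause wins when
 * both ends advertise it; otherwise asymmetric pause decides which
 * direction, if any, is enabled.
 */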
1893 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1894 {
1895         u8 cap = 0;
1896
1897         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1898                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1899         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1900                 if (lcladv & ADVERTISE_1000XPAUSE)
1901                         cap = FLOW_CTRL_RX;
1902                 if (rmtadv & ADVERTISE_1000XPAUSE)
1903                         cap = FLOW_CTRL_TX;
1904         }
1905
1906         return cap;
1907 }
1908
1909 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1910 {
1911         u8 autoneg;
1912         u8 flowctrl = 0;
1913         u32 old_rx_mode = tp->rx_mode;
1914         u32 old_tx_mode = tp->tx_mode;
1915
1916         if (tg3_flag(tp, USE_PHYLIB))
1917                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1918         else
1919                 autoneg = tp->link_config.autoneg;
1920
1921         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1922                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1923                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1924                 else
1925                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1926         } else
1927                 flowctrl = tp->link_config.flowctrl;
1928
1929         tp->link_config.active_flowctrl = flowctrl;
1930
1931         if (flowctrl & FLOW_CTRL_RX)
1932                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1933         else
1934                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1935
1936         if (old_rx_mode != tp->rx_mode)
1937                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1938
1939         if (flowctrl & FLOW_CTRL_TX)
1940                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1941         else
1942                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1943
1944         if (old_tx_mode != tp->tx_mode)
1945                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1946 }
1947
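/* phylib link-change callback: resynchronize the MAC port mode, flow
 * control and TX timings with the PHY's negotiated state, and emit a
 * link report when anything changed.
 */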
1948 static void tg3_adjust_link(struct net_device *dev)
1949 {
1950         u8 oldflowctrl, linkmesg = 0;
1951         u32 mac_mode, lcl_adv, rmt_adv;
1952         struct tg3 *tp = netdev_priv(dev);
1953         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1954
1955         spin_lock_bh(&tp->lock);
1956
1957         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1958                                     MAC_MODE_HALF_DUPLEX);
1959
1960         oldflowctrl = tp->link_config.active_flowctrl;
1961
1962         if (phydev->link) {
1963                 lcl_adv = 0;
1964                 rmt_adv = 0;
1965
1966                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1967                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1968                 else if (phydev->speed == SPEED_1000 ||
1969                          tg3_asic_rev(tp) != ASIC_REV_5785)
1970                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1971                 else
1972                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1973
1974                 if (phydev->duplex == DUPLEX_HALF)
1975                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1976                 else {
1977                         lcl_adv = mii_advertise_flowctrl(
1978                                   tp->link_config.flowctrl);
1979
1980                         if (phydev->pause)
1981                                 rmt_adv = LPA_PAUSE_CAP;
1982                         if (phydev->asym_pause)
1983                                 rmt_adv |= LPA_PAUSE_ASYM;
1984                 }
1985
1986                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1987         } else
1988                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1989
1990         if (mac_mode != tp->mac_mode) {
1991                 tp->mac_mode = mac_mode;
1992                 tw32_f(MAC_MODE, tp->mac_mode);
1993                 udelay(40);
1994         }
1995
1996         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
1997                 if (phydev->speed == SPEED_10)
1998                         tw32(MAC_MI_STAT,
1999                              MAC_MI_STAT_10MBPS_MODE |
2000                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2001                 else
2002                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2003         }
2004
2005         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2006                 tw32(MAC_TX_LENGTHS,
2007                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2008                       (6 << TX_LENGTHS_IPG_SHIFT) |
2009                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2010         else
2011                 tw32(MAC_TX_LENGTHS,
2012                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2013                       (6 << TX_LENGTHS_IPG_SHIFT) |
2014                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2015
2016         if (phydev->link != tp->old_link ||
2017             phydev->speed != tp->link_config.active_speed ||
2018             phydev->duplex != tp->link_config.active_duplex ||
2019             oldflowctrl != tp->link_config.active_flowctrl)
2020                 linkmesg = 1;
2021
2022         tp->old_link = phydev->link;
2023         tp->link_config.active_speed = phydev->speed;
2024         tp->link_config.active_duplex = phydev->duplex;
2025
2026         spin_unlock_bh(&tp->lock);
2027
2028         if (linkmesg)
2029                 tg3_link_report(tp);
2030 }
2031
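/* Connect the MAC to the PHY via phylib and mask the PHY's supported
 * features down to what the MAC interface mode can carry.
 */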
2032 static int tg3_phy_init(struct tg3 *tp)
2033 {
2034         struct phy_device *phydev;
2035
2036         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2037                 return 0;
2038
2039         /* Bring the PHY back to a known state. */
2040         tg3_bmcr_reset(tp);
2041
2042         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2043
2044         /* Attach the MAC to the PHY. */
2045         phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2046                              tg3_adjust_link, phydev->interface);
2047         if (IS_ERR(phydev)) {
2048                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2049                 return PTR_ERR(phydev);
2050         }
2051
2052         /* Mask with MAC supported features. */
2053         switch (phydev->interface) {
2054         case PHY_INTERFACE_MODE_GMII:
2055         case PHY_INTERFACE_MODE_RGMII:
2056                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2057                         phydev->supported &= (PHY_GBIT_FEATURES |
2058                                               SUPPORTED_Pause |
2059                                               SUPPORTED_Asym_Pause);
2060                         break;
2061                 }
2062                 /* fallthru */
2063         case PHY_INTERFACE_MODE_MII:
2064                 phydev->supported &= (PHY_BASIC_FEATURES |
2065                                       SUPPORTED_Pause |
2066                                       SUPPORTED_Asym_Pause);
2067                 break;
2068         default:
2069                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2070                 return -EINVAL;
2071         }
2072
2073         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2074
2075         phydev->advertising = phydev->supported;
2076
2077         return 0;
2078 }
2079
2080 static void tg3_phy_start(struct tg3 *tp)
2081 {
2082         struct phy_device *phydev;
2083
2084         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2085                 return;
2086
2087         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2088
2089         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2090                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2091                 phydev->speed = tp->link_config.speed;
2092                 phydev->duplex = tp->link_config.duplex;
2093                 phydev->autoneg = tp->link_config.autoneg;
2094                 phydev->advertising = tp->link_config.advertising;
2095         }
2096
2097         phy_start(phydev);
2098
2099         phy_start_aneg(phydev);
2100 }
2101
2102 static void tg3_phy_stop(struct tg3 *tp)
2103 {
2104         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2105                 return;
2106
2107         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2108 }
2109
2110 static void tg3_phy_fini(struct tg3 *tp)
2111 {
2112         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2113                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2114                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2115         }
2116 }
2117
2118 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2119 {
2120         int err;
2121         u32 val;
2122
2123         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2124                 return 0;
2125
2126         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2127                 /* Cannot do read-modify-write on 5401 */
2128                 err = tg3_phy_auxctl_write(tp,
2129                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2130                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2131                                            0x4c20);
2132                 goto done;
2133         }
2134
2135         err = tg3_phy_auxctl_read(tp,
2136                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2137         if (err)
2138                 return err;
2139
2140         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2141         err = tg3_phy_auxctl_write(tp,
2142                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2143
2144 done:
2145         return err;
2146 }
2147
2148 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2149 {
2150         u32 phytest;
2151
2152         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2153                 u32 phy;
2154
2155                 tg3_writephy(tp, MII_TG3_FET_TEST,
2156                              phytest | MII_TG3_FET_SHADOW_EN);
2157                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2158                         if (enable)
2159                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2160                         else
2161                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2162                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2163                 }
2164                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2165         }
2166 }
2167
2168 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2169 {
2170         u32 reg;
2171
2172         if (!tg3_flag(tp, 5705_PLUS) ||
2173             (tg3_flag(tp, 5717_PLUS) &&
2174              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2175                 return;
2176
2177         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2178                 tg3_phy_fet_toggle_apd(tp, enable);
2179                 return;
2180         }
2181
2182         reg = MII_TG3_MISC_SHDW_WREN |
2183               MII_TG3_MISC_SHDW_SCR5_SEL |
2184               MII_TG3_MISC_SHDW_SCR5_LPED |
2185               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2186               MII_TG3_MISC_SHDW_SCR5_SDTL |
2187               MII_TG3_MISC_SHDW_SCR5_C125OE;
2188         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2189                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2190
2191         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2192
2193
2194         reg = MII_TG3_MISC_SHDW_WREN |
2195               MII_TG3_MISC_SHDW_APD_SEL |
2196               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2197         if (enable)
2198                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2199
2200         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2201 }
2202
2203 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2204 {
2205         u32 phy;
2206
2207         if (!tg3_flag(tp, 5705_PLUS) ||
2208             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2209                 return;
2210
2211         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2212                 u32 ephy;
2213
2214                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2215                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2216
2217                         tg3_writephy(tp, MII_TG3_FET_TEST,
2218                                      ephy | MII_TG3_FET_SHADOW_EN);
2219                         if (!tg3_readphy(tp, reg, &phy)) {
2220                                 if (enable)
2221                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2222                                 else
2223                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2224                                 tg3_writephy(tp, reg, phy);
2225                         }
2226                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2227                 }
2228         } else {
2229                 int ret;
2230
2231                 ret = tg3_phy_auxctl_read(tp,
2232                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2233                 if (!ret) {
2234                         if (enable)
2235                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2236                         else
2237                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2238                         tg3_phy_auxctl_write(tp,
2239                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2240                 }
2241         }
2242 }
2243
2244 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2245 {
2246         int ret;
2247         u32 val;
2248
2249         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2250                 return;
2251
2252         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2253         if (!ret)
2254                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2255                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2256 }
2257
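/* Unpack the factory OTP calibration word (AGC target, HPF/LPF trim,
 * VDAC, 10BT amplitude and resistor offsets, going by the mask names)
 * into the PHY DSP registers.
 */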
2258 static void tg3_phy_apply_otp(struct tg3 *tp)
2259 {
2260         u32 otp, phy;
2261
2262         if (!tp->phy_otp)
2263                 return;
2264
2265         otp = tp->phy_otp;
2266
2267         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2268                 return;
2269
2270         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2271         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2272         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2273
2274         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2275               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2276         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2277
2278         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2279         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2280         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2281
2282         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2283         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2284
2285         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2286         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2287
2288         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2289               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2290         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2291
2292         tg3_phy_toggle_auxctl_smdsp(tp, false);
2293 }
2294
2295 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2296 {
2297         u32 val;
2298
2299         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2300                 return;
2301
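        /* setlpicnt appears to arm a short countdown, serviced elsewhere
         * by the driver, before LPI is actually enabled.
         */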
2302         tp->setlpicnt = 0;
2303
2304         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2305             current_link_up == 1 &&
2306             tp->link_config.active_duplex == DUPLEX_FULL &&
2307             (tp->link_config.active_speed == SPEED_100 ||
2308              tp->link_config.active_speed == SPEED_1000)) {
2309                 u32 eeectl;
2310
2311                 if (tp->link_config.active_speed == SPEED_1000)
2312                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2313                 else
2314                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2315
2316                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2317
2318                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2319                                   TG3_CL45_D7_EEERES_STAT, &val);
2320
2321                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2322                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2323                         tp->setlpicnt = 2;
2324         }
2325
2326         if (!tp->setlpicnt) {
2327                 if (current_link_up == 1 &&
2328                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2329                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2330                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2331                 }
2332
2333                 val = tr32(TG3_CPMU_EEE_MODE);
2334                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2335         }
2336 }
2337
2338 static void tg3_phy_eee_enable(struct tg3 *tp)
2339 {
2340         u32 val;
2341
2342         if (tp->link_config.active_speed == SPEED_1000 &&
2343             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2344              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2345              tg3_flag(tp, 57765_CLASS)) &&
2346             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2347                 val = MII_TG3_DSP_TAP26_ALNOKO |
2348                       MII_TG3_DSP_TAP26_RMRXSTO;
2349                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2350                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2351         }
2352
2353         val = tr32(TG3_CPMU_EEE_MODE);
2354         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2355 }
2356
2357 static int tg3_wait_macro_done(struct tg3 *tp)
2358 {
2359         int limit = 100;
2360
2361         while (limit--) {
2362                 u32 tmp32;
2363
2364                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2365                         if ((tmp32 & 0x1000) == 0)
2366                                 break;
2367                 }
2368         }
2369         if (limit < 0)
2370                 return -EBUSY;
2371
2372         return 0;
2373 }
2374
2375 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2376 {
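        /* Per-channel DSP test patterns: (low, high) word pairs that are
         * written out and read back, masked to 15 and 4 significant bits
         * respectively, to verify each of the four channels.
         */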
2377         static const u32 test_pat[4][6] = {
2378         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2379         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2380         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2381         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2382         };
2383         int chan;
2384
2385         for (chan = 0; chan < 4; chan++) {
2386                 int i;
2387
2388                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2389                              (chan * 0x2000) | 0x0200);
2390                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2391
2392                 for (i = 0; i < 6; i++)
2393                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2394                                      test_pat[chan][i]);
2395
2396                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2397                 if (tg3_wait_macro_done(tp)) {
2398                         *resetp = 1;
2399                         return -EBUSY;
2400                 }
2401
2402                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2403                              (chan * 0x2000) | 0x0200);
2404                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2405                 if (tg3_wait_macro_done(tp)) {
2406                         *resetp = 1;
2407                         return -EBUSY;
2408                 }
2409
2410                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2411                 if (tg3_wait_macro_done(tp)) {
2412                         *resetp = 1;
2413                         return -EBUSY;
2414                 }
2415
2416                 for (i = 0; i < 6; i += 2) {
2417                         u32 low, high;
2418
2419                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2420                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2421                             tg3_wait_macro_done(tp)) {
2422                                 *resetp = 1;
2423                                 return -EBUSY;
2424                         }
2425                         low &= 0x7fff;
2426                         high &= 0x000f;
2427                         if (low != test_pat[chan][i] ||
2428                             high != test_pat[chan][i+1]) {
2429                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2430                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2431                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2432
2433                                 return -EBUSY;
2434                         }
2435                 }
2436         }
2437
2438         return 0;
2439 }
2440
2441 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2442 {
2443         int chan;
2444
2445         for (chan = 0; chan < 4; chan++) {
2446                 int i;
2447
2448                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2449                              (chan * 0x2000) | 0x0200);
2450                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2451                 for (i = 0; i < 6; i++)
2452                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2453                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2454                 if (tg3_wait_macro_done(tp))
2455                         return -EBUSY;
2456         }
2457
2458         return 0;
2459 }
2460
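/* Make up to 10 attempts: force 1000BASE-T master mode, write DSP test
 * patterns to all four channels, and fully re-reset the PHY whenever a
 * pattern readback fails.
 */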
2461 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2462 {
2463         u32 reg32, phy9_orig;
2464         int retries, do_phy_reset, err;
2465
2466         retries = 10;
2467         do_phy_reset = 1;
2468         do {
2469                 if (do_phy_reset) {
2470                         err = tg3_bmcr_reset(tp);
2471                         if (err)
2472                                 return err;
2473                         do_phy_reset = 0;
2474                 }
2475
2476                 /* Disable transmitter and interrupt.  */
2477                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2478                         continue;
2479
2480                 reg32 |= 0x3000;
2481                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2482
2483                 /* Set full-duplex, 1000 Mbps.  */
2484                 tg3_writephy(tp, MII_BMCR,
2485                              BMCR_FULLDPLX | BMCR_SPEED1000);
2486
2487                 /* Set to master mode.  */
2488                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2489                         continue;
2490
2491                 tg3_writephy(tp, MII_CTRL1000,
2492                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2493
2494                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2495                 if (err)
2496                         return err;
2497
2498                 /* Block the PHY control access.  */
2499                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2500
2501                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2502                 if (!err)
2503                         break;
2504         } while (--retries);
2505
2506         err = tg3_phy_reset_chanpat(tp);
2507         if (err)
2508                 return err;
2509
2510         tg3_phydsp_write(tp, 0x8005, 0x0000);
2511
2512         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2513         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2514
2515         tg3_phy_toggle_auxctl_smdsp(tp, false);
2516
2517         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2518
2519         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2520                 reg32 &= ~0x3000;
2521                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2522         } else if (!err)
2523                 err = -EBUSY;
2524
2525         return err;
2526 }
2527
2528 static void tg3_carrier_off(struct tg3 *tp)
2529 {
2530         netif_carrier_off(tp->dev);
2531         tp->link_up = false;
2532 }
2533
2534 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2535 {
2536         if (tg3_flag(tp, ENABLE_ASF))
2537                 netdev_warn(tp->dev,
2538                             "Management side-band traffic will be interrupted during phy settings change\n");
2539 }
2540
2541 /* This resets the tigon3 PHY unconditionally and reapplies the
2542  * chip-specific PHY workarounds afterwards.
2543  */
2544 static int tg3_phy_reset(struct tg3 *tp)
2545 {
2546         u32 val, cpmuctrl;
2547         int err;
2548
2549         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2550                 val = tr32(GRC_MISC_CFG);
2551                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2552                 udelay(40);
2553         }
2554         err  = tg3_readphy(tp, MII_BMSR, &val);
2555         err |= tg3_readphy(tp, MII_BMSR, &val);
2556         if (err != 0)
2557                 return -EBUSY;
2558
2559         if (netif_running(tp->dev) && tp->link_up) {
2560                 netif_carrier_off(tp->dev);
2561                 tg3_link_report(tp);
2562         }
2563
2564         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2565             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2566             tg3_asic_rev(tp) == ASIC_REV_5705) {
2567                 err = tg3_phy_reset_5703_4_5(tp);
2568                 if (err)
2569                         return err;
2570                 goto out;
2571         }
2572
2573         cpmuctrl = 0;
2574         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2575             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2576                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2577                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2578                         tw32(TG3_CPMU_CTRL,
2579                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2580         }
2581
2582         err = tg3_bmcr_reset(tp);
2583         if (err)
2584                 return err;
2585
2586         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2587                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2588                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2589
2590                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2591         }
2592
2593         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2594             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2595                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2596                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2597                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2598                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2599                         udelay(40);
2600                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2601                 }
2602         }
2603
2604         if (tg3_flag(tp, 5717_PLUS) &&
2605             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2606                 return 0;
2607
2608         tg3_phy_apply_otp(tp);
2609
2610         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2611                 tg3_phy_toggle_apd(tp, true);
2612         else
2613                 tg3_phy_toggle_apd(tp, false);
2614
2615 out:
2616         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2617             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2618                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2619                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2620                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2621         }
2622
2623         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2624                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2625                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2626         }
2627
2628         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2629                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2630                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2631                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2632                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2633                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2634                 }
2635         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2636                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2637                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2638                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2639                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2640                                 tg3_writephy(tp, MII_TG3_TEST1,
2641                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2642                         } else
2643                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2644
2645                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2646                 }
2647         }
2648
2649         /* Set the extended packet length bit (bit 14) on all chips
2650          * that support jumbo frames. */
2651         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2652                 /* Cannot do read-modify-write on 5401 */
2653                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2654         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2655                 /* Set bit 14 with read-modify-write to preserve other bits */
2656                 err = tg3_phy_auxctl_read(tp,
2657                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2658                 if (!err)
2659                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2660                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2661         }
2662
2663         /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
2664          * jumbo frame transmission.
2665          */
2666         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2667                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2668                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2669                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2670         }
2671
2672         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2673                 /* adjust output voltage */
2674                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2675         }
2676
2677         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2678                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2679
2680         tg3_phy_toggle_automdix(tp, 1);
2681         tg3_phy_set_wirespeed(tp);
2682         return 0;
2683 }
2684
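/* Each PCI function owns a 4-bit nibble of the GPIO message word; the
 * masks below cover the driver-present and need-VAUX bits across all
 * four functions.
 */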
2685 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2686 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2687 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2688                                           TG3_GPIO_MSG_NEED_VAUX)
2689 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2690         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2691          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2692          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2693          (TG3_GPIO_MSG_DRVR_PRES << 12))
2694
2695 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2696         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2697          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2698          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2699          (TG3_GPIO_MSG_NEED_VAUX << 12))
2700
2701 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2702 {
2703         u32 status, shift;
2704
2705         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2706             tg3_asic_rev(tp) == ASIC_REV_5719)
2707                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2708         else
2709                 status = tr32(TG3_CPMU_DRV_STATUS);
2710
2711         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2712         status &= ~(TG3_GPIO_MSG_MASK << shift);
2713         status |= (newstat << shift);
2714
2715         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2716             tg3_asic_rev(tp) == ASIC_REV_5719)
2717                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2718         else
2719                 tw32(TG3_CPMU_DRV_STATUS, status);
2720
2721         return status >> TG3_APE_GPIO_MSG_SHIFT;
2722 }
2723
2724 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2725 {
2726         if (!tg3_flag(tp, IS_NIC))
2727                 return 0;
2728
2729         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2730             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2731             tg3_asic_rev(tp) == ASIC_REV_5720) {
2732                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2733                         return -EIO;
2734
2735                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2736
2737                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2738                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2739
2740                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2741         } else {
2742                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2743                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2744         }
2745
2746         return 0;
2747 }
2748
2749 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2750 {
2751         u32 grc_local_ctrl;
2752
2753         if (!tg3_flag(tp, IS_NIC) ||
2754             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2755             tg3_asic_rev(tp) == ASIC_REV_5701)
2756                 return;
2757
2758         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2759
2760         tw32_wait_f(GRC_LOCAL_CTRL,
2761                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2762                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2763
2764         tw32_wait_f(GRC_LOCAL_CTRL,
2765                     grc_local_ctrl,
2766                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2767
2768         tw32_wait_f(GRC_LOCAL_CTRL,
2769                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2770                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2771 }
2772
2773 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2774 {
2775         if (!tg3_flag(tp, IS_NIC))
2776                 return;
2777
2778         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2779             tg3_asic_rev(tp) == ASIC_REV_5701) {
2780                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2781                             (GRC_LCLCTRL_GPIO_OE0 |
2782                              GRC_LCLCTRL_GPIO_OE1 |
2783                              GRC_LCLCTRL_GPIO_OE2 |
2784                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2785                              GRC_LCLCTRL_GPIO_OUTPUT1),
2786                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2787         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2788                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2789                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2790                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2791                                      GRC_LCLCTRL_GPIO_OE1 |
2792                                      GRC_LCLCTRL_GPIO_OE2 |
2793                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2794                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2795                                      tp->grc_local_ctrl;
2796                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2797                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2798
2799                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2800                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2801                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2802
2803                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2804                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2805                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2806         } else {
2807                 u32 no_gpio2;
2808                 u32 grc_local_ctrl = 0;
2809
2810                 /* Workaround to prevent overdrawing current. */
2811                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2812                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2813                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2814                                     grc_local_ctrl,
2815                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2816                 }
2817
2818                 /* On 5753 and variants, GPIO2 cannot be used. */
2819                 no_gpio2 = tp->nic_sram_data_cfg &
2820                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2821
2822                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2823                                   GRC_LCLCTRL_GPIO_OE1 |
2824                                   GRC_LCLCTRL_GPIO_OE2 |
2825                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2826                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2827                 if (no_gpio2) {
2828                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2829                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2830                 }
2831                 tw32_wait_f(GRC_LOCAL_CTRL,
2832                             tp->grc_local_ctrl | grc_local_ctrl,
2833                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2834
2835                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2836
2837                 tw32_wait_f(GRC_LOCAL_CTRL,
2838                             tp->grc_local_ctrl | grc_local_ctrl,
2839                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2840
2841                 if (!no_gpio2) {
2842                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2843                         tw32_wait_f(GRC_LOCAL_CTRL,
2844                                     tp->grc_local_ctrl | grc_local_ctrl,
2845                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2846                 }
2847         }
2848 }
2849
2850 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2851 {
2852         u32 msg = 0;
2853
2854         /* Serialize power state transitions */
2855         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2856                 return;
2857
2858         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2859                 msg = TG3_GPIO_MSG_NEED_VAUX;
2860
2861         msg = tg3_set_function_status(tp, msg);
2862
2863         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2864                 goto done;
2865
2866         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2867                 tg3_pwrsrc_switch_to_vaux(tp);
2868         else
2869                 tg3_pwrsrc_die_with_vmain(tp);
2870
2871 done:
2872         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2873 }
2874
2875 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2876 {
2877         bool need_vaux = false;
2878
2879         /* The GPIOs do something completely different on 57765. */
2880         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2881                 return;
2882
2883         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2884             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2885             tg3_asic_rev(tp) == ASIC_REV_5720) {
2886                 tg3_frob_aux_power_5717(tp, include_wol ?
2887                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2888                 return;
2889         }
2890
2891         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2892                 struct net_device *dev_peer;
2893
2894                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2895
2896                 /* remove_one() may have been run on the peer. */
2897                 if (dev_peer) {
2898                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2899
2900                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2901                                 return;
2902
2903                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2904                             tg3_flag(tp_peer, ENABLE_ASF))
2905                                 need_vaux = true;
2906                 }
2907         }
2908
2909         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2910             tg3_flag(tp, ENABLE_ASF))
2911                 need_vaux = true;
2912
2913         if (need_vaux)
2914                 tg3_pwrsrc_switch_to_vaux(tp);
2915         else
2916                 tg3_pwrsrc_die_with_vmain(tp);
2917 }
2918
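/* Decide whether the link polarity should be inverted for the given
 * speed, based on the LED control mode and the attached PHY type.
 */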
2919 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2920 {
2921         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2922                 return 1;
2923         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2924                 if (speed != SPEED_10)
2925                         return 1;
2926         } else if (speed == SPEED_10)
2927                 return 1;
2928
2929         return 0;
2930 }
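/* Editor's note: the polarity decision above reduces to a small table
 * (1 = MAC_MODE_LINK_POLARITY should be set):
 *
 *   LED_CTRL_MODE_PHY_2  -> 1 at any speed
 *   BCM5411 PHY          -> 1 unless running at SPEED_10
 *   any other PHY        -> 1 only at SPEED_10
 *
 * e.g. a BCM5411 at SPEED_100 yields 1, while any other PHY at SPEED_100
 * yields 0.
 */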
2931
2932 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2933 {
2934         u32 val;
2935
2936         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2937                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
2938                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2939                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2940
2941                         sg_dig_ctrl |=
2942                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2943                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2944                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2945                 }
2946                 return;
2947         }
2948
2949         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2950                 tg3_bmcr_reset(tp);
2951                 val = tr32(GRC_MISC_CFG);
2952                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2953                 udelay(40);
2954                 return;
2955         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2956                 u32 phytest;
2957                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2958                         u32 phy;
2959
2960                         tg3_writephy(tp, MII_ADVERTISE, 0);
2961                         tg3_writephy(tp, MII_BMCR,
2962                                      BMCR_ANENABLE | BMCR_ANRESTART);
2963
2964                         tg3_writephy(tp, MII_TG3_FET_TEST,
2965                                      phytest | MII_TG3_FET_SHADOW_EN);
2966                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2967                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2968                                 tg3_writephy(tp,
2969                                              MII_TG3_FET_SHDW_AUXMODE4,
2970                                              phy);
2971                         }
2972                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2973                 }
2974                 return;
2975         } else if (do_low_power) {
2976                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2977                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2978
2979                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2980                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2981                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2982                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2983         }
2984
2985         /* On some chips the PHY should not be powered down
2986          * because of hardware bugs.
2987          */
2988         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2989             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2990             (tg3_asic_rev(tp) == ASIC_REV_5780 &&
2991              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2992             (tg3_asic_rev(tp) == ASIC_REV_5717 &&
2993              !tp->pci_fn))
2994                 return;
2995
2996         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2997             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2998                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2999                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3000                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3001                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3002         }
3003
3004         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3005 }
3006
3007 /* tp->lock is held. */
3008 static int tg3_nvram_lock(struct tg3 *tp)
3009 {
3010         if (tg3_flag(tp, NVRAM)) {
3011                 int i;
3012
3013                 if (tp->nvram_lock_cnt == 0) {
3014                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3015                         for (i = 0; i < 8000; i++) {
3016                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3017                                         break;
3018                                 udelay(20);
3019                         }
3020                         if (i == 8000) {
3021                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3022                                 return -ENODEV;
3023                         }
3024                 }
3025                 tp->nvram_lock_cnt++;
3026         }
3027         return 0;
3028 }
3029
3030 /* tp->lock is held. */
3031 static void tg3_nvram_unlock(struct tg3 *tp)
3032 {
3033         if (tg3_flag(tp, NVRAM)) {
3034                 if (tp->nvram_lock_cnt > 0)
3035                         tp->nvram_lock_cnt--;
3036                 if (tp->nvram_lock_cnt == 0)
3037                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3038         }
3039 }
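/* Editor's sketch (not in the original source): the SWARB lock is
 * reference counted through tp->nvram_lock_cnt, so nested acquisitions by
 * the same driver are cheap -- only the outermost lock/unlock pair touches
 * the NVRAM_SWARB register.  A minimal illustration of the pairing,
 * assuming tp->lock is already held as the comments above require:
 */
#if 0
        if (!tg3_nvram_lock(tp)) {              /* outermost: sets SWARB_REQ_SET1 */
                if (!tg3_nvram_lock(tp))        /* nested: only bumps the count */
                        tg3_nvram_unlock(tp);   /* inner: only drops the count */
                tg3_nvram_unlock(tp);           /* outermost: SWARB_REQ_CLR1 */
        }
#endif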
3040
3041 /* tp->lock is held. */
3042 static void tg3_enable_nvram_access(struct tg3 *tp)
3043 {
3044         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3045                 u32 nvaccess = tr32(NVRAM_ACCESS);
3046
3047                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3048         }
3049 }
3050
3051 /* tp->lock is held. */
3052 static void tg3_disable_nvram_access(struct tg3 *tp)
3053 {
3054         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3055                 u32 nvaccess = tr32(NVRAM_ACCESS);
3056
3057                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3058         }
3059 }
3060
3061 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3062                                         u32 offset, u32 *val)
3063 {
3064         u32 tmp;
3065         int i;
3066
3067         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3068                 return -EINVAL;
3069
3070         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3071                                         EEPROM_ADDR_DEVID_MASK |
3072                                         EEPROM_ADDR_READ);
3073         tw32(GRC_EEPROM_ADDR,
3074              tmp |
3075              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3076              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3077               EEPROM_ADDR_ADDR_MASK) |
3078              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3079
3080         for (i = 0; i < 1000; i++) {
3081                 tmp = tr32(GRC_EEPROM_ADDR);
3082
3083                 if (tmp & EEPROM_ADDR_COMPLETE)
3084                         break;
3085                 msleep(1);
3086         }
3087         if (!(tmp & EEPROM_ADDR_COMPLETE))
3088                 return -EBUSY;
3089
3090         tmp = tr32(GRC_EEPROM_DATA);
3091
3092         /*
3093          * The data will always be opposite the native endian
3094          * format.  Perform a blind byteswap to compensate.
3095          */
3096         *val = swab32(tmp);
3097
3098         return 0;
3099 }
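/* Worked example of the blind byteswap above (editor's illustration):
 * if GRC_EEPROM_DATA reads back as 0x78563412, swab32() returns
 * 0x12345678, recovering the dword the SEEPROM stores in the host's
 * opposite endianness.  tg3_nvram_write_block_using_eeprom() below
 * applies the mirror-image swab32() on the write path.
 */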
3100
3101 #define NVRAM_CMD_TIMEOUT 10000
3102
3103 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3104 {
3105         int i;
3106
3107         tw32(NVRAM_CMD, nvram_cmd);
3108         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3109                 udelay(10);
3110                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3111                         udelay(10);
3112                         break;
3113                 }
3114         }
3115
3116         if (i == NVRAM_CMD_TIMEOUT)
3117                 return -EBUSY;
3118
3119         return 0;
3120 }
3121
3122 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3123 {
3124         if (tg3_flag(tp, NVRAM) &&
3125             tg3_flag(tp, NVRAM_BUFFERED) &&
3126             tg3_flag(tp, FLASH) &&
3127             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3128             (tp->nvram_jedecnum == JEDEC_ATMEL))
3129
3130                 addr = ((addr / tp->nvram_pagesize) <<
3131                         ATMEL_AT45DB0X1B_PAGE_POS) +
3132                        (addr % tp->nvram_pagesize);
3133
3134         return addr;
3135 }
3136
3137 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3138 {
3139         if (tg3_flag(tp, NVRAM) &&
3140             tg3_flag(tp, NVRAM_BUFFERED) &&
3141             tg3_flag(tp, FLASH) &&
3142             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3143             (tp->nvram_jedecnum == JEDEC_ATMEL))
3144
3145                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3146                         tp->nvram_pagesize) +
3147                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3148
3149         return addr;
3150 }
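/* Worked example for the two translations above (editor's illustration,
 * assuming the usual AT45DB0X1B geometry: 264-byte pages with
 * ATMEL_AT45DB0X1B_PAGE_POS == 9):
 *
 *   tg3_nvram_phys_addr(1000):    page = 1000 / 264 = 3,
 *                                 offset = 1000 % 264 = 208,
 *                                 result = (3 << 9) + 208 = 1744
 *
 *   tg3_nvram_logical_addr(1744): (1744 >> 9) * 264 + (1744 & 511)
 *                                 = 3 * 264 + 208 = 1000
 *
 * The helpers are exact inverses because a page offset never reaches the
 * page size.
 */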
3151
3152 /* NOTE: Data read in from NVRAM is byteswapped according to
3153  * the byteswapping settings for all other register accesses.
3154  * tg3 devices are BE devices, so on a BE machine, the data
3155  * returned will be exactly as it is seen in NVRAM.  On a LE
3156  * machine, the 32-bit value will be byteswapped.
3157  */
3158 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3159 {
3160         int ret;
3161
3162         if (!tg3_flag(tp, NVRAM))
3163                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3164
3165         offset = tg3_nvram_phys_addr(tp, offset);
3166
3167         if (offset > NVRAM_ADDR_MSK)
3168                 return -EINVAL;
3169
3170         ret = tg3_nvram_lock(tp);
3171         if (ret)
3172                 return ret;
3173
3174         tg3_enable_nvram_access(tp);
3175
3176         tw32(NVRAM_ADDR, offset);
3177         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3178                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3179
3180         if (ret == 0)
3181                 *val = tr32(NVRAM_RDDATA);
3182
3183         tg3_disable_nvram_access(tp);
3184
3185         tg3_nvram_unlock(tp);
3186
3187         return ret;
3188 }
3189
3190 /* Ensures NVRAM data is in bytestream format. */
3191 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3192 {
3193         u32 v;
3194         int res = tg3_nvram_read(tp, offset, &v);
3195         if (!res)
3196                 *val = cpu_to_be32(v);
3197         return res;
3198 }
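/* Editor's note: "bytestream format" above means the stored __be32
 * reproduces the NVRAM image's byte order on any host.  E.g. if the image
 * holds the bytes 1a 2b 3c 4d, tg3_nvram_read() returns 0x1a2b3c4d as a
 * host-order u32, and cpu_to_be32() lays it back down as 1a 2b 3c 4d on
 * both little- and big-endian machines.
 */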
3199
3200 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3201                                     u32 offset, u32 len, u8 *buf)
3202 {
3203         int i, j, rc = 0;
3204         u32 val;
3205
3206         for (i = 0; i < len; i += 4) {
3207                 u32 addr;
3208                 __be32 data;
3209
3210                 addr = offset + i;
3211
3212                 memcpy(&data, buf + i, 4);
3213
3214                 /*
3215                  * The SEEPROM interface expects the data to always be opposite
3216                  * the native endian format.  We accomplish this by reversing
3217                  * all the operations that would have been performed on the
3218                  * data from a call to tg3_nvram_read_be32().
3219                  */
3220                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3221
3222                 val = tr32(GRC_EEPROM_ADDR);
3223                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3224
3225                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3226                         EEPROM_ADDR_READ);
3227                 tw32(GRC_EEPROM_ADDR, val |
3228                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3229                         (addr & EEPROM_ADDR_ADDR_MASK) |
3230                         EEPROM_ADDR_START |
3231                         EEPROM_ADDR_WRITE);
3232
3233                 for (j = 0; j < 1000; j++) {
3234                         val = tr32(GRC_EEPROM_ADDR);
3235
3236                         if (val & EEPROM_ADDR_COMPLETE)
3237                                 break;
3238                         msleep(1);
3239                 }
3240                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3241                         rc = -EBUSY;
3242                         break;
3243                 }
3244         }
3245
3246         return rc;
3247 }
3248
3249 /* offset and length are dword aligned */
3250 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3251                 u8 *buf)
3252 {
3253         int ret = 0;
3254         u32 pagesize = tp->nvram_pagesize;
3255         u32 pagemask = pagesize - 1;
3256         u32 nvram_cmd;
3257         u8 *tmp;
3258
3259         tmp = kmalloc(pagesize, GFP_KERNEL);
3260         if (tmp == NULL)
3261                 return -ENOMEM;
3262
3263         while (len) {
3264                 int j;
3265                 u32 phy_addr, page_off, size;
3266
3267                 phy_addr = offset & ~pagemask;
3268
3269                 for (j = 0; j < pagesize; j += 4) {
3270                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3271                                                   (__be32 *) (tmp + j));
3272                         if (ret)
3273                                 break;
3274                 }
3275                 if (ret)
3276                         break;
3277
3278                 page_off = offset & pagemask;
3279                 size = pagesize;
3280                 if (len < size)
3281                         size = len;
3282
3283                 len -= size;
3284
3285                 memcpy(tmp + page_off, buf, size);
3286
3287                 offset = offset + (pagesize - page_off);
3288
3289                 tg3_enable_nvram_access(tp);
3290
3291                 /*
3292                  * Before we can erase the flash page, we need
3293                  * to issue a special "write enable" command.
3294                  */
3295                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3296
3297                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3298                         break;
3299
3300                 /* Erase the target page */
3301                 tw32(NVRAM_ADDR, phy_addr);
3302
3303                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3304                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3305
3306                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3307                         break;
3308
3309                 /* Issue another write enable to start the write. */
3310                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3311
3312                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3313                         break;
3314
3315                 for (j = 0; j < pagesize; j += 4) {
3316                         __be32 data;
3317
3318                         data = *((__be32 *) (tmp + j));
3319
3320                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3321
3322                         tw32(NVRAM_ADDR, phy_addr + j);
3323
3324                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3325                                 NVRAM_CMD_WR;
3326
3327                         if (j == 0)
3328                                 nvram_cmd |= NVRAM_CMD_FIRST;
3329                         else if (j == (pagesize - 4))
3330                                 nvram_cmd |= NVRAM_CMD_LAST;
3331
3332                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3333                         if (ret)
3334                                 break;
3335                 }
3336                 if (ret)
3337                         break;
3338         }
3339
3340         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3341         tg3_nvram_exec_cmd(tp, nvram_cmd);
3342
3343         kfree(tmp);
3344
3345         return ret;
3346 }
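/* Editor's summary of the unbuffered path above: this flash can only be
 * programmed a full page at a time, so each loop iteration is a
 * read-modify-write cycle:
 *
 *   1. read the whole target page into tmp[] with tg3_nvram_read_be32()
 *   2. splice the caller's bytes over the affected range
 *   3. NVRAM_CMD_WREN, then erase the page (NVRAM_CMD_ERASE)
 *   4. NVRAM_CMD_WREN again, then program tmp[] back dword by dword,
 *      flagging the first dword NVRAM_CMD_FIRST and the last NVRAM_CMD_LAST
 *
 * The final NVRAM_CMD_WRDI drops write-enable whether or not the loop
 * completed cleanly.
 */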
3347
3348 /* offset and length are dword aligned */
3349 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3350                 u8 *buf)
3351 {
3352         int i, ret = 0;
3353
3354         for (i = 0; i < len; i += 4, offset += 4) {
3355                 u32 page_off, phy_addr, nvram_cmd;
3356                 __be32 data;
3357
3358                 memcpy(&data, buf + i, 4);
3359                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3360
3361                 page_off = offset % tp->nvram_pagesize;
3362
3363                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3364
3365                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3366
3367                 if (page_off == 0 || i == 0)
3368                         nvram_cmd |= NVRAM_CMD_FIRST;
3369                 if (page_off == (tp->nvram_pagesize - 4))
3370                         nvram_cmd |= NVRAM_CMD_LAST;
3371
3372                 if (i == (len - 4))
3373                         nvram_cmd |= NVRAM_CMD_LAST;
3374
3375                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3376                     !tg3_flag(tp, FLASH) ||
3377                     !tg3_flag(tp, 57765_PLUS))
3378                         tw32(NVRAM_ADDR, phy_addr);
3379
3380                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3381                     !tg3_flag(tp, 5755_PLUS) &&
3382                     (tp->nvram_jedecnum == JEDEC_ST) &&
3383                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3384                         u32 cmd;
3385
3386                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3387                         ret = tg3_nvram_exec_cmd(tp, cmd);
3388                         if (ret)
3389                                 break;
3390                 }
3391                 if (!tg3_flag(tp, FLASH)) {
3392                         /* We always do complete word writes to eeprom. */
3393                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3394                 }
3395
3396                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3397                 if (ret)
3398                         break;
3399         }
3400         return ret;
3401 }
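/* Worked example for the FIRST/LAST flagging above (editor's
 * illustration, assuming a 256-byte nvram_pagesize): a 16-byte write at
 * offset 248 crosses a page boundary and issues four dwords as
 *
 *   offset 248: i == 0                    -> NVRAM_CMD_FIRST
 *   offset 252: page_off == 256 - 4       -> NVRAM_CMD_LAST
 *   offset 256: page_off == 0             -> NVRAM_CMD_FIRST
 *   offset 260: i == len - 4              -> NVRAM_CMD_LAST
 *
 * so each page sees a complete FIRST ... LAST burst.
 */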
3402
3403 /* offset and length are dword aligned */
3404 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3405 {
3406         int ret;
3407
3408         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3409                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3410                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3411                 udelay(40);
3412         }
3413
3414         if (!tg3_flag(tp, NVRAM)) {
3415                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3416         } else {
3417                 u32 grc_mode;
3418
3419                 ret = tg3_nvram_lock(tp);
3420                 if (ret)
3421                         return ret;
3422
3423                 tg3_enable_nvram_access(tp);
3424                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3425                         tw32(NVRAM_WRITE1, 0x406);
3426
3427                 grc_mode = tr32(GRC_MODE);
3428                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3429
3430                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3431                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3432                                 buf);
3433                 } else {
3434                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3435                                 buf);
3436                 }
3437
3438                 grc_mode = tr32(GRC_MODE);
3439                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3440
3441                 tg3_disable_nvram_access(tp);
3442                 tg3_nvram_unlock(tp);
3443         }
3444
3445         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3446                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3447                 udelay(40);
3448         }
3449
3450         return ret;
3451 }
3452
3453 #define RX_CPU_SCRATCH_BASE     0x30000
3454 #define RX_CPU_SCRATCH_SIZE     0x04000
3455 #define TX_CPU_SCRATCH_BASE     0x34000
3456 #define TX_CPU_SCRATCH_SIZE     0x04000
3457
3458 /* tp->lock is held. */
3459 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3460 {
3461         int i;
3462         const int iters = 10000;
3463
3464         for (i = 0; i < iters; i++) {
3465                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3466                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3467                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3468                         break;
3469         }
3470
3471         return (i == iters) ? -EBUSY : 0;
3472 }
3473
3474 /* tp->lock is held. */
3475 static int tg3_rxcpu_pause(struct tg3 *tp)
3476 {
3477         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3478
3479         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3480         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3481         udelay(10);
3482
3483         return rc;
3484 }
3485
3486 /* tp->lock is held. */
3487 static int tg3_txcpu_pause(struct tg3 *tp)
3488 {
3489         return tg3_pause_cpu(tp, TX_CPU_BASE);
3490 }
3491
3492 /* tp->lock is held. */
3493 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3494 {
3495         tw32(cpu_base + CPU_STATE, 0xffffffff);
3496         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3497 }
3498
3499 /* tp->lock is held. */
3500 static void tg3_rxcpu_resume(struct tg3 *tp)
3501 {
3502         tg3_resume_cpu(tp, RX_CPU_BASE);
3503 }
3504
3505 /* tp->lock is held. */
3506 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3507 {
3508         int rc;
3509
3510         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3511
3512         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3513                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3514
3515                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3516                 return 0;
3517         }
3518         if (cpu_base == RX_CPU_BASE) {
3519                 rc = tg3_rxcpu_pause(tp);
3520         } else {
3521                 /*
3522                  * There is only an Rx CPU for the 5750 derivative in the
3523                  * BCM4785.
3524                  */
3525                 if (tg3_flag(tp, IS_SSB_CORE))
3526                         return 0;
3527
3528                 rc = tg3_txcpu_pause(tp);
3529         }
3530
3531         if (rc) {
3532                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3533                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3534                 return -ENODEV;
3535         }
3536
3537         /* Clear firmware's nvram arbitration. */
3538         if (tg3_flag(tp, NVRAM))
3539                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3540         return 0;
3541 }
3542
3543 static int tg3_fw_data_len(struct tg3 *tp,
3544                            const struct tg3_firmware_hdr *fw_hdr)
3545 {
3546         int fw_len;
3547
3548         /* Non-fragmented firmware has one firmware header followed by a
3549          * contiguous chunk of data to be written. The length field in that
3550          * header is not the length of the data to be written but the complete
3551          * length of the bss. The data length is determined from
3552          * tp->fw->size minus the headers.
3553          *
3554          * Fragmented firmware has a main header followed by multiple
3555          * fragments. Each fragment is identical to non-fragmented firmware,
3556          * with a firmware header followed by a contiguous chunk of data. In
3557          * the main header, the length field is unused and set to 0xffffffff.
3558          * In each fragment header the length is the entire size of that
3559          * fragment, i.e. fragment data + header length. The data length is
3560          * therefore the length field in the header minus TG3_FW_HDR_LEN.
3561          */
3562         if (tp->fw_len == 0xffffffff)
3563                 fw_len = be32_to_cpu(fw_hdr->len);
3564         else
3565                 fw_len = tp->fw->size;
3566
3567         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3568 }
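/* Worked example (editor's illustration with made-up sizes, assuming the
 * usual 12-byte struct tg3_firmware_hdr): a non-fragmented blob with
 * tp->fw->size == 0x100 carries (0x100 - 12) / 4 = 61 dwords of data.  A
 * fragment of a fragmented blob (tp->fw_len == 0xffffffff) whose header
 * len field reads 0x20c carries (0x20c - 12) / 4 = 128 dwords.
 */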
3569
3570 /* tp->lock is held. */
3571 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3572                                  u32 cpu_scratch_base, int cpu_scratch_size,
3573                                  const struct tg3_firmware_hdr *fw_hdr)
3574 {
3575         int err, i;
3576         void (*write_op)(struct tg3 *, u32, u32);
3577         int total_len = tp->fw->size;
3578
3579         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3580                 netdev_err(tp->dev,
3581                            "%s: attempted to load TX cpu firmware on a 5705-class device\n",
3582                            __func__);
3583                 return -EINVAL;
3584         }
3585
3586         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3587                 write_op = tg3_write_mem;
3588         else
3589                 write_op = tg3_write_indirect_reg32;
3590
3591         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3592                 /* It is possible that bootcode is still loading at this point.
3593                  * Get the nvram lock before halting the cpu.
3594                  */
3595                 int lock_err = tg3_nvram_lock(tp);
3596                 err = tg3_halt_cpu(tp, cpu_base);
3597                 if (!lock_err)
3598                         tg3_nvram_unlock(tp);
3599                 if (err)
3600                         goto out;
3601
3602                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3603                         write_op(tp, cpu_scratch_base + i, 0);
3604                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3605                 tw32(cpu_base + CPU_MODE,
3606                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3607         } else {
3608                 /* Subtract the additional main header for fragmented firmware
3609                  * and advance to the first fragment.
3610                  */
3611                 total_len -= TG3_FW_HDR_LEN;
3612                 fw_hdr++;
3613         }
3614
3615         do {
3616                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3617                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3618                         write_op(tp, cpu_scratch_base +
3619                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3620                                      (i * sizeof(u32)),
3621                                  be32_to_cpu(fw_data[i]));
3622
3623                 total_len -= be32_to_cpu(fw_hdr->len);
3624
3625                 /* Advance to next fragment */
3626                 fw_hdr = (struct tg3_firmware_hdr *)
3627                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3628         } while (total_len > 0);
3629
3630         err = 0;
3631
3632 out:
3633         return err;
3634 }
3635
3636 /* tp->lock is held. */
3637 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3638 {
3639         int i;
3640         const int iters = 5;
3641
3642         tw32(cpu_base + CPU_STATE, 0xffffffff);
3643         tw32_f(cpu_base + CPU_PC, pc);
3644
3645         for (i = 0; i < iters; i++) {
3646                 if (tr32(cpu_base + CPU_PC) == pc)
3647                         break;
3648                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3649                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3650                 tw32_f(cpu_base + CPU_PC, pc);
3651                 udelay(1000);
3652         }
3653
3654         return (i == iters) ? -EBUSY : 0;
3655 }
3656
3657 /* tp->lock is held. */
3658 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3659 {
3660         const struct tg3_firmware_hdr *fw_hdr;
3661         int err;
3662
3663         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3664
3665         /* The firmware blob starts with version numbers, followed by
3666            the start address and the complete length:
3667            length = end_address_of_bss - start_address_of_text.
3668            The remainder is the blob to be loaded contiguously
3669            from the start address. */
3670
3671         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3672                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3673                                     fw_hdr);
3674         if (err)
3675                 return err;
3676
3677         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3678                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3679                                     fw_hdr);
3680         if (err)
3681                 return err;
3682
3683         /* Now start up only the RX cpu. */
3684         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3685                                        be32_to_cpu(fw_hdr->base_addr));
3686         if (err) {
3687                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3688                            "should be %08x\n", __func__,
3689                            tr32(RX_CPU_BASE + CPU_PC),
3690                            be32_to_cpu(fw_hdr->base_addr));
3691                 return -ENODEV;
3692         }
3693
3694         tg3_rxcpu_resume(tp);
3695
3696         return 0;
3697 }
3698
3699 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3700 {
3701         const int iters = 1000;
3702         int i;
3703         u32 val;
3704
3705         /* Wait for boot code to complete initialization and enter service
3706          * loop. It is then safe to download service patches.
3707          */
3708         for (i = 0; i < iters; i++) {
3709                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3710                         break;
3711
3712                 udelay(10);
3713         }
3714
3715         if (i == iters) {
3716                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3717                 return -EBUSY;
3718         }
3719
3720         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3721         if (val & 0xff) {
3722                 netdev_warn(tp->dev,
3723                             "Other patches exist. Not downloading EEE patch\n");
3724                 return -EEXIST;
3725         }
3726
3727         return 0;
3728 }
3729
3730 /* tp->lock is held. */
3731 static void tg3_load_57766_firmware(struct tg3 *tp)
3732 {
3733         struct tg3_firmware_hdr *fw_hdr;
3734
3735         if (!tg3_flag(tp, NO_NVRAM))
3736                 return;
3737
3738         if (tg3_validate_rxcpu_state(tp))
3739                 return;
3740
3741         if (!tp->fw)
3742                 return;
3743
3744         /* This firmware blob has a different format than older firmware
3745          * releases, as described below. The main difference is that we have
3746          * fragmented data to be written to non-contiguous locations.
3747          *
3748          * In the beginning we have a firmware header identical to other
3749          * firmware, consisting of version, base addr and length. The length
3750          * here is unused and set to 0xffffffff.
3751          *
3752          * This is followed by a series of firmware fragments, each of which
3753          * is individually identical to previous firmware, i.e. a firmware
3754          * header followed by the data for that fragment. The version field
3755          * of the individual fragment header is unused.
3756          */
3757
3758         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3759         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3760                 return;
3761
3762         if (tg3_rxcpu_pause(tp))
3763                 return;
3764
3765         /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3766         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3767
3768         tg3_rxcpu_resume(tp);
3769 }
3770
3771 /* tp->lock is held. */
3772 static int tg3_load_tso_firmware(struct tg3 *tp)
3773 {
3774         const struct tg3_firmware_hdr *fw_hdr;
3775         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3776         int err;
3777
3778         if (!tg3_flag(tp, FW_TSO))
3779                 return 0;
3780
3781         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3782
3783         /* The firmware blob starts with version numbers, followed by
3784            the start address and the complete length:
3785            length = end_address_of_bss - start_address_of_text.
3786            The remainder is the blob to be loaded contiguously
3787            from the start address. */
3788
3789         cpu_scratch_size = tp->fw_len;
3790
3791         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3792                 cpu_base = RX_CPU_BASE;
3793                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3794         } else {
3795                 cpu_base = TX_CPU_BASE;
3796                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3797                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3798         }
3799
3800         err = tg3_load_firmware_cpu(tp, cpu_base,
3801                                     cpu_scratch_base, cpu_scratch_size,
3802                                     fw_hdr);
3803         if (err)
3804                 return err;
3805
3806         /* Now start up the cpu. */
3807         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3808                                        be32_to_cpu(fw_hdr->base_addr));
3809         if (err) {
3810                 netdev_err(tp->dev,
3811                            "%s fails to set CPU PC, is %08x should be %08x\n",
3812                            __func__, tr32(cpu_base + CPU_PC),
3813                            be32_to_cpu(fw_hdr->base_addr));
3814                 return -ENODEV;
3815         }
3816
3817         tg3_resume_cpu(tp, cpu_base);
3818         return 0;
3819 }
3820
3821
3822 /* tp->lock is held. */
3823 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3824 {
3825         u32 addr_high, addr_low;
3826         int i;
3827
3828         addr_high = ((tp->dev->dev_addr[0] << 8) |
3829                      tp->dev->dev_addr[1]);
3830         addr_low = ((tp->dev->dev_addr[2] << 24) |
3831                     (tp->dev->dev_addr[3] << 16) |
3832                     (tp->dev->dev_addr[4] <<  8) |
3833                     (tp->dev->dev_addr[5] <<  0));
3834         for (i = 0; i < 4; i++) {
3835                 if (i == 1 && skip_mac_1)
3836                         continue;
3837                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3838                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3839         }
3840
3841         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3842             tg3_asic_rev(tp) == ASIC_REV_5704) {
3843                 for (i = 0; i < 12; i++) {
3844                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3845                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3846                 }
3847         }
3848
3849         addr_high = (tp->dev->dev_addr[0] +
3850                      tp->dev->dev_addr[1] +
3851                      tp->dev->dev_addr[2] +
3852                      tp->dev->dev_addr[3] +
3853                      tp->dev->dev_addr[4] +
3854                      tp->dev->dev_addr[5]) &
3855                 TX_BACKOFF_SEED_MASK;
3856         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3857 }
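/* Worked example of the packing above (editor's illustration): for
 * dev_addr 00:10:18:aa:bb:cc,
 *
 *   addr_high = 0x0010       (bytes 0-1)
 *   addr_low  = 0x18aabbcc   (bytes 2-5)
 *
 * and the backoff seed is the byte sum 0x00 + 0x10 + 0x18 + 0xaa + 0xbb +
 * 0xcc = 0x259, masked with TX_BACKOFF_SEED_MASK.
 */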
3858
3859 static void tg3_enable_register_access(struct tg3 *tp)
3860 {
3861         /*
3862          * Make sure register accesses (indirect or otherwise) will function
3863          * correctly.
3864          */
3865         pci_write_config_dword(tp->pdev,
3866                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3867 }
3868
3869 static int tg3_power_up(struct tg3 *tp)
3870 {
3871         int err;
3872
3873         tg3_enable_register_access(tp);
3874
3875         err = pci_set_power_state(tp->pdev, PCI_D0);
3876         if (!err) {
3877                 /* Switch out of Vaux if it is a NIC */
3878                 tg3_pwrsrc_switch_to_vmain(tp);
3879         } else {
3880                 netdev_err(tp->dev, "Transition to D0 failed\n");
3881         }
3882
3883         return err;
3884 }
3885
3886 static int tg3_setup_phy(struct tg3 *, int);
3887
3888 static int tg3_power_down_prepare(struct tg3 *tp)
3889 {
3890         u32 misc_host_ctrl;
3891         bool device_should_wake, do_low_power;
3892
3893         tg3_enable_register_access(tp);
3894
3895         /* Restore the CLKREQ setting. */
3896         if (tg3_flag(tp, CLKREQ_BUG))
3897                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3898                                          PCI_EXP_LNKCTL_CLKREQ_EN);
3899
3900         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3901         tw32(TG3PCI_MISC_HOST_CTRL,
3902              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3903
3904         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3905                              tg3_flag(tp, WOL_ENABLE);
3906
3907         if (tg3_flag(tp, USE_PHYLIB)) {
3908                 do_low_power = false;
3909                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3910                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3911                         struct phy_device *phydev;
3912                         u32 phyid, advertising;
3913
3914                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3915
3916                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3917
3918                         tp->link_config.speed = phydev->speed;
3919                         tp->link_config.duplex = phydev->duplex;
3920                         tp->link_config.autoneg = phydev->autoneg;
3921                         tp->link_config.advertising = phydev->advertising;
3922
3923                         advertising = ADVERTISED_TP |
3924                                       ADVERTISED_Pause |
3925                                       ADVERTISED_Autoneg |
3926                                       ADVERTISED_10baseT_Half;
3927
3928                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3929                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3930                                         advertising |=
3931                                                 ADVERTISED_100baseT_Half |
3932                                                 ADVERTISED_100baseT_Full |
3933                                                 ADVERTISED_10baseT_Full;
3934                                 else
3935                                         advertising |= ADVERTISED_10baseT_Full;
3936                         }
3937
3938                         phydev->advertising = advertising;
3939
3940                         phy_start_aneg(phydev);
3941
3942                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3943                         if (phyid != PHY_ID_BCMAC131) {
3944                                 phyid &= PHY_BCM_OUI_MASK;
3945                                 if (phyid == PHY_BCM_OUI_1 ||
3946                                     phyid == PHY_BCM_OUI_2 ||
3947                                     phyid == PHY_BCM_OUI_3)
3948                                         do_low_power = true;
3949                         }
3950                 }
3951         } else {
3952                 do_low_power = true;
3953
3954                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3955                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3956
3957                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3958                         tg3_setup_phy(tp, 0);
3959         }
3960
3961         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3962                 u32 val;
3963
3964                 val = tr32(GRC_VCPU_EXT_CTRL);
3965                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3966         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3967                 int i;
3968                 u32 val;
3969
3970                 for (i = 0; i < 200; i++) {
3971                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3972                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3973                                 break;
3974                         msleep(1);
3975                 }
3976         }
3977         if (tg3_flag(tp, WOL_CAP))
3978                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3979                                                      WOL_DRV_STATE_SHUTDOWN |
3980                                                      WOL_DRV_WOL |
3981                                                      WOL_SET_MAGIC_PKT);
3982
3983         if (device_should_wake) {
3984                 u32 mac_mode;
3985
3986                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3987                         if (do_low_power &&
3988                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3989                                 tg3_phy_auxctl_write(tp,
3990                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3991                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3992                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3993                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3994                                 udelay(40);
3995                         }
3996
3997                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3998                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3999                         else
4000                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4001
4002                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4003                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4004                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4005                                              SPEED_100 : SPEED_10;
4006                                 if (tg3_5700_link_polarity(tp, speed))
4007                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4008                                 else
4009                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4010                         }
4011                 } else {
4012                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4013                 }
4014
4015                 if (!tg3_flag(tp, 5750_PLUS))
4016                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4017
4018                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4019                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4020                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4021                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4022
4023                 if (tg3_flag(tp, ENABLE_APE))
4024                         mac_mode |= MAC_MODE_APE_TX_EN |
4025                                     MAC_MODE_APE_RX_EN |
4026                                     MAC_MODE_TDE_ENABLE;
4027
4028                 tw32_f(MAC_MODE, mac_mode);
4029                 udelay(100);
4030
4031                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4032                 udelay(10);
4033         }
4034
4035         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4036             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4037              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4038                 u32 base_val;
4039
4040                 base_val = tp->pci_clock_ctrl;
4041                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4042                              CLOCK_CTRL_TXCLK_DISABLE);
4043
4044                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4045                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4046         } else if (tg3_flag(tp, 5780_CLASS) ||
4047                    tg3_flag(tp, CPMU_PRESENT) ||
4048                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4049                 /* do nothing */
4050         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4051                 u32 newbits1, newbits2;
4052
4053                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4054                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4055                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4056                                     CLOCK_CTRL_TXCLK_DISABLE |
4057                                     CLOCK_CTRL_ALTCLK);
4058                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4059                 } else if (tg3_flag(tp, 5705_PLUS)) {
4060                         newbits1 = CLOCK_CTRL_625_CORE;
4061                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4062                 } else {
4063                         newbits1 = CLOCK_CTRL_ALTCLK;
4064                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4065                 }
4066
4067                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4068                             40);
4069
4070                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4071                             40);
4072
4073                 if (!tg3_flag(tp, 5705_PLUS)) {
4074                         u32 newbits3;
4075
4076                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4077                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4078                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4079                                             CLOCK_CTRL_TXCLK_DISABLE |
4080                                             CLOCK_CTRL_44MHZ_CORE);
4081                         } else {
4082                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4083                         }
4084
4085                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4086                                     tp->pci_clock_ctrl | newbits3, 40);
4087                 }
4088         }
4089
4090         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4091                 tg3_power_down_phy(tp, do_low_power);
4092
4093         tg3_frob_aux_power(tp, true);
4094
4095         /* Workaround for unstable PLL clock */
4096         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4097             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4098              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4099                 u32 val = tr32(0x7d00);
4100
4101                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4102                 tw32(0x7d00, val);
4103                 if (!tg3_flag(tp, ENABLE_ASF)) {
4104                         int err;
4105
4106                         err = tg3_nvram_lock(tp);
4107                         tg3_halt_cpu(tp, RX_CPU_BASE);
4108                         if (!err)
4109                                 tg3_nvram_unlock(tp);
4110                 }
4111         }
4112
4113         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4114
4115         return 0;
4116 }
4117
4118 static void tg3_power_down(struct tg3 *tp)
4119 {
4120         tg3_power_down_prepare(tp);
4121
4122         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4123         pci_set_power_state(tp->pdev, PCI_D3hot);
4124 }
4125
4126 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4127 {
4128         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4129         case MII_TG3_AUX_STAT_10HALF:
4130                 *speed = SPEED_10;
4131                 *duplex = DUPLEX_HALF;
4132                 break;
4133
4134         case MII_TG3_AUX_STAT_10FULL:
4135                 *speed = SPEED_10;
4136                 *duplex = DUPLEX_FULL;
4137                 break;
4138
4139         case MII_TG3_AUX_STAT_100HALF:
4140                 *speed = SPEED_100;
4141                 *duplex = DUPLEX_HALF;
4142                 break;
4143
4144         case MII_TG3_AUX_STAT_100FULL:
4145                 *speed = SPEED_100;
4146                 *duplex = DUPLEX_FULL;
4147                 break;
4148
4149         case MII_TG3_AUX_STAT_1000HALF:
4150                 *speed = SPEED_1000;
4151                 *duplex = DUPLEX_HALF;
4152                 break;
4153
4154         case MII_TG3_AUX_STAT_1000FULL:
4155                 *speed = SPEED_1000;
4156                 *duplex = DUPLEX_FULL;
4157                 break;
4158
4159         default:
4160                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4161                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4162                                  SPEED_10;
4163                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4164                                   DUPLEX_HALF;
4165                         break;
4166                 }
4167                 *speed = SPEED_UNKNOWN;
4168                 *duplex = DUPLEX_UNKNOWN;
4169                 break;
4170         }
4171 }
4172
4173 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4174 {
4175         int err = 0;
4176         u32 val, new_adv;
4177
4178         new_adv = ADVERTISE_CSMA;
4179         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4180         new_adv |= mii_advertise_flowctrl(flowctrl);
4181
4182         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4183         if (err)
4184                 goto done;
4185
4186         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4187                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4188
4189                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4190                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4191                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4192
4193                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4194                 if (err)
4195                         goto done;
4196         }
4197
4198         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4199                 goto done;
4200
4201         tw32(TG3_CPMU_EEE_MODE,
4202              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4203
4204         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4205         if (!err) {
4206                 u32 err2;
4207
4208                 val = 0;
4209                 /* Advertise 100-BaseTX EEE ability */
4210                 if (advertise & ADVERTISED_100baseT_Full)
4211                         val |= MDIO_AN_EEE_ADV_100TX;
4212                 /* Advertise 1000-BaseT EEE ability */
4213                 if (advertise & ADVERTISED_1000baseT_Full)
4214                         val |= MDIO_AN_EEE_ADV_1000T;
4215                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4216                 if (err)
4217                         val = 0;
4218
4219                 switch (tg3_asic_rev(tp)) {
4220                 case ASIC_REV_5717:
4221                 case ASIC_REV_57765:
4222                 case ASIC_REV_57766:
4223                 case ASIC_REV_5719:
4224                         /* If we advertised any EEE abilities above... */
4225                         if (val)
4226                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4227                                       MII_TG3_DSP_TAP26_RMRXSTO |
4228                                       MII_TG3_DSP_TAP26_OPCSINPT;
4229                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4230                         /* Fall through */
4231                 case ASIC_REV_5720:
4232                 case ASIC_REV_5762:
4233                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4234                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4235                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4236                 }
4237
4238                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4239                 if (!err)
4240                         err = err2;
4241         }
4242
4243 done:
4244         return err;
4245 }
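/* Worked example for the advertisement translation above (editor's
 * illustration): with advertise = ADVERTISED_10baseT_Full |
 * ADVERTISED_100baseT_Full and flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX,
 * MII_ADVERTISE is written with ADVERTISE_CSMA | ADVERTISE_10FULL |
 * ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP; TX-only flow control would
 * contribute ADVERTISE_PAUSE_ASYM instead.  The gigabit bits go to
 * MII_CTRL1000 separately via ethtool_adv_to_mii_ctrl1000_t().
 */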
4246
4247 static void tg3_phy_copper_begin(struct tg3 *tp)
4248 {
4249         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4250             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4251                 u32 adv, fc;
4252
4253                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4254                         adv = ADVERTISED_10baseT_Half |
4255                               ADVERTISED_10baseT_Full;
4256                         if (tg3_flag(tp, WOL_SPEED_100MB))
4257                                 adv |= ADVERTISED_100baseT_Half |
4258                                        ADVERTISED_100baseT_Full;
4259
4260                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4261                 } else {
4262                         adv = tp->link_config.advertising;
4263                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4264                                 adv &= ~(ADVERTISED_1000baseT_Half |
4265                                          ADVERTISED_1000baseT_Full);
4266
4267                         fc = tp->link_config.flowctrl;
4268                 }
4269
4270                 tg3_phy_autoneg_cfg(tp, adv, fc);
4271
4272                 tg3_writephy(tp, MII_BMCR,
4273                              BMCR_ANENABLE | BMCR_ANRESTART);
4274         } else {
4275                 int i;
4276                 u32 bmcr, orig_bmcr;
4277
4278                 tp->link_config.active_speed = tp->link_config.speed;
4279                 tp->link_config.active_duplex = tp->link_config.duplex;
4280
4281                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4282                         /* With autoneg disabled, the 5714/5715 family only
4283                          * links up when the advertisement register has the
4284                          * configured speed enabled.
4285                          */
4286                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4287                 }
4288
4289                 bmcr = 0;
4290                 switch (tp->link_config.speed) {
4291                 default:
4292                 case SPEED_10:
4293                         break;
4294
4295                 case SPEED_100:
4296                         bmcr |= BMCR_SPEED100;
4297                         break;
4298
4299                 case SPEED_1000:
4300                         bmcr |= BMCR_SPEED1000;
4301                         break;
4302                 }
4303
4304                 if (tp->link_config.duplex == DUPLEX_FULL)
4305                         bmcr |= BMCR_FULLDPLX;
4306
4307                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4308                     (bmcr != orig_bmcr)) {
4309                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4310                         for (i = 0; i < 1500; i++) {
4311                                 u32 tmp;
4312
4313                                 udelay(10);
4314                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4315                                     tg3_readphy(tp, MII_BMSR, &tmp))
4316                                         continue;
4317                                 if (!(tmp & BMSR_LSTATUS)) {
4318                                         udelay(40);
4319                                         break;
4320                                 }
4321                         }
4322                         tg3_writephy(tp, MII_BMCR, bmcr);
4323                         udelay(40);
4324                 }
4325         }
4326 }
4327
4328 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4329 {
4330         int err;
4331
4332         /* Turn off tap power management. */
4333         /* Set Extended packet length bit */
4334         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4335
4336         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4337         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4338         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4339         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4340         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4341
4342         udelay(40);
4343
4344         return err;
4345 }
4346
4347 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4348 {
4349         u32 advmsk, tgtadv, advertising;
4350
4351         advertising = tp->link_config.advertising;
4352         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4353
4354         advmsk = ADVERTISE_ALL;
4355         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4356                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4357                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4358         }
4359
4360         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4361                 return false;
4362
4363         if ((*lcladv & advmsk) != tgtadv)
4364                 return false;
4365
4366         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4367                 u32 tg3_ctrl;
4368
4369                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4370
4371                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4372                         return false;
4373
4374                 if (tgtadv &&
4375                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4376                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4377                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4378                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4379                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4380                 } else {
4381                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4382                 }
4383
4384                 if (tg3_ctrl != tgtadv)
4385                         return false;
4386         }
4387
4388         return true;
4389 }
4390
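/* Fetch the link partner's abilities from MII_STAT1000 (gigabit) and
 * MII_LPA, convert them to ethtool advertisement bits, and cache the
 * result in tp->link_config.rmt_adv.  Returns false on a read error.
 */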
4391 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4392 {
4393         u32 lpeth = 0;
4394
4395         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4396                 u32 val;
4397
4398                 if (tg3_readphy(tp, MII_STAT1000, &val))
4399                         return false;
4400
4401                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4402         }
4403
4404         if (tg3_readphy(tp, MII_LPA, rmtadv))
4405                 return false;
4406
4407         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4408         tp->link_config.rmt_adv = lpeth;
4409
4410         return true;
4411 }
4412
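/* Report a carrier transition to the stack if the link state changed.
 * Returns true when a change was detected and reported.
 */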
4413 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4414 {
4415         if (curr_link_up != tp->link_up) {
4416                 if (curr_link_up) {
4417                         netif_carrier_on(tp->dev);
4418                 } else {
4419                         netif_carrier_off(tp->dev);
4420                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4421                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4422                 }
4423
4424                 tg3_link_report(tp);
4425                 return true;
4426         }
4427
4428         return false;
4429 }
4430
4431 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4432 {
4433         int current_link_up;
4434         u32 bmsr, val;
4435         u32 lcl_adv, rmt_adv;
4436         u16 current_speed;
4437         u8 current_duplex;
4438         int i, err;
4439
4440         tw32(MAC_EVENT, 0);
4441
4442         tw32_f(MAC_STATUS,
4443              (MAC_STATUS_SYNC_CHANGED |
4444               MAC_STATUS_CFG_CHANGED |
4445               MAC_STATUS_MI_COMPLETION |
4446               MAC_STATUS_LNKSTATE_CHANGED));
4447         udelay(40);
4448
4449         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4450                 tw32_f(MAC_MI_MODE,
4451                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4452                 udelay(80);
4453         }
4454
4455         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4456
4457         /* Some third-party PHYs need to be reset on link going
4458          * down.
4459          */
4460         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4461              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4462              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4463             tp->link_up) {
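                /* BMSR_LSTATUS is latched low: the first read returns
                 * and clears any remembered link drop, the second read
                 * reflects the current link state.
                 */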
4464                 tg3_readphy(tp, MII_BMSR, &bmsr);
4465                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4466                     !(bmsr & BMSR_LSTATUS))
4467                         force_reset = 1;
4468         }
4469         if (force_reset)
4470                 tg3_phy_reset(tp);
4471
4472         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4473                 tg3_readphy(tp, MII_BMSR, &bmsr);
4474                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4475                     !tg3_flag(tp, INIT_COMPLETE))
4476                         bmsr = 0;
4477
4478                 if (!(bmsr & BMSR_LSTATUS)) {
4479                         err = tg3_init_5401phy_dsp(tp);
4480                         if (err)
4481                                 return err;
4482
4483                         tg3_readphy(tp, MII_BMSR, &bmsr);
4484                         for (i = 0; i < 1000; i++) {
4485                                 udelay(10);
4486                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4487                                     (bmsr & BMSR_LSTATUS)) {
4488                                         udelay(40);
4489                                         break;
4490                                 }
4491                         }
4492
4493                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4494                             TG3_PHY_REV_BCM5401_B0 &&
4495                             !(bmsr & BMSR_LSTATUS) &&
4496                             tp->link_config.active_speed == SPEED_1000) {
4497                                 err = tg3_phy_reset(tp);
4498                                 if (!err)
4499                                         err = tg3_init_5401phy_dsp(tp);
4500                                 if (err)
4501                                         return err;
4502                         }
4503                 }
4504         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4505                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4506                 /* 5701 {A0,B0} CRC bug workaround */
4507                 tg3_writephy(tp, 0x15, 0x0a75);
4508                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4509                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4510                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4511         }
4512
4513         /* Clear pending interrupts... */
4514         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4515         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4516
4517         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4518                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4519         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4520                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4521
4522         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4523             tg3_asic_rev(tp) == ASIC_REV_5701) {
4524                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4525                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4526                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4527                 else
4528                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4529         }
4530
4531         current_link_up = 0;
4532         current_speed = SPEED_UNKNOWN;
4533         current_duplex = DUPLEX_UNKNOWN;
4534         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4535         tp->link_config.rmt_adv = 0;
4536
4537         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4538                 err = tg3_phy_auxctl_read(tp,
4539                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4540                                           &val);
4541                 if (!err && !(val & (1 << 10))) {
4542                         tg3_phy_auxctl_write(tp,
4543                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4544                                              val | (1 << 10));
4545                         goto relink;
4546                 }
4547         }
4548
4549         bmsr = 0;
4550         for (i = 0; i < 100; i++) {
4551                 tg3_readphy(tp, MII_BMSR, &bmsr);
4552                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4553                     (bmsr & BMSR_LSTATUS))
4554                         break;
4555                 udelay(40);
4556         }
4557
4558         if (bmsr & BMSR_LSTATUS) {
4559                 u32 aux_stat, bmcr;
4560
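                /* Wait up to ~20 ms for the PHY to report a resolved
                 * speed/duplex in the AUX status register.
                 */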
4561                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4562                 for (i = 0; i < 2000; i++) {
4563                         udelay(10);
4564                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4565                             aux_stat)
4566                                 break;
4567                 }
4568
4569                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4570                                              &current_speed,
4571                                              &current_duplex);
4572
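                /* Re-read BMCR until it returns a plausible value
                 * (neither 0 nor the bogus 0x7fff pattern); give up
                 * after ~2 ms.
                 */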
4573                 bmcr = 0;
4574                 for (i = 0; i < 200; i++) {
4575                         tg3_readphy(tp, MII_BMCR, &bmcr);
4576                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4577                                 continue;
4578                         if (bmcr && bmcr != 0x7fff)
4579                                 break;
4580                         udelay(10);
4581                 }
4582
4583                 lcl_adv = 0;
4584                 rmt_adv = 0;
4585
4586                 tp->link_config.active_speed = current_speed;
4587                 tp->link_config.active_duplex = current_duplex;
4588
4589                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4590                         if ((bmcr & BMCR_ANENABLE) &&
4591                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4592                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4593                                 current_link_up = 1;
4594                 } else {
4595                         if (!(bmcr & BMCR_ANENABLE) &&
4596                             tp->link_config.speed == current_speed &&
4597                             tp->link_config.duplex == current_duplex) {
4598                                 current_link_up = 1;
4599                         }
4600                 }
4601
4602                 if (current_link_up == 1 &&
4603                     tp->link_config.active_duplex == DUPLEX_FULL) {
4604                         u32 reg, bit;
4605
4606                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4607                                 reg = MII_TG3_FET_GEN_STAT;
4608                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4609                         } else {
4610                                 reg = MII_TG3_EXT_STAT;
4611                                 bit = MII_TG3_EXT_STAT_MDIX;
4612                         }
4613
4614                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4615                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4616
4617                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4618                 }
4619         }
4620
4621 relink:
4622         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4623                 tg3_phy_copper_begin(tp);
4624
4625                 if (tg3_flag(tp, ROBOSWITCH)) {
4626                         current_link_up = 1;
4627                         /* FIXME: use 100 Mbit/s when the BCM5325 switch is used */
4628                         current_speed = SPEED_1000;
4629                         current_duplex = DUPLEX_FULL;
4630                         tp->link_config.active_speed = current_speed;
4631                         tp->link_config.active_duplex = current_duplex;
4632                 }
4633
4634                 tg3_readphy(tp, MII_BMSR, &bmsr);
4635                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4636                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4637                         current_link_up = 1;
4638         }
4639
4640         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4641         if (current_link_up == 1) {
4642                 if (tp->link_config.active_speed == SPEED_100 ||
4643                     tp->link_config.active_speed == SPEED_10)
4644                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4645                 else
4646                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4647         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4648                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4649         else
4650                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4651
4652         /* In order for the 5750 core in BCM4785 chip to work properly
4653          * in RGMII mode, the Led Control Register must be set up.
4654          */
4655         if (tg3_flag(tp, RGMII_MODE)) {
4656                 u32 led_ctrl = tr32(MAC_LED_CTRL);
4657                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4658
4659                 if (tp->link_config.active_speed == SPEED_10)
4660                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4661                 else if (tp->link_config.active_speed == SPEED_100)
4662                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4663                                      LED_CTRL_100MBPS_ON);
4664                 else if (tp->link_config.active_speed == SPEED_1000)
4665                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4666                                      LED_CTRL_1000MBPS_ON);
4667
4668                 tw32(MAC_LED_CTRL, led_ctrl);
4669                 udelay(40);
4670         }
4671
4672         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4673         if (tp->link_config.active_duplex == DUPLEX_HALF)
4674                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4675
4676         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4677                 if (current_link_up == 1 &&
4678                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4679                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4680                 else
4681                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4682         }
4683
4684         /* Without this setting, the Netgear GA302T PHY does not
4685          * send or receive packets; the exact reason is unknown.
4686          */
4687         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4688             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4689                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4690                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4691                 udelay(80);
4692         }
4693
4694         tw32_f(MAC_MODE, tp->mac_mode);
4695         udelay(40);
4696
4697         tg3_phy_eee_adjust(tp, current_link_up);
4698
4699         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4700                 /* Polled via timer. */
4701                 tw32_f(MAC_EVENT, 0);
4702         } else {
4703                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4704         }
4705         udelay(40);
4706
4707         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4708             current_link_up == 1 &&
4709             tp->link_config.active_speed == SPEED_1000 &&
4710             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4711                 udelay(120);
4712                 tw32_f(MAC_STATUS,
4713                      (MAC_STATUS_SYNC_CHANGED |
4714                       MAC_STATUS_CFG_CHANGED));
4715                 udelay(40);
4716                 tg3_write_mem(tp,
4717                               NIC_SRAM_FIRMWARE_MBOX,
4718                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4719         }
4720
4721         /* Prevent send BD corruption. */
4722         if (tg3_flag(tp, CLKREQ_BUG)) {
4723                 if (tp->link_config.active_speed == SPEED_100 ||
4724                     tp->link_config.active_speed == SPEED_10)
4725                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4726                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
4727                 else
4728                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4729                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
4730         }
4731
4732         tg3_test_and_report_link_chg(tp, current_link_up);
4733
4734         return 0;
4735 }
4736
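/* Software fiber autonegotiation state, modeled on the IEEE 802.3
 * clause 37 arbitration state diagram.  Used by fiber_autoneg() when
 * the MAC's hardware autoneg engine is not handling the link.
 */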
4737 struct tg3_fiber_aneginfo {
4738         int state;
4739 #define ANEG_STATE_UNKNOWN              0
4740 #define ANEG_STATE_AN_ENABLE            1
4741 #define ANEG_STATE_RESTART_INIT         2
4742 #define ANEG_STATE_RESTART              3
4743 #define ANEG_STATE_DISABLE_LINK_OK      4
4744 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4745 #define ANEG_STATE_ABILITY_DETECT       6
4746 #define ANEG_STATE_ACK_DETECT_INIT      7
4747 #define ANEG_STATE_ACK_DETECT           8
4748 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4749 #define ANEG_STATE_COMPLETE_ACK         10
4750 #define ANEG_STATE_IDLE_DETECT_INIT     11
4751 #define ANEG_STATE_IDLE_DETECT          12
4752 #define ANEG_STATE_LINK_OK              13
4753 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4754 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4755
4756         u32 flags;
4757 #define MR_AN_ENABLE            0x00000001
4758 #define MR_RESTART_AN           0x00000002
4759 #define MR_AN_COMPLETE          0x00000004
4760 #define MR_PAGE_RX              0x00000008
4761 #define MR_NP_LOADED            0x00000010
4762 #define MR_TOGGLE_TX            0x00000020
4763 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4764 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4765 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4766 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4767 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4768 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4769 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4770 #define MR_TOGGLE_RX            0x00002000
4771 #define MR_NP_RX                0x00004000
4772
4773 #define MR_LINK_OK              0x80000000
4774
4775         unsigned long link_time, cur_time;
4776
4777         u32 ability_match_cfg;
4778         int ability_match_count;
4779
4780         char ability_match, idle_match, ack_match;
4781
4782         u32 txconfig, rxconfig;
4783 #define ANEG_CFG_NP             0x00000080
4784 #define ANEG_CFG_ACK            0x00000040
4785 #define ANEG_CFG_RF2            0x00000020
4786 #define ANEG_CFG_RF1            0x00000010
4787 #define ANEG_CFG_PS2            0x00000001
4788 #define ANEG_CFG_PS1            0x00008000
4789 #define ANEG_CFG_HD             0x00004000
4790 #define ANEG_CFG_FD             0x00002000
4791 #define ANEG_CFG_INVAL          0x00001f06
4792
4793 };
4794 #define ANEG_OK         0
4795 #define ANEG_DONE       1
4796 #define ANEG_TIMER_ENAB 2
4797 #define ANEG_FAILED     -1
4798
4799 #define ANEG_STATE_SETTLE_TIME  10000
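/* ~10 ms: ap->cur_time advances roughly once per microsecond in fiber_autoneg() */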
4800
4801 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4802                                    struct tg3_fiber_aneginfo *ap)
4803 {
4804         u16 flowctrl;
4805         unsigned long delta;
4806         u32 rx_cfg_reg;
4807         int ret;
4808
4809         if (ap->state == ANEG_STATE_UNKNOWN) {
4810                 ap->rxconfig = 0;
4811                 ap->link_time = 0;
4812                 ap->cur_time = 0;
4813                 ap->ability_match_cfg = 0;
4814                 ap->ability_match_count = 0;
4815                 ap->ability_match = 0;
4816                 ap->idle_match = 0;
4817                 ap->ack_match = 0;
4818         }
4819         ap->cur_time++;
4820
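        /* Track the received config word: the partner's ability is
         * treated as stable once the same non-zero word is seen on two
         * consecutive polls; the ACK bit and the idle condition (no
         * config words at all) are tracked alongside it.
         */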
4821         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4822                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4823
4824                 if (rx_cfg_reg != ap->ability_match_cfg) {
4825                         ap->ability_match_cfg = rx_cfg_reg;
4826                         ap->ability_match = 0;
4827                         ap->ability_match_count = 0;
4828                 } else {
4829                         if (++ap->ability_match_count > 1) {
4830                                 ap->ability_match = 1;
4831                                 ap->ability_match_cfg = rx_cfg_reg;
4832                         }
4833                 }
4834                 if (rx_cfg_reg & ANEG_CFG_ACK)
4835                         ap->ack_match = 1;
4836                 else
4837                         ap->ack_match = 0;
4838
4839                 ap->idle_match = 0;
4840         } else {
4841                 ap->idle_match = 1;
4842                 ap->ability_match_cfg = 0;
4843                 ap->ability_match_count = 0;
4844                 ap->ability_match = 0;
4845                 ap->ack_match = 0;
4846
4847                 rx_cfg_reg = 0;
4848         }
4849
4850         ap->rxconfig = rx_cfg_reg;
4851         ret = ANEG_OK;
4852
4853         switch (ap->state) {
4854         case ANEG_STATE_UNKNOWN:
4855                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4856                         ap->state = ANEG_STATE_AN_ENABLE;
4857
4858                 /* fallthru */
4859         case ANEG_STATE_AN_ENABLE:
4860                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4861                 if (ap->flags & MR_AN_ENABLE) {
4862                         ap->link_time = 0;
4863                         ap->cur_time = 0;
4864                         ap->ability_match_cfg = 0;
4865                         ap->ability_match_count = 0;
4866                         ap->ability_match = 0;
4867                         ap->idle_match = 0;
4868                         ap->ack_match = 0;
4869
4870                         ap->state = ANEG_STATE_RESTART_INIT;
4871                 } else {
4872                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4873                 }
4874                 break;
4875
4876         case ANEG_STATE_RESTART_INIT:
4877                 ap->link_time = ap->cur_time;
4878                 ap->flags &= ~(MR_NP_LOADED);
4879                 ap->txconfig = 0;
4880                 tw32(MAC_TX_AUTO_NEG, 0);
4881                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4882                 tw32_f(MAC_MODE, tp->mac_mode);
4883                 udelay(40);
4884
4885                 ret = ANEG_TIMER_ENAB;
4886                 ap->state = ANEG_STATE_RESTART;
4887
4888                 /* fallthru */
4889         case ANEG_STATE_RESTART:
4890                 delta = ap->cur_time - ap->link_time;
4891                 if (delta > ANEG_STATE_SETTLE_TIME)
4892                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4893                 else
4894                         ret = ANEG_TIMER_ENAB;
4895                 break;
4896
4897         case ANEG_STATE_DISABLE_LINK_OK:
4898                 ret = ANEG_DONE;
4899                 break;
4900
4901         case ANEG_STATE_ABILITY_DETECT_INIT:
4902                 ap->flags &= ~(MR_TOGGLE_TX);
4903                 ap->txconfig = ANEG_CFG_FD;
4904                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4905                 if (flowctrl & ADVERTISE_1000XPAUSE)
4906                         ap->txconfig |= ANEG_CFG_PS1;
4907                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4908                         ap->txconfig |= ANEG_CFG_PS2;
4909                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4910                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4911                 tw32_f(MAC_MODE, tp->mac_mode);
4912                 udelay(40);
4913
4914                 ap->state = ANEG_STATE_ABILITY_DETECT;
4915                 break;
4916
4917         case ANEG_STATE_ABILITY_DETECT:
4918                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4919                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4920                 break;
4921
4922         case ANEG_STATE_ACK_DETECT_INIT:
4923                 ap->txconfig |= ANEG_CFG_ACK;
4924                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4925                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4926                 tw32_f(MAC_MODE, tp->mac_mode);
4927                 udelay(40);
4928
4929                 ap->state = ANEG_STATE_ACK_DETECT;
4930
4931                 /* fallthru */
4932         case ANEG_STATE_ACK_DETECT:
4933                 if (ap->ack_match != 0) {
4934                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4935                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4936                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4937                         } else {
4938                                 ap->state = ANEG_STATE_AN_ENABLE;
4939                         }
4940                 } else if (ap->ability_match != 0 &&
4941                            ap->rxconfig == 0) {
4942                         ap->state = ANEG_STATE_AN_ENABLE;
4943                 }
4944                 break;
4945
4946         case ANEG_STATE_COMPLETE_ACK_INIT:
4947                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4948                         ret = ANEG_FAILED;
4949                         break;
4950                 }
4951                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4952                                MR_LP_ADV_HALF_DUPLEX |
4953                                MR_LP_ADV_SYM_PAUSE |
4954                                MR_LP_ADV_ASYM_PAUSE |
4955                                MR_LP_ADV_REMOTE_FAULT1 |
4956                                MR_LP_ADV_REMOTE_FAULT2 |
4957                                MR_LP_ADV_NEXT_PAGE |
4958                                MR_TOGGLE_RX |
4959                                MR_NP_RX);
4960                 if (ap->rxconfig & ANEG_CFG_FD)
4961                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4962                 if (ap->rxconfig & ANEG_CFG_HD)
4963                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4964                 if (ap->rxconfig & ANEG_CFG_PS1)
4965                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4966                 if (ap->rxconfig & ANEG_CFG_PS2)
4967                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4968                 if (ap->rxconfig & ANEG_CFG_RF1)
4969                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4970                 if (ap->rxconfig & ANEG_CFG_RF2)
4971                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4972                 if (ap->rxconfig & ANEG_CFG_NP)
4973                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4974
4975                 ap->link_time = ap->cur_time;
4976
4977                 ap->flags ^= (MR_TOGGLE_TX);
4978                 if (ap->rxconfig & 0x0008)
4979                         ap->flags |= MR_TOGGLE_RX;
4980                 if (ap->rxconfig & ANEG_CFG_NP)
4981                         ap->flags |= MR_NP_RX;
4982                 ap->flags |= MR_PAGE_RX;
4983
4984                 ap->state = ANEG_STATE_COMPLETE_ACK;
4985                 ret = ANEG_TIMER_ENAB;
4986                 break;
4987
4988         case ANEG_STATE_COMPLETE_ACK:
4989                 if (ap->ability_match != 0 &&
4990                     ap->rxconfig == 0) {
4991                         ap->state = ANEG_STATE_AN_ENABLE;
4992                         break;
4993                 }
4994                 delta = ap->cur_time - ap->link_time;
4995                 if (delta > ANEG_STATE_SETTLE_TIME) {
4996                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4997                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4998                         } else {
4999                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5000                                     !(ap->flags & MR_NP_RX)) {
5001                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5002                                 } else {
5003                                         ret = ANEG_FAILED;
5004                                 }
5005                         }
5006                 }
5007                 break;
5008
5009         case ANEG_STATE_IDLE_DETECT_INIT:
5010                 ap->link_time = ap->cur_time;
5011                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5012                 tw32_f(MAC_MODE, tp->mac_mode);
5013                 udelay(40);
5014
5015                 ap->state = ANEG_STATE_IDLE_DETECT;
5016                 ret = ANEG_TIMER_ENAB;
5017                 break;
5018
5019         case ANEG_STATE_IDLE_DETECT:
5020                 if (ap->ability_match != 0 &&
5021                     ap->rxconfig == 0) {
5022                         ap->state = ANEG_STATE_AN_ENABLE;
5023                         break;
5024                 }
5025                 delta = ap->cur_time - ap->link_time;
5026                 if (delta > ANEG_STATE_SETTLE_TIME) {
5027                         /* XXX another gem from the Broadcom driver :( */
5028                         ap->state = ANEG_STATE_LINK_OK;
5029                 }
5030                 break;
5031
5032         case ANEG_STATE_LINK_OK:
5033                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5034                 ret = ANEG_DONE;
5035                 break;
5036
5037         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5038                 /* ??? unimplemented */
5039                 break;
5040
5041         case ANEG_STATE_NEXT_PAGE_WAIT:
5042                 /* ??? unimplemented */
5043                 break;
5044
5045         default:
5046                 ret = ANEG_FAILED;
5047                 break;
5048         }
5049
5050         return ret;
5051 }
5052
5053 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5054 {
5055         int res = 0;
5056         struct tg3_fiber_aneginfo aninfo;
5057         int status = ANEG_FAILED;
5058         unsigned int tick;
5059         u32 tmp;
5060
5061         tw32_f(MAC_TX_AUTO_NEG, 0);
5062
5063         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5064         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5065         udelay(40);
5066
5067         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5068         udelay(40);
5069
5070         memset(&aninfo, 0, sizeof(aninfo));
5071         aninfo.flags |= MR_AN_ENABLE;
5072         aninfo.state = ANEG_STATE_UNKNOWN;
5073         aninfo.cur_time = 0;
5074         tick = 0;
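        /* Step the state machine about once per microsecond, giving
         * autoneg a total budget of roughly 195 ms to complete.
         */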
5075         while (++tick < 195000) {
5076                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5077                 if (status == ANEG_DONE || status == ANEG_FAILED)
5078                         break;
5079
5080                 udelay(1);
5081         }
5082
5083         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5084         tw32_f(MAC_MODE, tp->mac_mode);
5085         udelay(40);
5086
5087         *txflags = aninfo.txconfig;
5088         *rxflags = aninfo.flags;
5089
5090         if (status == ANEG_DONE &&
5091             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5092                              MR_LP_ADV_FULL_DUPLEX)))
5093                 res = 1;
5094
5095         return res;
5096 }
5097
5098 static void tg3_init_bcm8002(struct tg3 *tp)
5099 {
5100         u32 mac_status = tr32(MAC_STATUS);
5101         int i;
5102
5103         /* Reset when initializing for the first time, or when we have a link. */
5104         if (tg3_flag(tp, INIT_COMPLETE) &&
5105             !(mac_status & MAC_STATUS_PCS_SYNCED))
5106                 return;
5107
5108         /* Set PLL lock range. */
5109         tg3_writephy(tp, 0x16, 0x8007);
5110
5111         /* SW reset */
5112         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5113
5114         /* Wait for reset to complete. */
5115         /* XXX schedule_timeout() ... */
5116         for (i = 0; i < 500; i++)
5117                 udelay(10);
5118
5119         /* Config mode; select PMA/Ch 1 regs. */
5120         tg3_writephy(tp, 0x10, 0x8411);
5121
5122         /* Enable auto-lock and comdet, select txclk for tx. */
5123         tg3_writephy(tp, 0x11, 0x0a10);
5124
5125         tg3_writephy(tp, 0x18, 0x00a0);
5126         tg3_writephy(tp, 0x16, 0x41ff);
5127
5128         /* Assert and deassert POR. */
5129         tg3_writephy(tp, 0x13, 0x0400);
5130         udelay(40);
5131         tg3_writephy(tp, 0x13, 0x0000);
5132
5133         tg3_writephy(tp, 0x11, 0x0a50);
5134         udelay(40);
5135         tg3_writephy(tp, 0x11, 0x0a10);
5136
5137         /* Wait for signal to stabilize */
5138         /* XXX schedule_timeout() ... */
5139         for (i = 0; i < 15000; i++)
5140                 udelay(10);
5141
5142         /* Deselect the channel register so we can read the PHYID
5143          * later.
5144          */
5145         tg3_writephy(tp, 0x10, 0x8011);
5146 }
5147
5148 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5149 {
5150         u16 flowctrl;
5151         u32 sg_dig_ctrl, sg_dig_status;
5152         u32 serdes_cfg, expected_sg_dig_ctrl;
5153         int workaround, port_a;
5154         int current_link_up;
5155
5156         serdes_cfg = 0;
5157         expected_sg_dig_ctrl = 0;
5158         workaround = 0;
5159         port_a = 1;
5160         current_link_up = 0;
5161
5162         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5163             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5164                 workaround = 1;
5165                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5166                         port_a = 0;
5167
5168                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5169                 /* preserve bits 20-23 for voltage regulator */
5170                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5171         }
5172
5173         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5174
5175         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5176                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5177                         if (workaround) {
5178                                 u32 val = serdes_cfg;
5179
5180                                 if (port_a)
5181                                         val |= 0xc010000;
5182                                 else
5183                                         val |= 0x4010000;
5184                                 tw32_f(MAC_SERDES_CFG, val);
5185                         }
5186
5187                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5188                 }
5189                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5190                         tg3_setup_flow_control(tp, 0, 0);
5191                         current_link_up = 1;
5192                 }
5193                 goto out;
5194         }
5195
5196         /* Want auto-negotiation.  */
5197         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5198
5199         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5200         if (flowctrl & ADVERTISE_1000XPAUSE)
5201                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5202         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5203                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5204
5205         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5206                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5207                     tp->serdes_counter &&
5208                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5209                                     MAC_STATUS_RCVD_CFG)) ==
5210                      MAC_STATUS_PCS_SYNCED)) {
5211                         tp->serdes_counter--;
5212                         current_link_up = 1;
5213                         goto out;
5214                 }
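                /* (Re)start hardware autoneg by pulsing SG_DIG_SOFT_RESET
                 * around the desired control word.
                 */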
5215 restart_autoneg:
5216                 if (workaround)
5217                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5218                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5219                 udelay(5);
5220                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5221
5222                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5223                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5224         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5225                                  MAC_STATUS_SIGNAL_DET)) {
5226                 sg_dig_status = tr32(SG_DIG_STATUS);
5227                 mac_status = tr32(MAC_STATUS);
5228
5229                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5230                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5231                         u32 local_adv = 0, remote_adv = 0;
5232
5233                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5234                                 local_adv |= ADVERTISE_1000XPAUSE;
5235                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5236                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5237
5238                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5239                                 remote_adv |= LPA_1000XPAUSE;
5240                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5241                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5242
5243                         tp->link_config.rmt_adv =
5244                                            mii_adv_to_ethtool_adv_x(remote_adv);
5245
5246                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5247                         current_link_up = 1;
5248                         tp->serdes_counter = 0;
5249                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5250                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5251                         if (tp->serdes_counter)
5252                                 tp->serdes_counter--;
5253                         else {
5254                                 if (workaround) {
5255                                         u32 val = serdes_cfg;
5256
5257                                         if (port_a)
5258                                                 val |= 0xc010000;
5259                                         else
5260                                                 val |= 0x4010000;
5261
5262                                         tw32_f(MAC_SERDES_CFG, val);
5263                                 }
5264
5265                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5266                                 udelay(40);
5267
5268                                 /* Link parallel detection: the link is up
5269                                  * only if we have PCS_SYNC and are not
5270                                  * receiving config code words. */
5271                                 mac_status = tr32(MAC_STATUS);
5272                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5273                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5274                                         tg3_setup_flow_control(tp, 0, 0);
5275                                         current_link_up = 1;
5276                                         tp->phy_flags |=
5277                                                 TG3_PHYFLG_PARALLEL_DETECT;
5278                                         tp->serdes_counter =
5279                                                 SERDES_PARALLEL_DET_TIMEOUT;
5280                                 } else
5281                                         goto restart_autoneg;
5282                         }
5283                 }
5284         } else {
5285                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5286                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5287         }
5288
5289 out:
5290         return current_link_up;
5291 }
5292
5293 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5294 {
5295         int current_link_up = 0;
5296
5297         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5298                 goto out;
5299
5300         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5301                 u32 txflags, rxflags;
5302                 int i;
5303
5304                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5305                         u32 local_adv = 0, remote_adv = 0;
5306
5307                         if (txflags & ANEG_CFG_PS1)
5308                                 local_adv |= ADVERTISE_1000XPAUSE;
5309                         if (txflags & ANEG_CFG_PS2)
5310                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5311
5312                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5313                                 remote_adv |= LPA_1000XPAUSE;
5314                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5315                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5316
5317                         tp->link_config.rmt_adv =
5318                                            mii_adv_to_ethtool_adv_x(remote_adv);
5319
5320                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5321
5322                         current_link_up = 1;
5323                 }
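                /* Ack the sync/config changed status bits and wait for
                 * them to stay clear, i.e. for the link to settle.
                 */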
5324                 for (i = 0; i < 30; i++) {
5325                         udelay(20);
5326                         tw32_f(MAC_STATUS,
5327                                (MAC_STATUS_SYNC_CHANGED |
5328                                 MAC_STATUS_CFG_CHANGED));
5329                         udelay(40);
5330                         if ((tr32(MAC_STATUS) &
5331                              (MAC_STATUS_SYNC_CHANGED |
5332                               MAC_STATUS_CFG_CHANGED)) == 0)
5333                                 break;
5334                 }
5335
5336                 mac_status = tr32(MAC_STATUS);
5337                 if (current_link_up == 0 &&
5338                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5339                     !(mac_status & MAC_STATUS_RCVD_CFG))
5340                         current_link_up = 1;
5341         } else {
5342                 tg3_setup_flow_control(tp, 0, 0);
5343
5344                 /* Forcing 1000FD link up. */
5345                 current_link_up = 1;
5346
5347                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5348                 udelay(40);
5349
5350                 tw32_f(MAC_MODE, tp->mac_mode);
5351                 udelay(40);
5352         }
5353
5354 out:
5355         return current_link_up;
5356 }
5357
5358 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5359 {
5360         u32 orig_pause_cfg;
5361         u16 orig_active_speed;
5362         u8 orig_active_duplex;
5363         u32 mac_status;
5364         int current_link_up;
5365         int i;
5366
5367         orig_pause_cfg = tp->link_config.active_flowctrl;
5368         orig_active_speed = tp->link_config.active_speed;
5369         orig_active_duplex = tp->link_config.active_duplex;
5370
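        /* Fast path: if a software-managed fiber link is already up and
         * the MAC status still shows a synced link with signal detect
         * and no incoming config words, just ack the status bits and
         * leave the link alone.
         */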
5371         if (!tg3_flag(tp, HW_AUTONEG) &&
5372             tp->link_up &&
5373             tg3_flag(tp, INIT_COMPLETE)) {
5374                 mac_status = tr32(MAC_STATUS);
5375                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5376                                MAC_STATUS_SIGNAL_DET |
5377                                MAC_STATUS_CFG_CHANGED |
5378                                MAC_STATUS_RCVD_CFG);
5379                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5380                                    MAC_STATUS_SIGNAL_DET)) {
5381                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5382                                             MAC_STATUS_CFG_CHANGED));
5383                         return 0;
5384                 }
5385         }
5386
5387         tw32_f(MAC_TX_AUTO_NEG, 0);
5388
5389         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5390         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5391         tw32_f(MAC_MODE, tp->mac_mode);
5392         udelay(40);
5393
5394         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5395                 tg3_init_bcm8002(tp);
5396
5397         /* Enable link change events even when polling the serdes. */
5398         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5399         udelay(40);
5400
5401         current_link_up = 0;
5402         tp->link_config.rmt_adv = 0;
5403         mac_status = tr32(MAC_STATUS);
5404
5405         if (tg3_flag(tp, HW_AUTONEG))
5406                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5407         else
5408                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5409
5410         tp->napi[0].hw_status->status =
5411                 (SD_STATUS_UPDATED |
5412                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5413
5414         for (i = 0; i < 100; i++) {
5415                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5416                                     MAC_STATUS_CFG_CHANGED));
5417                 udelay(5);
5418                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5419                                          MAC_STATUS_CFG_CHANGED |
5420                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5421                         break;
5422         }
5423
5424         mac_status = tr32(MAC_STATUS);
5425         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5426                 current_link_up = 0;
5427                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5428                     tp->serdes_counter == 0) {
5429                         tw32_f(MAC_MODE, (tp->mac_mode |
5430                                           MAC_MODE_SEND_CONFIGS));
5431                         udelay(1);
5432                         tw32_f(MAC_MODE, tp->mac_mode);
5433                 }
5434         }
5435
5436         if (current_link_up == 1) {
5437                 tp->link_config.active_speed = SPEED_1000;
5438                 tp->link_config.active_duplex = DUPLEX_FULL;
5439                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5440                                     LED_CTRL_LNKLED_OVERRIDE |
5441                                     LED_CTRL_1000MBPS_ON));
5442         } else {
5443                 tp->link_config.active_speed = SPEED_UNKNOWN;
5444                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5445                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5446                                     LED_CTRL_LNKLED_OVERRIDE |
5447                                     LED_CTRL_TRAFFIC_OVERRIDE));
5448         }
5449
5450         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5451                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5452                 if (orig_pause_cfg != now_pause_cfg ||
5453                     orig_active_speed != tp->link_config.active_speed ||
5454                     orig_active_duplex != tp->link_config.active_duplex)
5455                         tg3_link_report(tp);
5456         }
5457
5458         return 0;
5459 }
5460
5461 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5462 {
5463         int current_link_up, err = 0;
5464         u32 bmsr, bmcr;
5465         u16 current_speed;
5466         u8 current_duplex;
5467         u32 local_adv, remote_adv;
5468
5469         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5470         tw32_f(MAC_MODE, tp->mac_mode);
5471         udelay(40);
5472
5473         tw32(MAC_EVENT, 0);
5474
5475         tw32_f(MAC_STATUS,
5476              (MAC_STATUS_SYNC_CHANGED |
5477               MAC_STATUS_CFG_CHANGED |
5478               MAC_STATUS_MI_COMPLETION |
5479               MAC_STATUS_LNKSTATE_CHANGED));
5480         udelay(40);
5481
5482         if (force_reset)
5483                 tg3_phy_reset(tp);
5484
5485         current_link_up = 0;
5486         current_speed = SPEED_UNKNOWN;
5487         current_duplex = DUPLEX_UNKNOWN;
5488         tp->link_config.rmt_adv = 0;
5489
5490         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5491         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
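        /* On the 5714 the serdes link state is reflected in the MAC's
         * TX status register, so override the BMSR link bit with it.
         */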
5492         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5493                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5494                         bmsr |= BMSR_LSTATUS;
5495                 else
5496                         bmsr &= ~BMSR_LSTATUS;
5497         }
5498
5499         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5500
5501         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5502             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5503                 /* do nothing, just check for link up at the end */
5504         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5505                 u32 adv, newadv;
5506
5507                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5508                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5509                                  ADVERTISE_1000XPAUSE |
5510                                  ADVERTISE_1000XPSE_ASYM |
5511                                  ADVERTISE_SLCT);
5512
5513                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5514                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5515
5516                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5517                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5518                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5519                         tg3_writephy(tp, MII_BMCR, bmcr);
5520
5521                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5522                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5523                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5524
5525                         return err;
5526                 }
5527         } else {
5528                 u32 new_bmcr;
5529
5530                 bmcr &= ~BMCR_SPEED1000;
5531                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5532
5533                 if (tp->link_config.duplex == DUPLEX_FULL)
5534                         new_bmcr |= BMCR_FULLDPLX;
5535
5536                 if (new_bmcr != bmcr) {
5537                         /* BMCR_SPEED1000 is a reserved bit that needs
5538                          * to be set on write.
5539                          */
5540                         new_bmcr |= BMCR_SPEED1000;
5541
5542                         /* Force a linkdown */
5543                         if (tp->link_up) {
5544                                 u32 adv;
5545
5546                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5547                                 adv &= ~(ADVERTISE_1000XFULL |
5548                                          ADVERTISE_1000XHALF |
5549                                          ADVERTISE_SLCT);
5550                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5551                                 tg3_writephy(tp, MII_BMCR, bmcr |
5552                                                            BMCR_ANRESTART |
5553                                                            BMCR_ANENABLE);
5554                                 udelay(10);
5555                                 tg3_carrier_off(tp);
5556                         }
5557                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5558                         bmcr = new_bmcr;
5559                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5560                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5561                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5562                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5563                                         bmsr |= BMSR_LSTATUS;
5564                                 else
5565                                         bmsr &= ~BMSR_LSTATUS;
5566                         }
5567                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5568                 }
5569         }
5570
5571         if (bmsr & BMSR_LSTATUS) {
5572                 current_speed = SPEED_1000;
5573                 current_link_up = 1;
5574                 if (bmcr & BMCR_FULLDPLX)
5575                         current_duplex = DUPLEX_FULL;
5576                 else
5577                         current_duplex = DUPLEX_HALF;
5578
5579                 local_adv = 0;
5580                 remote_adv = 0;
5581
5582                 if (bmcr & BMCR_ANENABLE) {
5583                         u32 common;
5584
5585                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5586                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5587                         common = local_adv & remote_adv;
5588                         if (common & (ADVERTISE_1000XHALF |
5589                                       ADVERTISE_1000XFULL)) {
5590                                 if (common & ADVERTISE_1000XFULL)
5591                                         current_duplex = DUPLEX_FULL;
5592                                 else
5593                                         current_duplex = DUPLEX_HALF;
5594
5595                                 tp->link_config.rmt_adv =
5596                                            mii_adv_to_ethtool_adv_x(remote_adv);
5597                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5598                                 /* Link is up via parallel detect */
5599                         } else {
5600                                 current_link_up = 0;
5601                         }
5602                 }
5603         }
5604
5605         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5606                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5607
5608         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5609         if (tp->link_config.active_duplex == DUPLEX_HALF)
5610                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5611
5612         tw32_f(MAC_MODE, tp->mac_mode);
5613         udelay(40);
5614
5615         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5616
5617         tp->link_config.active_speed = current_speed;
5618         tp->link_config.active_duplex = current_duplex;
5619
5620         tg3_test_and_report_link_chg(tp, current_link_up);
5621         return err;
5622 }
5623
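/* Handle parallel detection on MII serdes links: once the autoneg
 * grace period (serdes_counter) expires, force 1000-full if we see
 * signal detect without config code words, and re-enable autoneg when
 * config code words reappear.
 */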
5624 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5625 {
5626         if (tp->serdes_counter) {
5627                 /* Give autoneg time to complete. */
5628                 tp->serdes_counter--;
5629                 return;
5630         }
5631
5632         if (!tp->link_up &&
5633             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5634                 u32 bmcr;
5635
5636                 tg3_readphy(tp, MII_BMCR, &bmcr);
5637                 if (bmcr & BMCR_ANENABLE) {
5638                         u32 phy1, phy2;
5639
5640                         /* Select shadow register 0x1f */
5641                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5642                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5643
5644                         /* Select expansion interrupt status register */
5645                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5646                                          MII_TG3_DSP_EXP1_INT_STAT);
5647                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5648                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5649
5650                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5651                                 /* We have signal detect and are not receiving
5652                                  * config code words, so the link is up by
5653                                  * parallel detection.
5654                                  */
5655
5656                                 bmcr &= ~BMCR_ANENABLE;
5657                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5658                                 tg3_writephy(tp, MII_BMCR, bmcr);
5659                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5660                         }
5661                 }
5662         } else if (tp->link_up &&
5663                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5664                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5665                 u32 phy2;
5666
5667                 /* Select expansion interrupt status register */
5668                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5669                                  MII_TG3_DSP_EXP1_INT_STAT);
5670                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5671                 if (phy2 & 0x20) {
5672                         u32 bmcr;
5673
5674                         /* Config code words received, turn on autoneg. */
5675                         tg3_readphy(tp, MII_BMCR, &bmcr);
5676                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5677
5678                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5679
5680                 }
5681         }
5682 }
5683
5684 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5685 {
5686         u32 val;
5687         int err;
5688
5689         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5690                 err = tg3_setup_fiber_phy(tp, force_reset);
5691         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5692                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5693         else
5694                 err = tg3_setup_copper_phy(tp, force_reset);
5695
5696         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5697                 u32 scale;
5698
5699                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5700                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5701                         scale = 65;
5702                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5703                         scale = 6;
5704                 else
5705                         scale = 12;
5706
5707                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5708                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5709                 tw32(GRC_MISC_CFG, val);
5710         }
5711
5712         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5713               (6 << TX_LENGTHS_IPG_SHIFT);
5714         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5715             tg3_asic_rev(tp) == ASIC_REV_5762)
5716                 val |= tr32(MAC_TX_LENGTHS) &
5717                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5718                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5719
5720         if (tp->link_config.active_speed == SPEED_1000 &&
5721             tp->link_config.active_duplex == DUPLEX_HALF)
5722                 tw32(MAC_TX_LENGTHS, val |
5723                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5724         else
5725                 tw32(MAC_TX_LENGTHS, val |
5726                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5727
5728         if (!tg3_flag(tp, 5705_PLUS)) {
5729                 if (tp->link_up) {
5730                         tw32(HOSTCC_STAT_COAL_TICKS,
5731                              tp->coal.stats_block_coalesce_usecs);
5732                 } else {
5733                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5734                 }
5735         }
5736
5737         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5738                 val = tr32(PCIE_PWR_MGMT_THRESH);
5739                 if (!tp->link_up)
5740                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5741                               tp->pwrmgmt_thresh;
5742                 else
5743                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5744                 tw32(PCIE_PWR_MGMT_THRESH, val);
5745         }
5746
5747         return err;
5748 }
5749
5750 /* tp->lock must be held */
5751 static u64 tg3_refclk_read(struct tg3 *tp)
5752 {
5753         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5754         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5755 }
5756
5757 /* tp->lock must be held */
5758 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5759 {
5760         tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5761         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5762         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5763         tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5764 }
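
/* Note on the two helpers above: the reference clock is a free-running
 * 64-bit counter exposed as two 32-bit registers.  tg3_refclk_write()
 * stops the counter before loading the two halves and resumes it
 * afterwards; without that stop/resume bracket the counter could advance
 * between the LSB and MSB writes and the newly loaded value would be torn.
 */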
5765
5766 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5767 static inline void tg3_full_unlock(struct tg3 *tp);
5768 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5769 {
5770         struct tg3 *tp = netdev_priv(dev);
5771
5772         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5773                                 SOF_TIMESTAMPING_RX_SOFTWARE |
5774                                 SOF_TIMESTAMPING_SOFTWARE    |
5775                                 SOF_TIMESTAMPING_TX_HARDWARE |
5776                                 SOF_TIMESTAMPING_RX_HARDWARE |
5777                                 SOF_TIMESTAMPING_RAW_HARDWARE;
5778
5779         if (tp->ptp_clock)
5780                 info->phc_index = ptp_clock_index(tp->ptp_clock);
5781         else
5782                 info->phc_index = -1;
5783
5784         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5785
5786         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5787                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5788                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5789                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5790         return 0;
5791 }
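
/* For context: .get_ts_info is the ethtool hook behind the
 * ETHTOOL_GET_TS_INFO request (the "ethtool -T <dev>" query).  The
 * phc_index ties this netdev to the PTP clock registered from
 * tg3_ptp_caps below, while the tx_types/rx_filters bitmaps advertise
 * which hwtstamp_config settings the SIOCSHWTSTAMP ioctl can ask for.
 */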
5792
5793 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5794 {
5795         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5796         bool neg_adj = false;
5797         u32 correction = 0;
5798
5799         if (ppb < 0) {
5800                 neg_adj = true;
5801                 ppb = -ppb;
5802         }
5803
5804         /* Frequency adjustment is performed in hardware with a 24-bit
5805          * accumulator and a programmable correction value. On each clock
5806          * cycle the correction value is added to the accumulator, and when
5807          * it overflows, the time counter is incremented/decremented.
5808          *
5809          * So the conversion from ppb to the correction value is
5810          *              ppb * (1 << 24) / 1000000000
5811          */
5812         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5813                      TG3_EAV_REF_CLK_CORRECT_MASK;
5814
5815         tg3_full_lock(tp, 0);
5816
5817         if (correction)
5818                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5819                      TG3_EAV_REF_CLK_CORRECT_EN |
5820                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5821         else
5822                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5823
5824         tg3_full_unlock(tp);
5825
5826         return 0;
5827 }
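
/* Worked example for the conversion above (illustrative numbers only):
 * a requested adjustment of ppb = 5000 (i.e. +5 ppm) gives
 *
 *      correction = 5000 * (1 << 24) / 1000000000
 *                 = 83886080000 / 1000000000
 *                 = 83 (truncated)
 *
 * so the 24-bit accumulator gains 83 per clock and overflows roughly once
 * every (1 << 24) / 83 ~= 202,135 clocks, nudging the time counter at
 * approximately the requested rate.
 */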
5828
5829 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5830 {
5831         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5832
5833         tg3_full_lock(tp, 0);
5834         tp->ptp_adjust += delta;
5835         tg3_full_unlock(tp);
5836
5837         return 0;
5838 }
5839
5840 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5841 {
5842         u64 ns;
5843         u32 remainder;
5844         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5845
5846         tg3_full_lock(tp, 0);
5847         ns = tg3_refclk_read(tp);
5848         ns += tp->ptp_adjust;
5849         tg3_full_unlock(tp);
5850
5851         ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5852         ts->tv_nsec = remainder;
5853
5854         return 0;
5855 }
5856
5857 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5858                            const struct timespec *ts)
5859 {
5860         u64 ns;
5861         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5862
5863         ns = timespec_to_ns(ts);
5864
5865         tg3_full_lock(tp, 0);
5866         tg3_refclk_write(tp, ns);
5867         tp->ptp_adjust = 0;
5868         tg3_full_unlock(tp);
5869
5870         return 0;
5871 }
5872
5873 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5874                           struct ptp_clock_request *rq, int on)
5875 {
5876         return -EOPNOTSUPP;
5877 }
5878
5879 static const struct ptp_clock_info tg3_ptp_caps = {
5880         .owner          = THIS_MODULE,
5881         .name           = "tg3 clock",
5882         .max_adj        = 250000000,
5883         .n_alarm        = 0,
5884         .n_ext_ts       = 0,
5885         .n_per_out      = 0,
5886         .pps            = 0,
5887         .adjfreq        = tg3_ptp_adjfreq,
5888         .adjtime        = tg3_ptp_adjtime,
5889         .gettime        = tg3_ptp_gettime,
5890         .settime        = tg3_ptp_settime,
5891         .enable         = tg3_ptp_enable,
5892 };
5893
5894 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5895                                      struct skb_shared_hwtstamps *timestamp)
5896 {
5897         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5898         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5899                                            tp->ptp_adjust);
5900 }
5901
5902 /* tp->lock must be held */
5903 static void tg3_ptp_init(struct tg3 *tp)
5904 {
5905         if (!tg3_flag(tp, PTP_CAPABLE))
5906                 return;
5907
5908         /* Initialize the hardware clock to the system time. */
5909         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5910         tp->ptp_adjust = 0;
5911         tp->ptp_info = tg3_ptp_caps;
5912 }
5913
5914 /* tp->lock must be held */
5915 static void tg3_ptp_resume(struct tg3 *tp)
5916 {
5917         if (!tg3_flag(tp, PTP_CAPABLE))
5918                 return;
5919
5920         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5921         tp->ptp_adjust = 0;
5922 }
5923
5924 static void tg3_ptp_fini(struct tg3 *tp)
5925 {
5926         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5927                 return;
5928
5929         ptp_clock_unregister(tp->ptp_clock);
5930         tp->ptp_clock = NULL;
5931         tp->ptp_adjust = 0;
5932 }
5933
5934 static inline int tg3_irq_sync(struct tg3 *tp)
5935 {
5936         return tp->irq_sync;
5937 }
5938
5939 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5940 {
5941         int i;
5942
5943         dst = (u32 *)((u8 *)dst + off);
5944         for (i = 0; i < len; i += sizeof(u32))
5945                 *dst++ = tr32(off + i);
5946 }
5947
5948 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5949 {
5950         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5951         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5952         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5953         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5954         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5955         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5956         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5957         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5958         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5959         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5960         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5961         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5962         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5963         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5964         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5965         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5966         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5967         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5968         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5969
5970         if (tg3_flag(tp, SUPPORT_MSIX))
5971                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5972
5973         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5974         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5975         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5976         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5977         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5978         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5979         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5980         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5981
5982         if (!tg3_flag(tp, 5705_PLUS)) {
5983                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5984                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5985                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5986         }
5987
5988         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5989         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5990         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5991         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5992         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5993
5994         if (tg3_flag(tp, NVRAM))
5995                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5996 }
5997
5998 static void tg3_dump_state(struct tg3 *tp)
5999 {
6000         int i;
6001         u32 *regs;
6002
6003         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6004         if (!regs)
6005                 return;
6006
6007         if (tg3_flag(tp, PCI_EXPRESS)) {
6008                 /* Read up to but not including private PCI registers */
6009                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6010                         regs[i / sizeof(u32)] = tr32(i);
6011         } else
6012                 tg3_dump_legacy_regs(tp, regs);
6013
6014         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6015                 if (!regs[i + 0] && !regs[i + 1] &&
6016                     !regs[i + 2] && !regs[i + 3])
6017                         continue;
6018
6019                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6020                            i * 4,
6021                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6022         }
6023
6024         kfree(regs);
6025
6026         for (i = 0; i < tp->irq_cnt; i++) {
6027                 struct tg3_napi *tnapi = &tp->napi[i];
6028
6029                 /* SW status block */
6030                 netdev_err(tp->dev,
6031                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6032                            i,
6033                            tnapi->hw_status->status,
6034                            tnapi->hw_status->status_tag,
6035                            tnapi->hw_status->rx_jumbo_consumer,
6036                            tnapi->hw_status->rx_consumer,
6037                            tnapi->hw_status->rx_mini_consumer,
6038                            tnapi->hw_status->idx[0].rx_producer,
6039                            tnapi->hw_status->idx[0].tx_consumer);
6040
6041                 netdev_err(tp->dev,
6042                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6043                            i,
6044                            tnapi->last_tag, tnapi->last_irq_tag,
6045                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6046                            tnapi->rx_rcb_ptr,
6047                            tnapi->prodring.rx_std_prod_idx,
6048                            tnapi->prodring.rx_std_cons_idx,
6049                            tnapi->prodring.rx_jmb_prod_idx,
6050                            tnapi->prodring.rx_jmb_cons_idx);
6051         }
6052 }
6053
6054 /* This is called whenever we suspect that the system chipset is re-
6055  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6056  * is bogus tx completions. We try to recover by setting the
6057  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6058  * in the workqueue.
6059  */
6060 static void tg3_tx_recover(struct tg3 *tp)
6061 {
6062         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6063                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6064
6065         netdev_warn(tp->dev,
6066                     "The system may be re-ordering memory-mapped I/O "
6067                     "cycles to the network device, attempting to recover. "
6068                     "Please report the problem to the driver maintainer "
6069                     "and include system chipset information.\n");
6070
6071         spin_lock(&tp->lock);
6072         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6073         spin_unlock(&tp->lock);
6074 }
6075
6076 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6077 {
6078         /* Tell compiler to fetch tx indices from memory. */
6079         barrier();
6080         return tnapi->tx_pending -
6081                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6082 }
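
/* Illustration of the wraparound arithmetic above (editorial sketch,
 * assuming a 512-entry ring; TG3_TX_RING_SIZE must be a power of two for
 * the mask to work): with tx_prod == 3 and tx_cons == 510, the unsigned
 * difference (3 - 510) & 511 == 5, i.e. five descriptors are still in
 * flight across the wrap, leaving tx_pending - 5 slots available.
 */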
6083
6084 /* Tigon3 never reports partial packet sends.  So we do not
6085  * need special logic to handle SKBs that have not had all
6086  * of their frags sent yet, like SunGEM does.
6087  */
6088 static void tg3_tx(struct tg3_napi *tnapi)
6089 {
6090         struct tg3 *tp = tnapi->tp;
6091         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6092         u32 sw_idx = tnapi->tx_cons;
6093         struct netdev_queue *txq;
6094         int index = tnapi - tp->napi;
6095         unsigned int pkts_compl = 0, bytes_compl = 0;
6096
6097         if (tg3_flag(tp, ENABLE_TSS))
6098                 index--;
6099
6100         txq = netdev_get_tx_queue(tp->dev, index);
6101
6102         while (sw_idx != hw_idx) {
6103                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6104                 struct sk_buff *skb = ri->skb;
6105                 int i, tx_bug = 0;
6106
6107                 if (unlikely(skb == NULL)) {
6108                         tg3_tx_recover(tp);
6109                         return;
6110                 }
6111
6112                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6113                         struct skb_shared_hwtstamps timestamp;
6114                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6115                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6116
6117                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6118
6119                         skb_tstamp_tx(skb, &timestamp);
6120                 }
6121
6122                 pci_unmap_single(tp->pdev,
6123                                  dma_unmap_addr(ri, mapping),
6124                                  skb_headlen(skb),
6125                                  PCI_DMA_TODEVICE);
6126
6127                 ri->skb = NULL;
6128
6129                 while (ri->fragmented) {
6130                         ri->fragmented = false;
6131                         sw_idx = NEXT_TX(sw_idx);
6132                         ri = &tnapi->tx_buffers[sw_idx];
6133                 }
6134
6135                 sw_idx = NEXT_TX(sw_idx);
6136
6137                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6138                         ri = &tnapi->tx_buffers[sw_idx];
6139                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6140                                 tx_bug = 1;
6141
6142                         pci_unmap_page(tp->pdev,
6143                                        dma_unmap_addr(ri, mapping),
6144                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6145                                        PCI_DMA_TODEVICE);
6146
6147                         while (ri->fragmented) {
6148                                 ri->fragmented = false;
6149                                 sw_idx = NEXT_TX(sw_idx);
6150                                 ri = &tnapi->tx_buffers[sw_idx];
6151                         }
6152
6153                         sw_idx = NEXT_TX(sw_idx);
6154                 }
6155
6156                 pkts_compl++;
6157                 bytes_compl += skb->len;
6158
6159                 dev_kfree_skb(skb);
6160
6161                 if (unlikely(tx_bug)) {
6162                         tg3_tx_recover(tp);
6163                         return;
6164                 }
6165         }
6166
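        /* Report completed packets/bytes to byte queue limits (BQL) so the
         * stack can bound the amount of data queued on this txq.
         */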
6167         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6168
6169         tnapi->tx_cons = sw_idx;
6170
6171         /* Need to make the tx_cons update visible to tg3_start_xmit()
6172          * before checking for netif_queue_stopped().  Without the
6173          * memory barrier, there is a small possibility that tg3_start_xmit()
6174          * will miss it and cause the queue to be stopped forever.
6175          */
6176         smp_mb();
6177
6178         if (unlikely(netif_tx_queue_stopped(txq) &&
6179                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6180                 __netif_tx_lock(txq, smp_processor_id());
6181                 if (netif_tx_queue_stopped(txq) &&
6182                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6183                         netif_tx_wake_queue(txq);
6184                 __netif_tx_unlock(txq);
6185         }
6186 }
6187
6188 static void tg3_frag_free(bool is_frag, void *data)
6189 {
6190         if (is_frag)
6191                 put_page(virt_to_head_page(data));
6192         else
6193                 kfree(data);
6194 }
6195
6196 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6197 {
6198         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6199                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6200
6201         if (!ri->data)
6202                 return;
6203
6204         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6205                          map_sz, PCI_DMA_FROMDEVICE);
6206         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6207         ri->data = NULL;
6208 }
6209
6210
6211 /* Returns size of skb allocated or < 0 on error.
6212  *
6213  * We only need to fill in the address because the other members
6214  * of the RX descriptor are invariant, see tg3_init_rings.
6215  *
6216  * Note the purposeful asymmetry of CPU vs. chip accesses.  For
6217  * posting buffers we only dirty the first cache line of the RX
6218  * descriptor (containing the address), whereas for the RX status
6219  * buffers the CPU only reads the last cache line of the RX descriptor
6220  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6221  */
6222 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6223                              u32 opaque_key, u32 dest_idx_unmasked,
6224                              unsigned int *frag_size)
6225 {
6226         struct tg3_rx_buffer_desc *desc;
6227         struct ring_info *map;
6228         u8 *data;
6229         dma_addr_t mapping;
6230         int skb_size, data_size, dest_idx;
6231
6232         switch (opaque_key) {
6233         case RXD_OPAQUE_RING_STD:
6234                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6235                 desc = &tpr->rx_std[dest_idx];
6236                 map = &tpr->rx_std_buffers[dest_idx];
6237                 data_size = tp->rx_pkt_map_sz;
6238                 break;
6239
6240         case RXD_OPAQUE_RING_JUMBO:
6241                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6242                 desc = &tpr->rx_jmb[dest_idx].std;
6243                 map = &tpr->rx_jmb_buffers[dest_idx];
6244                 data_size = TG3_RX_JMB_MAP_SZ;
6245                 break;
6246
6247         default:
6248                 return -EINVAL;
6249         }
6250
6251         /* Do not overwrite any of the map or rp information
6252          * until we are sure we can commit to a new buffer.
6253          *
6254          * Callers depend upon this behavior and assume that
6255          * we leave everything unchanged if we fail.
6256          */
6257         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6258                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6259         if (skb_size <= PAGE_SIZE) {
6260                 data = netdev_alloc_frag(skb_size);
6261                 *frag_size = skb_size;
6262         } else {
6263                 data = kmalloc(skb_size, GFP_ATOMIC);
6264                 *frag_size = 0;
6265         }
6266         if (!data)
6267                 return -ENOMEM;
6268
6269         mapping = pci_map_single(tp->pdev,
6270                                  data + TG3_RX_OFFSET(tp),
6271                                  data_size,
6272                                  PCI_DMA_FROMDEVICE);
6273         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6274                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6275                 return -EIO;
6276         }
6277
6278         map->data = data;
6279         dma_unmap_addr_set(map, mapping, mapping);
6280
6281         desc->addr_hi = ((u64)mapping >> 32);
6282         desc->addr_lo = ((u64)mapping & 0xffffffff);
6283
6284         return data_size;
6285 }
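
/* Design note on the allocation strategy above: buffers whose total size
 * (data plus skb_shared_info) fits in a page come from the page-fragment
 * allocator so that tg3_rx() can hand them to build_skb() cheaply; larger
 * buffers fall back to kmalloc().  The choice is signalled to the caller
 * through *frag_size (non-zero for a fragment, 0 for kmalloc), which
 * matches the is_frag test tg3_frag_free() uses when releasing a buffer.
 */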
6286
6287 /* We only need to move over in the address because the other
6288  * members of the RX descriptor are invariant.  See notes above
6289  * tg3_alloc_rx_data for full details.
6290  */
6291 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6292                            struct tg3_rx_prodring_set *dpr,
6293                            u32 opaque_key, int src_idx,
6294                            u32 dest_idx_unmasked)
6295 {
6296         struct tg3 *tp = tnapi->tp;
6297         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6298         struct ring_info *src_map, *dest_map;
6299         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6300         int dest_idx;
6301
6302         switch (opaque_key) {
6303         case RXD_OPAQUE_RING_STD:
6304                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6305                 dest_desc = &dpr->rx_std[dest_idx];
6306                 dest_map = &dpr->rx_std_buffers[dest_idx];
6307                 src_desc = &spr->rx_std[src_idx];
6308                 src_map = &spr->rx_std_buffers[src_idx];
6309                 break;
6310
6311         case RXD_OPAQUE_RING_JUMBO:
6312                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6313                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6314                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6315                 src_desc = &spr->rx_jmb[src_idx].std;
6316                 src_map = &spr->rx_jmb_buffers[src_idx];
6317                 break;
6318
6319         default:
6320                 return;
6321         }
6322
6323         dest_map->data = src_map->data;
6324         dma_unmap_addr_set(dest_map, mapping,
6325                            dma_unmap_addr(src_map, mapping));
6326         dest_desc->addr_hi = src_desc->addr_hi;
6327         dest_desc->addr_lo = src_desc->addr_lo;
6328
6329         /* Ensure that the update to the skb happens after the physical
6330          * addresses have been transferred to the new BD location.
6331          */
6332         smp_wmb();
6333
6334         src_map->data = NULL;
6335 }
6336
6337 /* The RX ring scheme is composed of multiple rings which post fresh
6338  * buffers to the chip, and one special ring the chip uses to report
6339  * status back to the host.
6340  *
6341  * The special ring reports the status of received packets to the
6342  * host.  The chip does not write into the original descriptor the
6343  * RX buffer was obtained from.  The chip simply takes the original
6344  * descriptor as provided by the host, updates the status and length
6345  * field, then writes this into the next status ring entry.
6346  *
6347  * Each ring the host uses to post buffers to the chip is described
6348  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6349  * it is first placed into the on-chip RAM.  Once the packet's length
6350  * is known, the chip walks down the TG3_BDINFO entries to select the
6351  * ring: the first TG3_BDINFO whose MAXLEN field covers the new
6352  * packet's length is chosen.
6353  *
6354  * The "separate ring for rx status" scheme may sound queer, but it makes
6355  * sense from a cache coherency perspective.  If only the host writes
6356  * to the buffer post rings, and only the chip writes to the rx status
6357  * rings, then cache lines never move beyond shared-modified state.
6358  * If both the host and chip were to write into the same ring, cache line
6359  * eviction could occur since both entities want it in an exclusive state.
6360  */
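
/* A compact picture of the scheme described above (editorial sketch):
 *
 *   host  --posts fresh buffers-->  std/jumbo producer rings
 *   chip  --DMA + status write -->  return (status) ring
 *   host  --tg3_rx() consumes  -->  replenishes producer rings
 *
 * Only the host writes the producer rings and only the chip writes the
 * return ring, which is what keeps cache lines from bouncing between
 * exclusive owners.
 */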
6361 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6362 {
6363         struct tg3 *tp = tnapi->tp;
6364         u32 work_mask, rx_std_posted = 0;
6365         u32 std_prod_idx, jmb_prod_idx;
6366         u32 sw_idx = tnapi->rx_rcb_ptr;
6367         u16 hw_idx;
6368         int received;
6369         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6370
6371         hw_idx = *(tnapi->rx_rcb_prod_idx);
6372         /*
6373          * We need to order the read of hw_idx and the read of
6374          * the opaque cookie.
6375          */
6376         rmb();
6377         work_mask = 0;
6378         received = 0;
6379         std_prod_idx = tpr->rx_std_prod_idx;
6380         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6381         while (sw_idx != hw_idx && budget > 0) {
6382                 struct ring_info *ri;
6383                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6384                 unsigned int len;
6385                 struct sk_buff *skb;
6386                 dma_addr_t dma_addr;
6387                 u32 opaque_key, desc_idx, *post_ptr;
6388                 u8 *data;
6389                 u64 tstamp = 0;
6390
6391                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6392                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6393                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6394                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6395                         dma_addr = dma_unmap_addr(ri, mapping);
6396                         data = ri->data;
6397                         post_ptr = &std_prod_idx;
6398                         rx_std_posted++;
6399                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6400                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6401                         dma_addr = dma_unmap_addr(ri, mapping);
6402                         data = ri->data;
6403                         post_ptr = &jmb_prod_idx;
6404                 } else
6405                         goto next_pkt_nopost;
6406
6407                 work_mask |= opaque_key;
6408
6409                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6410                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6411                 drop_it:
6412                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6413                                        desc_idx, *post_ptr);
6414                 drop_it_no_recycle:
6415                         /* Other statistics kept track of by card. */
6416                         tp->rx_dropped++;
6417                         goto next_pkt;
6418                 }
6419
6420                 prefetch(data + TG3_RX_OFFSET(tp));
6421                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6422                       ETH_FCS_LEN;
6423
6424                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6425                      RXD_FLAG_PTPSTAT_PTPV1 ||
6426                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6427                      RXD_FLAG_PTPSTAT_PTPV2) {
6428                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6429                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6430                 }
6431
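                /* Copy-break: packets larger than the threshold take
                 * ownership of the DMA buffer (a replacement is allocated
                 * first), while small packets are copied into a fresh skb
                 * so the original buffer can be recycled in place.
                 */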
6432                 if (len > TG3_RX_COPY_THRESH(tp)) {
6433                         int skb_size;
6434                         unsigned int frag_size;
6435
6436                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6437                                                     *post_ptr, &frag_size);
6438                         if (skb_size < 0)
6439                                 goto drop_it;
6440
6441                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6442                                          PCI_DMA_FROMDEVICE);
6443
6444                         skb = build_skb(data, frag_size);
6445                         if (!skb) {
6446                                 tg3_frag_free(frag_size != 0, data);
6447                                 goto drop_it_no_recycle;
6448                         }
6449                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6450                         /* Ensure that the update to the data happens
6451                          * after the usage of the old DMA mapping.
6452                          */
6453                         smp_wmb();
6454
6455                         ri->data = NULL;
6456
6457                 } else {
6458                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6459                                        desc_idx, *post_ptr);
6460
6461                         skb = netdev_alloc_skb(tp->dev,
6462                                                len + TG3_RAW_IP_ALIGN);
6463                         if (skb == NULL)
6464                                 goto drop_it_no_recycle;
6465
6466                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6467                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6468                         memcpy(skb->data,
6469                                data + TG3_RX_OFFSET(tp),
6470                                len);
6471                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6472                 }
6473
6474                 skb_put(skb, len);
6475                 if (tstamp)
6476                         tg3_hwclock_to_timestamp(tp, tstamp,
6477                                                  skb_hwtstamps(skb));
6478
6479                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6480                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6481                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6482                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6483                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6484                 else
6485                         skb_checksum_none_assert(skb);
6486
6487                 skb->protocol = eth_type_trans(skb, tp->dev);
6488
6489                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6490                     skb->protocol != htons(ETH_P_8021Q)) {
6491                         dev_kfree_skb(skb);
6492                         goto drop_it_no_recycle;
6493                 }
6494
6495                 if (desc->type_flags & RXD_FLAG_VLAN &&
6496                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6497                         __vlan_hwaccel_put_tag(skb,
6498                                                desc->err_vlan & RXD_VLAN_MASK);
6499
6500                 napi_gro_receive(&tnapi->napi, skb);
6501
6502                 received++;
6503                 budget--;
6504
6505 next_pkt:
6506                 (*post_ptr)++;
6507
6508                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6509                         tpr->rx_std_prod_idx = std_prod_idx &
6510                                                tp->rx_std_ring_mask;
6511                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6512                                      tpr->rx_std_prod_idx);
6513                         work_mask &= ~RXD_OPAQUE_RING_STD;
6514                         rx_std_posted = 0;
6515                 }
6516 next_pkt_nopost:
6517                 sw_idx++;
6518                 sw_idx &= tp->rx_ret_ring_mask;
6519
6520                 /* Refresh hw_idx to see if there is new work */
6521                 if (sw_idx == hw_idx) {
6522                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6523                         rmb();
6524                 }
6525         }
6526
6527         /* ACK the status ring. */
6528         tnapi->rx_rcb_ptr = sw_idx;
6529         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6530
6531         /* Refill RX ring(s). */
6532         if (!tg3_flag(tp, ENABLE_RSS)) {
6533                 /* Sync BD data before updating mailbox */
6534                 wmb();
6535
6536                 if (work_mask & RXD_OPAQUE_RING_STD) {
6537                         tpr->rx_std_prod_idx = std_prod_idx &
6538                                                tp->rx_std_ring_mask;
6539                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6540                                      tpr->rx_std_prod_idx);
6541                 }
6542                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6543                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6544                                                tp->rx_jmb_ring_mask;
6545                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6546                                      tpr->rx_jmb_prod_idx);
6547                 }
6548                 mmiowb();
6549         } else if (work_mask) {
6550                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6551                  * updated before the producer indices can be updated.
6552                  */
6553                 smp_wmb();
6554
6555                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6556                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6557
6558                 if (tnapi != &tp->napi[1]) {
6559                         tp->rx_refill = true;
6560                         napi_schedule(&tp->napi[1].napi);
6561                 }
6562         }
6563
6564         return received;
6565 }
6566
6567 static void tg3_poll_link(struct tg3 *tp)
6568 {
6569         /* handle link change and other phy events */
6570         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6571                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6572
6573                 if (sblk->status & SD_STATUS_LINK_CHG) {
6574                         sblk->status = SD_STATUS_UPDATED |
6575                                        (sblk->status & ~SD_STATUS_LINK_CHG);
6576                         spin_lock(&tp->lock);
6577                         if (tg3_flag(tp, USE_PHYLIB)) {
6578                                 tw32_f(MAC_STATUS,
6579                                      (MAC_STATUS_SYNC_CHANGED |
6580                                       MAC_STATUS_CFG_CHANGED |
6581                                       MAC_STATUS_MI_COMPLETION |
6582                                       MAC_STATUS_LNKSTATE_CHANGED));
6583                                 udelay(40);
6584                         } else
6585                                 tg3_setup_phy(tp, 0);
6586                         spin_unlock(&tp->lock);
6587                 }
6588         }
6589 }
6590
6591 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6592                                 struct tg3_rx_prodring_set *dpr,
6593                                 struct tg3_rx_prodring_set *spr)
6594 {
6595         u32 si, di, cpycnt, src_prod_idx;
6596         int i, err = 0;
6597
6598         while (1) {
6599                 src_prod_idx = spr->rx_std_prod_idx;
6600
6601                 /* Make sure updates to the rx_std_buffers[] entries and the
6602                  * standard producer index are seen in the correct order.
6603                  */
6604                 smp_rmb();
6605
6606                 if (spr->rx_std_cons_idx == src_prod_idx)
6607                         break;
6608
6609                 if (spr->rx_std_cons_idx < src_prod_idx)
6610                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6611                 else
6612                         cpycnt = tp->rx_std_ring_mask + 1 -
6613                                  spr->rx_std_cons_idx;
6614
6615                 cpycnt = min(cpycnt,
6616                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6617
6618                 si = spr->rx_std_cons_idx;
6619                 di = dpr->rx_std_prod_idx;
6620
6621                 for (i = di; i < di + cpycnt; i++) {
6622                         if (dpr->rx_std_buffers[i].data) {
6623                                 cpycnt = i - di;
6624                                 err = -ENOSPC;
6625                                 break;
6626                         }
6627                 }
6628
6629                 if (!cpycnt)
6630                         break;
6631
6632                 /* Ensure that updates to the rx_std_buffers ring and the
6633                  * shadowed hardware producer ring from tg3_recycle_skb() are
6634                  * ordered correctly WRT the skb check above.
6635                  */
6636                 smp_rmb();
6637
6638                 memcpy(&dpr->rx_std_buffers[di],
6639                        &spr->rx_std_buffers[si],
6640                        cpycnt * sizeof(struct ring_info));
6641
6642                 for (i = 0; i < cpycnt; i++, di++, si++) {
6643                         struct tg3_rx_buffer_desc *sbd, *dbd;
6644                         sbd = &spr->rx_std[si];
6645                         dbd = &dpr->rx_std[di];
6646                         dbd->addr_hi = sbd->addr_hi;
6647                         dbd->addr_lo = sbd->addr_lo;
6648                 }
6649
6650                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6651                                        tp->rx_std_ring_mask;
6652                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6653                                        tp->rx_std_ring_mask;
6654         }
6655
6656         while (1) {
6657                 src_prod_idx = spr->rx_jmb_prod_idx;
6658
6659                 /* Make sure updates to the rx_jmb_buffers[] entries and
6660                  * the jumbo producer index are seen in the correct order.
6661                  */
6662                 smp_rmb();
6663
6664                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6665                         break;
6666
6667                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6668                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6669                 else
6670                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6671                                  spr->rx_jmb_cons_idx;
6672
6673                 cpycnt = min(cpycnt,
6674                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6675
6676                 si = spr->rx_jmb_cons_idx;
6677                 di = dpr->rx_jmb_prod_idx;
6678
6679                 for (i = di; i < di + cpycnt; i++) {
6680                         if (dpr->rx_jmb_buffers[i].data) {
6681                                 cpycnt = i - di;
6682                                 err = -ENOSPC;
6683                                 break;
6684                         }
6685                 }
6686
6687                 if (!cpycnt)
6688                         break;
6689
6690                 /* Ensure that updates to the rx_jmb_buffers ring and the
6691                  * shadowed hardware producer ring from tg3_recycle_skb() are
6692                  * ordered correctly WRT the skb check above.
6693                  */
6694                 smp_rmb();
6695
6696                 memcpy(&dpr->rx_jmb_buffers[di],
6697                        &spr->rx_jmb_buffers[si],
6698                        cpycnt * sizeof(struct ring_info));
6699
6700                 for (i = 0; i < cpycnt; i++, di++, si++) {
6701                         struct tg3_rx_buffer_desc *sbd, *dbd;
6702                         sbd = &spr->rx_jmb[si].std;
6703                         dbd = &dpr->rx_jmb[di].std;
6704                         dbd->addr_hi = sbd->addr_hi;
6705                         dbd->addr_lo = sbd->addr_lo;
6706                 }
6707
6708                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6709                                        tp->rx_jmb_ring_mask;
6710                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6711                                        tp->rx_jmb_ring_mask;
6712         }
6713
6714         return err;
6715 }
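
/* Note on the return value above: -ENOSPC means some destination slots
 * were still occupied, so only part of the buffers could be transferred.
 * The caller, tg3_poll_work(), reacts to a non-zero return by writing
 * HOSTCC_MODE (tw32_f(HOSTCC_MODE, tp->coal_now)) to force another
 * interrupt, so the transfer is retried on a subsequent poll.
 */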
6716
6717 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6718 {
6719         struct tg3 *tp = tnapi->tp;
6720
6721         /* run TX completion thread */
6722         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6723                 tg3_tx(tnapi);
6724                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6725                         return work_done;
6726         }
6727
6728         if (!tnapi->rx_rcb_prod_idx)
6729                 return work_done;
6730
6731         /* run RX thread, within the bounds set by NAPI.
6732          * All RX "locking" is done by ensuring outside
6733          * code synchronizes with tg3->napi.poll()
6734          */
6735         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6736                 work_done += tg3_rx(tnapi, budget - work_done);
6737
6738         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6739                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6740                 int i, err = 0;
6741                 u32 std_prod_idx = dpr->rx_std_prod_idx;
6742                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6743
6744                 tp->rx_refill = false;
6745                 for (i = 1; i <= tp->rxq_cnt; i++)
6746                         err |= tg3_rx_prodring_xfer(tp, dpr,
6747                                                     &tp->napi[i].prodring);
6748
6749                 wmb();
6750
6751                 if (std_prod_idx != dpr->rx_std_prod_idx)
6752                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6753                                      dpr->rx_std_prod_idx);
6754
6755                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6756                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6757                                      dpr->rx_jmb_prod_idx);
6758
6759                 mmiowb();
6760
6761                 if (err)
6762                         tw32_f(HOSTCC_MODE, tp->coal_now);
6763         }
6764
6765         return work_done;
6766 }
6767
6768 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6769 {
6770         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6771                 schedule_work(&tp->reset_task);
6772 }
6773
6774 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6775 {
6776         cancel_work_sync(&tp->reset_task);
6777         tg3_flag_clear(tp, RESET_TASK_PENDING);
6778         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6779 }
6780
6781 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6782 {
6783         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6784         struct tg3 *tp = tnapi->tp;
6785         int work_done = 0;
6786         struct tg3_hw_status *sblk = tnapi->hw_status;
6787
6788         while (1) {
6789                 work_done = tg3_poll_work(tnapi, work_done, budget);
6790
6791                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6792                         goto tx_recovery;
6793
6794                 if (unlikely(work_done >= budget))
6795                         break;
6796
6797                 /* tp->last_tag is used in tg3_int_reenable() below
6798                  * to tell the hw how much work has been processed,
6799                  * so we must read it before checking for more work.
6800                  */
6801                 tnapi->last_tag = sblk->status_tag;
6802                 tnapi->last_irq_tag = tnapi->last_tag;
6803                 rmb();
6804
6805                 /* check for RX/TX work to do */
6806                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6807                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6808
6809                         /* This test is not race-free, but it reduces
6810                          * the number of interrupts by looping again.
6811                          */
6812                         if (tnapi == &tp->napi[1] && tp->rx_refill)
6813                                 continue;
6814
6815                         napi_complete(napi);
6816                         /* Reenable interrupts. */
6817                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6818
6819                         /* This test is synchronized by napi_schedule()
6820                          * and napi_complete() to close the race condition.
6821                          */
6822                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6823                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
6824                                                   HOSTCC_MODE_ENABLE |
6825                                                   tnapi->coal_now);
6826                         }
6827                         mmiowb();
6828                         break;
6829                 }
6830         }
6831
6832         return work_done;
6833
6834 tx_recovery:
6835         /* work_done is guaranteed to be less than budget. */
6836         napi_complete(napi);
6837         tg3_reset_task_schedule(tp);
6838         return work_done;
6839 }
6840
6841 static void tg3_process_error(struct tg3 *tp)
6842 {
6843         u32 val;
6844         bool real_error = false;
6845
6846         if (tg3_flag(tp, ERROR_PROCESSED))
6847                 return;
6848
6849         /* Check Flow Attention register */
6850         val = tr32(HOSTCC_FLOW_ATTN);
6851         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6852                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6853                 real_error = true;
6854         }
6855
6856         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6857                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6858                 real_error = true;
6859         }
6860
6861         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6862                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6863                 real_error = true;
6864         }
6865
6866         if (!real_error)
6867                 return;
6868
6869         tg3_dump_state(tp);
6870
6871         tg3_flag_set(tp, ERROR_PROCESSED);
6872         tg3_reset_task_schedule(tp);
6873 }
6874
6875 static int tg3_poll(struct napi_struct *napi, int budget)
6876 {
6877         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6878         struct tg3 *tp = tnapi->tp;
6879         int work_done = 0;
6880         struct tg3_hw_status *sblk = tnapi->hw_status;
6881
6882         while (1) {
6883                 if (sblk->status & SD_STATUS_ERROR)
6884                         tg3_process_error(tp);
6885
6886                 tg3_poll_link(tp);
6887
6888                 work_done = tg3_poll_work(tnapi, work_done, budget);
6889
6890                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6891                         goto tx_recovery;
6892
6893                 if (unlikely(work_done >= budget))
6894                         break;
6895
6896                 if (tg3_flag(tp, TAGGED_STATUS)) {
6897                         /* tp->last_tag is used in tg3_int_reenable() below
6898                          * to tell the hw how much work has been processed,
6899                          * so we must read it before checking for more work.
6900                          */
6901                         tnapi->last_tag = sblk->status_tag;
6902                         tnapi->last_irq_tag = tnapi->last_tag;
6903                         rmb();
6904                 } else
6905                         sblk->status &= ~SD_STATUS_UPDATED;
6906
6907                 if (likely(!tg3_has_work(tnapi))) {
6908                         napi_complete(napi);
6909                         tg3_int_reenable(tnapi);
6910                         break;
6911                 }
6912         }
6913
6914         return work_done;
6915
6916 tx_recovery:
6917         /* work_done is guaranteed to be less than budget. */
6918         napi_complete(napi);
6919         tg3_reset_task_schedule(tp);
6920         return work_done;
6921 }
6922
6923 static void tg3_napi_disable(struct tg3 *tp)
6924 {
6925         int i;
6926
6927         for (i = tp->irq_cnt - 1; i >= 0; i--)
6928                 napi_disable(&tp->napi[i].napi);
6929 }
6930
6931 static void tg3_napi_enable(struct tg3 *tp)
6932 {
6933         int i;
6934
6935         for (i = 0; i < tp->irq_cnt; i++)
6936                 napi_enable(&tp->napi[i].napi);
6937 }
6938
6939 static void tg3_napi_init(struct tg3 *tp)
6940 {
6941         int i;
6942
6943         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6944         for (i = 1; i < tp->irq_cnt; i++)
6945                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6946 }
6947
6948 static void tg3_napi_fini(struct tg3 *tp)
6949 {
6950         int i;
6951
6952         for (i = 0; i < tp->irq_cnt; i++)
6953                 netif_napi_del(&tp->napi[i].napi);
6954 }
6955
6956 static inline void tg3_netif_stop(struct tg3 *tp)
6957 {
6958         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6959         tg3_napi_disable(tp);
6960         netif_carrier_off(tp->dev);
6961         netif_tx_disable(tp->dev);
6962 }
6963
6964 /* tp->lock must be held */
6965 static inline void tg3_netif_start(struct tg3 *tp)
6966 {
6967         tg3_ptp_resume(tp);
6968
6969         /* NOTE: unconditional netif_tx_wake_all_queues is only
6970          * appropriate so long as all callers are assured to
6971          * have free tx slots (such as after tg3_init_hw)
6972          */
6973         netif_tx_wake_all_queues(tp->dev);
6974
6975         if (tp->link_up)
6976                 netif_carrier_on(tp->dev);
6977
6978         tg3_napi_enable(tp);
6979         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6980         tg3_enable_ints(tp);
6981 }
6982
6983 static void tg3_irq_quiesce(struct tg3 *tp)
6984 {
6985         int i;
6986
6987         BUG_ON(tp->irq_sync);
6988
6989         tp->irq_sync = 1;
6990         smp_mb();
6991
6992         for (i = 0; i < tp->irq_cnt; i++)
6993                 synchronize_irq(tp->napi[i].irq_vec);
6994 }
6995
6996 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6997  * If irq_sync is non-zero, then the IRQ handler must be synchronized
6998  * with as well.  Most of the time, this is not necessary except when
6999  * shutting down the device.
7000  */
7001 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7002 {
7003         spin_lock_bh(&tp->lock);
7004         if (irq_sync)
7005                 tg3_irq_quiesce(tp);
7006 }
7007
7008 static inline void tg3_full_unlock(struct tg3 *tp)
7009 {
7010         spin_unlock_bh(&tp->lock);
7011 }
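
/* Typical usage of the pair above (editorial sketch): ordinary
 * reconfiguration paths take the lock without quiescing, while
 * shutdown-style paths pass irq_sync != 0 so in-flight interrupt
 * handlers are synchronized out first:
 *
 *      tg3_full_lock(tp, 1);   // also runs tg3_irq_quiesce()
 *      ...reconfigure or stop the device...
 *      tg3_full_unlock(tp);
 */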
7012
7013 /* One-shot MSI handler - Chip automatically disables interrupt
7014  * after sending MSI so driver doesn't have to do it.
7015  */
7016 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7017 {
7018         struct tg3_napi *tnapi = dev_id;
7019         struct tg3 *tp = tnapi->tp;
7020
7021         prefetch(tnapi->hw_status);
7022         if (tnapi->rx_rcb)
7023                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7024
7025         if (likely(!tg3_irq_sync(tp)))
7026                 napi_schedule(&tnapi->napi);
7027
7028         return IRQ_HANDLED;
7029 }
7030
7031 /* MSI ISR - No need to check for interrupt sharing and no need to
7032  * flush status block and interrupt mailbox. PCI ordering rules
7033  * guarantee that MSI will arrive after the status block.
7034  */
7035 static irqreturn_t tg3_msi(int irq, void *dev_id)
7036 {
7037         struct tg3_napi *tnapi = dev_id;
7038         struct tg3 *tp = tnapi->tp;
7039
7040         prefetch(tnapi->hw_status);
7041         if (tnapi->rx_rcb)
7042                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7043         /*
7044          * Writing any value to intr-mbox-0 clears PCI INTA# and
7045          * chip-internal interrupt pending events.
7046          * Writing a non-zero value to intr-mbox-0 additionally tells the
7047          * NIC to stop sending us irqs, engaging "in-intr-handler"
7048          * event coalescing.
7049          */
7050         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7051         if (likely(!tg3_irq_sync(tp)))
7052                 napi_schedule(&tnapi->napi);
7053
7054         return IRQ_RETVAL(1);
7055 }
7056
7057 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7058 {
7059         struct tg3_napi *tnapi = dev_id;
7060         struct tg3 *tp = tnapi->tp;
7061         struct tg3_hw_status *sblk = tnapi->hw_status;
7062         unsigned int handled = 1;
7063
7064         /* In INTx mode, it is possible for the interrupt to arrive at
7065          * the CPU before the status block that was posted prior to the
7066          * interrupt.  Reading the PCI State register will confirm whether
7067          * the interrupt is ours and will flush the status block.
7068          */
7069         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7070                 if (tg3_flag(tp, CHIP_RESETTING) ||
7071                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7072                         handled = 0;
7073                         goto out;
7074                 }
7075         }
7076
7077         /*
7078          * Writing any value to intr-mbox-0 clears PCI INTA# and
7079          * chip-internal interrupt pending events.
7080          * Writing a non-zero value to intr-mbox-0 additionally tells the
7081          * NIC to stop sending us irqs, engaging "in-intr-handler"
7082          * event coalescing.
7083          *
7084          * Flush the mailbox to de-assert the IRQ immediately to prevent
7085          * spurious interrupts.  The flush impacts performance but
7086          * excessive spurious interrupts can be worse in some cases.
7087          */
7088         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7089         if (tg3_irq_sync(tp))
7090                 goto out;
7091         sblk->status &= ~SD_STATUS_UPDATED;
7092         if (likely(tg3_has_work(tnapi))) {
7093                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7094                 napi_schedule(&tnapi->napi);
7095         } else {
7096                 /* No work, shared interrupt perhaps?  re-enable
7097                  * interrupts, and flush that PCI write
7098                  */
7099                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7100                                0x00000000);
7101         }
7102 out:
7103         return IRQ_RETVAL(handled);
7104 }
7105
7106 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7107 {
7108         struct tg3_napi *tnapi = dev_id;
7109         struct tg3 *tp = tnapi->tp;
7110         struct tg3_hw_status *sblk = tnapi->hw_status;
7111         unsigned int handled = 1;
7112
7113         /* In INTx mode, it is possible for the interrupt to arrive at
7114          * the CPU before the status block that was posted prior to it.
7115          * Reading the PCI State register will confirm whether the
7116          * interrupt is ours and will flush the status block.
7117          */
7118         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7119                 if (tg3_flag(tp, CHIP_RESETTING) ||
7120                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7121                         handled = 0;
7122                         goto out;
7123                 }
7124         }
7125
7126         /*
7127          * Writing any value to intr-mbox-0 clears PCI INTA# and
7128          * chip-internal interrupt pending events.
7129          * Writing non-zero to intr-mbox-0 additionally tells the
7130          * NIC to stop sending us irqs, engaging "in-intr-handler"
7131          * event coalescing.
7132          *
7133          * Flush the mailbox to de-assert the IRQ immediately to prevent
7134          * spurious interrupts.  The flush impacts performance but
7135          * excessive spurious interrupts can be worse in some cases.
7136          */
7137         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7138
7139         /*
7140          * In a shared interrupt configuration, sometimes other devices'
7141          * interrupts will scream.  We record the current status tag here
7142          * so that the above check can report that the screaming interrupts
7143          * are unhandled.  Eventually they will be silenced.
7144          */
7145         tnapi->last_irq_tag = sblk->status_tag;
7146
7147         if (tg3_irq_sync(tp))
7148                 goto out;
7149
7150         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7151
7152         napi_schedule(&tnapi->napi);
7153
7154 out:
7155         return IRQ_RETVAL(handled);
7156 }
7157
7158 /* ISR for interrupt test */
7159 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7160 {
7161         struct tg3_napi *tnapi = dev_id;
7162         struct tg3 *tp = tnapi->tp;
7163         struct tg3_hw_status *sblk = tnapi->hw_status;
7164
7165         if ((sblk->status & SD_STATUS_UPDATED) ||
7166             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7167                 tg3_disable_ints(tp);
7168                 return IRQ_RETVAL(1);
7169         }
7170         return IRQ_RETVAL(0);
7171 }
7172
7173 #ifdef CONFIG_NET_POLL_CONTROLLER
7174 static void tg3_poll_controller(struct net_device *dev)
7175 {
7176         int i;
7177         struct tg3 *tp = netdev_priv(dev);
7178
7179         if (tg3_irq_sync(tp))
7180                 return;
7181
7182         for (i = 0; i < tp->irq_cnt; i++)
7183                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7184 }
7185 #endif
7186
7187 static void tg3_tx_timeout(struct net_device *dev)
7188 {
7189         struct tg3 *tp = netdev_priv(dev);
7190
7191         if (netif_msg_tx_err(tp)) {
7192                 netdev_err(dev, "transmit timed out, resetting\n");
7193                 tg3_dump_state(tp);
7194         }
7195
7196         tg3_reset_task_schedule(tp);
7197 }
7198
7199 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
7200 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7201 {
7202         u32 base = (u32) mapping & 0xffffffff;
7203
7204         return (base > 0xffffdcc0) && (base + len + 8 < base);
7205 }
7206
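/* Worked example (illustrative, not from the original source): for
 * mapping = 0xfffffff0 and len = 0x20, base = 0xfffffff0 passes the
 * first test, and base + len + 8 wraps around to 0x18 < base, so the
 * buffer straddles the 4GB boundary and the function returns true.
 */
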
7207 /* Test for DMA addresses > 40-bit */
7208 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7209                                           int len)
7210 {
7211 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7212         if (tg3_flag(tp, 40BIT_DMA_BUG))
7213                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7214         return 0;
7215 #else
7216         return 0;
7217 #endif
7218 }
7219
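/* Note (illustrative): DMA_BIT_MASK(40) is 0xffffffffff, so on chips
 * with the 40-bit DMA bug the test above rejects any mapping whose end
 * would fall beyond the first terabyte (2^40 bytes) of DMA address
 * space.
 */
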
7220 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7221                                  dma_addr_t mapping, u32 len, u32 flags,
7222                                  u32 mss, u32 vlan)
7223 {
7224         txbd->addr_hi = ((u64) mapping >> 32);
7225         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7226         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7227         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7228 }
7229
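/* Worked example (illustrative): mapping = 0x123456789abc yields
 * addr_hi = 0x1234 and addr_lo = 0x56789abc, i.e. the 64-bit DMA
 * address is simply split across the two 32-bit BD words.
 */
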
7230 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7231                             dma_addr_t map, u32 len, u32 flags,
7232                             u32 mss, u32 vlan)
7233 {
7234         struct tg3 *tp = tnapi->tp;
7235         bool hwbug = false;
7236
7237         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7238                 hwbug = true;
7239
7240         if (tg3_4g_overflow_test(map, len))
7241                 hwbug = true;
7242
7243         if (tg3_40bit_overflow_test(tp, map, len))
7244                 hwbug = true;
7245
7246         if (tp->dma_limit) {
7247                 u32 prvidx = *entry;
7248                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7249                 while (len > tp->dma_limit && *budget) {
7250                         u32 frag_len = tp->dma_limit;
7251                         len -= tp->dma_limit;
7252
7253                         /* Avoid the 8-byte DMA problem */
7254                         if (len <= 8) {
7255                                 len += tp->dma_limit / 2;
7256                                 frag_len = tp->dma_limit / 2;
7257                         }
7258
7259                         tnapi->tx_buffers[*entry].fragmented = true;
7260
7261                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7262                                       frag_len, tmp_flag, mss, vlan);
7263                         *budget -= 1;
7264                         prvidx = *entry;
7265                         *entry = NEXT_TX(*entry);
7266
7267                         map += frag_len;
7268                 }
7269
7270                 if (len) {
7271                         if (*budget) {
7272                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7273                                               len, flags, mss, vlan);
7274                                 *budget -= 1;
7275                                 *entry = NEXT_TX(*entry);
7276                         } else {
7277                                 hwbug = true;
7278                                 tnapi->tx_buffers[prvidx].fragmented = false;
7279                         }
7280                 }
7281         } else {
7282                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7283                               len, flags, mss, vlan);
7284                 *entry = NEXT_TX(*entry);
7285         }
7286
7287         return hwbug;
7288 }
7289
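/* Worked example (illustrative): with tp->dma_limit = 4096, a
 * 9000-byte fragment is emitted as BDs of 4096, 4096 and 808 bytes.
 * A 4100-byte fragment would leave a tail of only 4 bytes, so the
 * final chunk is emitted as 2048 + 2052 instead, dodging the
 * short-DMA (8-byte) hardware bug handled above.
 */
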
7290 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7291 {
7292         int i;
7293         struct sk_buff *skb;
7294         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7295
7296         skb = txb->skb;
7297         txb->skb = NULL;
7298
7299         pci_unmap_single(tnapi->tp->pdev,
7300                          dma_unmap_addr(txb, mapping),
7301                          skb_headlen(skb),
7302                          PCI_DMA_TODEVICE);
7303
7304         while (txb->fragmented) {
7305                 txb->fragmented = false;
7306                 entry = NEXT_TX(entry);
7307                 txb = &tnapi->tx_buffers[entry];
7308         }
7309
7310         for (i = 0; i <= last; i++) {
7311                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7312
7313                 entry = NEXT_TX(entry);
7314                 txb = &tnapi->tx_buffers[entry];
7315
7316                 pci_unmap_page(tnapi->tp->pdev,
7317                                dma_unmap_addr(txb, mapping),
7318                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7319
7320                 while (txb->fragmented) {
7321                         txb->fragmented = false;
7322                         entry = NEXT_TX(entry);
7323                         txb = &tnapi->tx_buffers[entry];
7324                 }
7325         }
7326 }
7327
7328 /* Work around 4GB and 40-bit hardware DMA bugs. */
7329 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7330                                        struct sk_buff **pskb,
7331                                        u32 *entry, u32 *budget,
7332                                        u32 base_flags, u32 mss, u32 vlan)
7333 {
7334         struct tg3 *tp = tnapi->tp;
7335         struct sk_buff *new_skb, *skb = *pskb;
7336         dma_addr_t new_addr = 0;
7337         int ret = 0;
7338
7339         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7340                 new_skb = skb_copy(skb, GFP_ATOMIC);
7341         else {
7342                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7343
7344                 new_skb = skb_copy_expand(skb,
7345                                           skb_headroom(skb) + more_headroom,
7346                                           skb_tailroom(skb), GFP_ATOMIC);
7347         }
7348
7349         if (!new_skb) {
7350                 ret = -1;
7351         } else {
7352                 /* New SKB is guaranteed to be linear. */
7353                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7354                                           PCI_DMA_TODEVICE);
7355                 /* Make sure the mapping succeeded */
7356                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7357                         dev_kfree_skb(new_skb);
7358                         ret = -1;
7359                 } else {
7360                         u32 save_entry = *entry;
7361
7362                         base_flags |= TXD_FLAG_END;
7363
7364                         tnapi->tx_buffers[*entry].skb = new_skb;
7365                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7366                                            mapping, new_addr);
7367
7368                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7369                                             new_skb->len, base_flags,
7370                                             mss, vlan)) {
7371                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7372                                 dev_kfree_skb(new_skb);
7373                                 ret = -1;
7374                         }
7375                 }
7376         }
7377
7378         dev_kfree_skb(skb);
7379         *pskb = new_skb;
7380         return ret;
7381 }
7382
7383 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7384
7385 /* Use GSO to work around a rare TSO bug that may be triggered when the
7386  * TSO header is greater than 80 bytes.
7387  */
7388 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7389 {
7390         struct sk_buff *segs, *nskb;
7391         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7392
7393         /* Estimate the number of fragments in the worst case */
7394         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7395                 netif_stop_queue(tp->dev);
7396
7397                 /* netif_tx_stop_queue() must be done before checking
7398                  * tx index in tg3_tx_avail() below, because in
7399                  * tg3_tx(), we update tx index before checking for
7400                  * netif_tx_queue_stopped().
7401                  */
7402                 smp_mb();
7403                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7404                         return NETDEV_TX_BUSY;
7405
7406                 netif_wake_queue(tp->dev);
7407         }
7408
7409         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7410         if (IS_ERR(segs))
7411                 goto tg3_tso_bug_end;
7412
7413         do {
7414                 nskb = segs;
7415                 segs = segs->next;
7416                 nskb->next = NULL;
7417                 tg3_start_xmit(nskb, tp->dev);
7418         } while (segs);
7419
7420 tg3_tso_bug_end:
7421         dev_kfree_skb(skb);
7422
7423         return NETDEV_TX_OK;
7424 }
7425
7426 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7427  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7428  */
7429 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7430 {
7431         struct tg3 *tp = netdev_priv(dev);
7432         u32 len, entry, base_flags, mss, vlan = 0;
7433         u32 budget;
7434         int i = -1, would_hit_hwbug;
7435         dma_addr_t mapping;
7436         struct tg3_napi *tnapi;
7437         struct netdev_queue *txq;
7438         unsigned int last;
7439
7440         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7441         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7442         if (tg3_flag(tp, ENABLE_TSS))
7443                 tnapi++;
7444
7445         budget = tg3_tx_avail(tnapi);
7446
7447         /* We are running in a BH-disabled context with netif_tx_lock
7448          * held, and TX reclaim runs via tp->napi.poll inside of a software
7449          * interrupt.  Furthermore, IRQ processing runs lockless, so we have
7450          * no IRQ context deadlocks to worry about either.  Rejoice!
7451          */
7452         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7453                 if (!netif_tx_queue_stopped(txq)) {
7454                         netif_tx_stop_queue(txq);
7455
7456                         /* This is a hard error, log it. */
7457                         netdev_err(dev,
7458                                    "BUG! Tx Ring full when queue awake!\n");
7459                 }
7460                 return NETDEV_TX_BUSY;
7461         }
7462
7463         entry = tnapi->tx_prod;
7464         base_flags = 0;
7465         if (skb->ip_summed == CHECKSUM_PARTIAL)
7466                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7467
7468         mss = skb_shinfo(skb)->gso_size;
7469         if (mss) {
7470                 struct iphdr *iph;
7471                 u32 tcp_opt_len, hdr_len;
7472
7473                 if (skb_header_cloned(skb) &&
7474                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7475                         goto drop;
7476
7477                 iph = ip_hdr(skb);
7478                 tcp_opt_len = tcp_optlen(skb);
7479
7480                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7481
7482                 if (!skb_is_gso_v6(skb)) {
7483                         iph->check = 0;
7484                         iph->tot_len = htons(mss + hdr_len);
7485                 }
7486
7487                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7488                     tg3_flag(tp, TSO_BUG))
7489                         return tg3_tso_bug(tp, skb);
7490
7491                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7492                                TXD_FLAG_CPU_POST_DMA);
7493
7494                 if (tg3_flag(tp, HW_TSO_1) ||
7495                     tg3_flag(tp, HW_TSO_2) ||
7496                     tg3_flag(tp, HW_TSO_3)) {
7497                         tcp_hdr(skb)->check = 0;
7498                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7499                 } else
7500                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7501                                                                  iph->daddr, 0,
7502                                                                  IPPROTO_TCP,
7503                                                                  0);
7504
7505                 if (tg3_flag(tp, HW_TSO_3)) {
7506                         mss |= (hdr_len & 0xc) << 12;
7507                         if (hdr_len & 0x10)
7508                                 base_flags |= 0x00000010;
7509                         base_flags |= (hdr_len & 0x3e0) << 5;
7510                 } else if (tg3_flag(tp, HW_TSO_2))
7511                         mss |= hdr_len << 9;
7512                 else if (tg3_flag(tp, HW_TSO_1) ||
7513                          tg3_asic_rev(tp) == ASIC_REV_5705) {
7514                         if (tcp_opt_len || iph->ihl > 5) {
7515                                 int tsflags;
7516
7517                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7518                                 mss |= (tsflags << 11);
7519                         }
7520                 } else {
7521                         if (tcp_opt_len || iph->ihl > 5) {
7522                                 int tsflags;
7523
7524                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7525                                 base_flags |= tsflags << 12;
7526                         }
7527                 }
7528         }
7529
7530         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7531             !mss && skb->len > VLAN_ETH_FRAME_LEN)
7532                 base_flags |= TXD_FLAG_JMB_PKT;
7533
7534         if (vlan_tx_tag_present(skb)) {
7535                 base_flags |= TXD_FLAG_VLAN;
7536                 vlan = vlan_tx_tag_get(skb);
7537         }
7538
7539         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7540             tg3_flag(tp, TX_TSTAMP_EN)) {
7541                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7542                 base_flags |= TXD_FLAG_HWTSTAMP;
7543         }
7544
7545         len = skb_headlen(skb);
7546
7547         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7548         if (pci_dma_mapping_error(tp->pdev, mapping))
7549                 goto drop;
7550
7552         tnapi->tx_buffers[entry].skb = skb;
7553         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7554
7555         would_hit_hwbug = 0;
7556
7557         if (tg3_flag(tp, 5701_DMA_BUG))
7558                 would_hit_hwbug = 1;
7559
7560         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7561                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7562                             mss, vlan)) {
7563                 would_hit_hwbug = 1;
7564         } else if (skb_shinfo(skb)->nr_frags > 0) {
7565                 u32 tmp_mss = mss;
7566
7567                 if (!tg3_flag(tp, HW_TSO_1) &&
7568                     !tg3_flag(tp, HW_TSO_2) &&
7569                     !tg3_flag(tp, HW_TSO_3))
7570                         tmp_mss = 0;
7571
7572                 /* Now loop through additional data
7573                  * fragments, and queue them.
7574                  */
7575                 last = skb_shinfo(skb)->nr_frags - 1;
7576                 for (i = 0; i <= last; i++) {
7577                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7578
7579                         len = skb_frag_size(frag);
7580                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7581                                                    len, DMA_TO_DEVICE);
7582
7583                         tnapi->tx_buffers[entry].skb = NULL;
7584                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7585                                            mapping);
7586                         if (dma_mapping_error(&tp->pdev->dev, mapping))
7587                                 goto dma_error;
7588
7589                         if (!budget ||
7590                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7591                                             len, base_flags |
7592                                             ((i == last) ? TXD_FLAG_END : 0),
7593                                             tmp_mss, vlan)) {
7594                                 would_hit_hwbug = 1;
7595                                 break;
7596                         }
7597                 }
7598         }
7599
7600         if (would_hit_hwbug) {
7601                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7602
7603                 /* If the workaround fails due to memory/mapping
7604                  * failure, silently drop this packet.
7605                  */
7606                 entry = tnapi->tx_prod;
7607                 budget = tg3_tx_avail(tnapi);
7608                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7609                                                 base_flags, mss, vlan))
7610                         goto drop_nofree;
7611         }
7612
7613         skb_tx_timestamp(skb);
7614         netdev_tx_sent_queue(txq, skb->len);
7615
7616         /* Sync BD data before updating mailbox */
7617         wmb();
7618
7619         /* Packets are ready, update Tx producer idx local and on card. */
7620         tw32_tx_mbox(tnapi->prodmbox, entry);
7621
7622         tnapi->tx_prod = entry;
7623         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7624                 netif_tx_stop_queue(txq);
7625
7626                 /* netif_tx_stop_queue() must be done before checking
7627                  * tx index in tg3_tx_avail() below, because in
7628                  * tg3_tx(), we update tx index before checking for
7629                  * netif_tx_queue_stopped().
7630                  */
7631                 smp_mb();
7632                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7633                         netif_tx_wake_queue(txq);
7634         }
7635
7636         mmiowb();
7637         return NETDEV_TX_OK;
7638
7639 dma_error:
7640         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7641         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7642 drop:
7643         dev_kfree_skb(skb);
7644 drop_nofree:
7645         tp->tx_dropped++;
7646         return NETDEV_TX_OK;
7647 }
7648
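/* Illustrative summary (condensed from the comments above, not
 * original text): the stop/wake protocol pairs the two sides --
 *
 *   producer (tg3_start_xmit):     consumer (tg3_tx):
 *      netif_tx_stop_queue(txq);      update tx index;
 *      smp_mb();                      (barrier on its side);
 *      re-check tg3_tx_avail();       check netif_tx_queue_stopped();
 *
 * so neither side can miss the other's update and leave the queue
 * stopped forever.
 */
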
7649 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7650 {
7651         if (enable) {
7652                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7653                                   MAC_MODE_PORT_MODE_MASK);
7654
7655                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7656
7657                 if (!tg3_flag(tp, 5705_PLUS))
7658                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7659
7660                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7661                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7662                 else
7663                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7664         } else {
7665                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7666
7667                 if (tg3_flag(tp, 5705_PLUS) ||
7668                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7669                     tg3_asic_rev(tp) == ASIC_REV_5700)
7670                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7671         }
7672
7673         tw32(MAC_MODE, tp->mac_mode);
7674         udelay(40);
7675 }
7676
7677 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7678 {
7679         u32 val, bmcr, mac_mode, ptest = 0;
7680
7681         tg3_phy_toggle_apd(tp, false);
7682         tg3_phy_toggle_automdix(tp, 0);
7683
7684         if (extlpbk && tg3_phy_set_extloopbk(tp))
7685                 return -EIO;
7686
7687         bmcr = BMCR_FULLDPLX;
7688         switch (speed) {
7689         case SPEED_10:
7690                 break;
7691         case SPEED_100:
7692                 bmcr |= BMCR_SPEED100;
7693                 break;
7694         case SPEED_1000:
7695         default:
7696                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7697                         speed = SPEED_100;
7698                         bmcr |= BMCR_SPEED100;
7699                 } else {
7700                         speed = SPEED_1000;
7701                         bmcr |= BMCR_SPEED1000;
7702                 }
7703         }
7704
7705         if (extlpbk) {
7706                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7707                         tg3_readphy(tp, MII_CTRL1000, &val);
7708                         val |= CTL1000_AS_MASTER |
7709                                CTL1000_ENABLE_MASTER;
7710                         tg3_writephy(tp, MII_CTRL1000, val);
7711                 } else {
7712                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7713                                 MII_TG3_FET_PTEST_TRIM_2;
7714                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7715                 }
7716         } else
7717                 bmcr |= BMCR_LOOPBACK;
7718
7719         tg3_writephy(tp, MII_BMCR, bmcr);
7720
7721         /* The write needs to be flushed for the FETs */
7722         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7723                 tg3_readphy(tp, MII_BMCR, &bmcr);
7724
7725         udelay(40);
7726
7727         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7728             tg3_asic_rev(tp) == ASIC_REV_5785) {
7729                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7730                              MII_TG3_FET_PTEST_FRC_TX_LINK |
7731                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
7732
7733                 /* The write needs to be flushed for the AC131 */
7734                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7735         }
7736
7737         /* Reset to prevent losing 1st rx packet intermittently */
7738         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7739             tg3_flag(tp, 5780_CLASS)) {
7740                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7741                 udelay(10);
7742                 tw32_f(MAC_RX_MODE, tp->rx_mode);
7743         }
7744
7745         mac_mode = tp->mac_mode &
7746                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7747         if (speed == SPEED_1000)
7748                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7749         else
7750                 mac_mode |= MAC_MODE_PORT_MODE_MII;
7751
7752         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
7753                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7754
7755                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7756                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7757                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7758                         mac_mode |= MAC_MODE_LINK_POLARITY;
7759
7760                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7761                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7762         }
7763
7764         tw32(MAC_MODE, mac_mode);
7765         udelay(40);
7766
7767         return 0;
7768 }
7769
7770 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7771 {
7772         struct tg3 *tp = netdev_priv(dev);
7773
7774         if (features & NETIF_F_LOOPBACK) {
7775                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7776                         return;
7777
7778                 spin_lock_bh(&tp->lock);
7779                 tg3_mac_loopback(tp, true);
7780                 netif_carrier_on(tp->dev);
7781                 spin_unlock_bh(&tp->lock);
7782                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7783         } else {
7784                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7785                         return;
7786
7787                 spin_lock_bh(&tp->lock);
7788                 tg3_mac_loopback(tp, false);
7789                 /* Force link status check */
7790                 tg3_setup_phy(tp, 1);
7791                 spin_unlock_bh(&tp->lock);
7792                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7793         }
7794 }
7795
7796 static netdev_features_t tg3_fix_features(struct net_device *dev,
7797         netdev_features_t features)
7798 {
7799         struct tg3 *tp = netdev_priv(dev);
7800
7801         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7802                 features &= ~NETIF_F_ALL_TSO;
7803
7804         return features;
7805 }
7806
7807 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7808 {
7809         netdev_features_t changed = dev->features ^ features;
7810
7811         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7812                 tg3_set_loopback(dev, features);
7813
7814         return 0;
7815 }
7816
7817 static void tg3_rx_prodring_free(struct tg3 *tp,
7818                                  struct tg3_rx_prodring_set *tpr)
7819 {
7820         int i;
7821
7822         if (tpr != &tp->napi[0].prodring) {
7823                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7824                      i = (i + 1) & tp->rx_std_ring_mask)
7825                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7826                                         tp->rx_pkt_map_sz);
7827
7828                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7829                         for (i = tpr->rx_jmb_cons_idx;
7830                              i != tpr->rx_jmb_prod_idx;
7831                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7832                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7833                                                 TG3_RX_JMB_MAP_SZ);
7834                         }
7835                 }
7836
7837                 return;
7838         }
7839
7840         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7841                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7842                                 tp->rx_pkt_map_sz);
7843
7844         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7845                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7846                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7847                                         TG3_RX_JMB_MAP_SZ);
7848         }
7849 }
7850
7851 /* Initialize rx rings for packet processing.
7852  *
7853  * The chip has been shut down and the driver detached from
7854  * the networking stack, so no interrupts or new tx packets will
7855  * end up in the driver.  tp->{tx,}lock are held and thus
7856  * we may not sleep.
7857  */
7858 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7859                                  struct tg3_rx_prodring_set *tpr)
7860 {
7861         u32 i, rx_pkt_dma_sz;
7862
7863         tpr->rx_std_cons_idx = 0;
7864         tpr->rx_std_prod_idx = 0;
7865         tpr->rx_jmb_cons_idx = 0;
7866         tpr->rx_jmb_prod_idx = 0;
7867
7868         if (tpr != &tp->napi[0].prodring) {
7869                 memset(&tpr->rx_std_buffers[0], 0,
7870                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7871                 if (tpr->rx_jmb_buffers)
7872                         memset(&tpr->rx_jmb_buffers[0], 0,
7873                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7874                 goto done;
7875         }
7876
7877         /* Zero out all descriptors. */
7878         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7879
7880         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7881         if (tg3_flag(tp, 5780_CLASS) &&
7882             tp->dev->mtu > ETH_DATA_LEN)
7883                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7884         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7885
7886         /* Initialize invariants of the rings; we only set this
7887          * stuff once.  This works because the card does not
7888          * write into the rx buffer posting rings.
7889          */
7890         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7891                 struct tg3_rx_buffer_desc *rxd;
7892
7893                 rxd = &tpr->rx_std[i];
7894                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7895                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7896                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7897                                (i << RXD_OPAQUE_INDEX_SHIFT));
7898         }
7899
7900         /* Now allocate fresh SKBs for each rx ring. */
7901         for (i = 0; i < tp->rx_pending; i++) {
7902                 unsigned int frag_size;
7903
7904                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7905                                       &frag_size) < 0) {
7906                         netdev_warn(tp->dev,
7907                                     "Using a smaller RX standard ring. Only "
7908                                     "%d out of %d buffers were allocated "
7909                                     "successfully\n", i, tp->rx_pending);
7910                         if (i == 0)
7911                                 goto initfail;
7912                         tp->rx_pending = i;
7913                         break;
7914                 }
7915         }
7916
7917         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7918                 goto done;
7919
7920         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7921
7922         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7923                 goto done;
7924
7925         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7926                 struct tg3_rx_buffer_desc *rxd;
7927
7928                 rxd = &tpr->rx_jmb[i].std;
7929                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7930                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7931                                   RXD_FLAG_JUMBO;
7932                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7933                        (i << RXD_OPAQUE_INDEX_SHIFT));
7934         }
7935
7936         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7937                 unsigned int frag_size;
7938
7939                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7940                                       &frag_size) < 0) {
7941                         netdev_warn(tp->dev,
7942                                     "Using a smaller RX jumbo ring. Only %d "
7943                                     "out of %d buffers were allocated "
7944                                     "successfully\n", i, tp->rx_jumbo_pending);
7945                         if (i == 0)
7946                                 goto initfail;
7947                         tp->rx_jumbo_pending = i;
7948                         break;
7949                 }
7950         }
7951
7952 done:
7953         return 0;
7954
7955 initfail:
7956         tg3_rx_prodring_free(tp, tpr);
7957         return -ENOMEM;
7958 }
7959
7960 static void tg3_rx_prodring_fini(struct tg3 *tp,
7961                                  struct tg3_rx_prodring_set *tpr)
7962 {
7963         kfree(tpr->rx_std_buffers);
7964         tpr->rx_std_buffers = NULL;
7965         kfree(tpr->rx_jmb_buffers);
7966         tpr->rx_jmb_buffers = NULL;
7967         if (tpr->rx_std) {
7968                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7969                                   tpr->rx_std, tpr->rx_std_mapping);
7970                 tpr->rx_std = NULL;
7971         }
7972         if (tpr->rx_jmb) {
7973                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7974                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7975                 tpr->rx_jmb = NULL;
7976         }
7977 }
7978
7979 static int tg3_rx_prodring_init(struct tg3 *tp,
7980                                 struct tg3_rx_prodring_set *tpr)
7981 {
7982         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7983                                       GFP_KERNEL);
7984         if (!tpr->rx_std_buffers)
7985                 return -ENOMEM;
7986
7987         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7988                                          TG3_RX_STD_RING_BYTES(tp),
7989                                          &tpr->rx_std_mapping,
7990                                          GFP_KERNEL);
7991         if (!tpr->rx_std)
7992                 goto err_out;
7993
7994         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7995                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7996                                               GFP_KERNEL);
7997                 if (!tpr->rx_jmb_buffers)
7998                         goto err_out;
7999
8000                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8001                                                  TG3_RX_JMB_RING_BYTES(tp),
8002                                                  &tpr->rx_jmb_mapping,
8003                                                  GFP_KERNEL);
8004                 if (!tpr->rx_jmb)
8005                         goto err_out;
8006         }
8007
8008         return 0;
8009
8010 err_out:
8011         tg3_rx_prodring_fini(tp, tpr);
8012         return -ENOMEM;
8013 }
8014
8015 /* Free up pending packets in all rx/tx rings.
8016  *
8017  * The chip has been shut down and the driver detached from
8018  * the networking, so no interrupts or new tx packets will
8019  * end up in the driver.  tp->{tx,}lock is not held and we are not
8020  * in an interrupt context and thus may sleep.
8021  */
8022 static void tg3_free_rings(struct tg3 *tp)
8023 {
8024         int i, j;
8025
8026         for (j = 0; j < tp->irq_cnt; j++) {
8027                 struct tg3_napi *tnapi = &tp->napi[j];
8028
8029                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8030
8031                 if (!tnapi->tx_buffers)
8032                         continue;
8033
8034                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8035                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8036
8037                         if (!skb)
8038                                 continue;
8039
8040                         tg3_tx_skb_unmap(tnapi, i,
8041                                          skb_shinfo(skb)->nr_frags - 1);
8042
8043                         dev_kfree_skb_any(skb);
8044                 }
8045                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8046         }
8047 }
8048
8049 /* Initialize tx/rx rings for packet processing.
8050  *
8051  * The chip has been shut down and the driver detached from
8052  * the networking, so no interrupts or new tx packets will
8053  * end up in the driver.  tp->{tx,}lock are held and thus
8054  * we may not sleep.
8055  */
8056 static int tg3_init_rings(struct tg3 *tp)
8057 {
8058         int i;
8059
8060         /* Free up all the SKBs. */
8061         tg3_free_rings(tp);
8062
8063         for (i = 0; i < tp->irq_cnt; i++) {
8064                 struct tg3_napi *tnapi = &tp->napi[i];
8065
8066                 tnapi->last_tag = 0;
8067                 tnapi->last_irq_tag = 0;
8068                 tnapi->hw_status->status = 0;
8069                 tnapi->hw_status->status_tag = 0;
8070                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8071
8072                 tnapi->tx_prod = 0;
8073                 tnapi->tx_cons = 0;
8074                 if (tnapi->tx_ring)
8075                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8076
8077                 tnapi->rx_rcb_ptr = 0;
8078                 if (tnapi->rx_rcb)
8079                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8080
8081                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8082                         tg3_free_rings(tp);
8083                         return -ENOMEM;
8084                 }
8085         }
8086
8087         return 0;
8088 }
8089
8090 static void tg3_mem_tx_release(struct tg3 *tp)
8091 {
8092         int i;
8093
8094         for (i = 0; i < tp->irq_max; i++) {
8095                 struct tg3_napi *tnapi = &tp->napi[i];
8096
8097                 if (tnapi->tx_ring) {
8098                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8099                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8100                         tnapi->tx_ring = NULL;
8101                 }
8102
8103                 kfree(tnapi->tx_buffers);
8104                 tnapi->tx_buffers = NULL;
8105         }
8106 }
8107
8108 static int tg3_mem_tx_acquire(struct tg3 *tp)
8109 {
8110         int i;
8111         struct tg3_napi *tnapi = &tp->napi[0];
8112
8113         /* If multivector TSS is enabled, vector 0 does not handle
8114          * tx interrupts.  Don't allocate any resources for it.
8115          */
8116         if (tg3_flag(tp, ENABLE_TSS))
8117                 tnapi++;
8118
8119         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8120                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8121                                             TG3_TX_RING_SIZE, GFP_KERNEL);
8122                 if (!tnapi->tx_buffers)
8123                         goto err_out;
8124
8125                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8126                                                     TG3_TX_RING_BYTES,
8127                                                     &tnapi->tx_desc_mapping,
8128                                                     GFP_KERNEL);
8129                 if (!tnapi->tx_ring)
8130                         goto err_out;
8131         }
8132
8133         return 0;
8134
8135 err_out:
8136         tg3_mem_tx_release(tp);
8137         return -ENOMEM;
8138 }
8139
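/* Note (illustrative): the tx_buffers allocation above could
 * equivalently use the overflow-checked helper:
 *
 *      kcalloc(TG3_TX_RING_SIZE, sizeof(struct tg3_tx_ring_info),
 *              GFP_KERNEL);
 */
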
8140 static void tg3_mem_rx_release(struct tg3 *tp)
8141 {
8142         int i;
8143
8144         for (i = 0; i < tp->irq_max; i++) {
8145                 struct tg3_napi *tnapi = &tp->napi[i];
8146
8147                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8148
8149                 if (!tnapi->rx_rcb)
8150                         continue;
8151
8152                 dma_free_coherent(&tp->pdev->dev,
8153                                   TG3_RX_RCB_RING_BYTES(tp),
8154                                   tnapi->rx_rcb,
8155                                   tnapi->rx_rcb_mapping);
8156                 tnapi->rx_rcb = NULL;
8157         }
8158 }
8159
8160 static int tg3_mem_rx_acquire(struct tg3 *tp)
8161 {
8162         unsigned int i, limit;
8163
8164         limit = tp->rxq_cnt;
8165
8166         /* If RSS is enabled, we need a (dummy) producer ring
8167          * set on vector zero.  This is the true hw prodring.
8168          */
8169         if (tg3_flag(tp, ENABLE_RSS))
8170                 limit++;
8171
8172         for (i = 0; i < limit; i++) {
8173                 struct tg3_napi *tnapi = &tp->napi[i];
8174
8175                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8176                         goto err_out;
8177
8178                 /* If multivector RSS is enabled, vector 0
8179                  * does not handle rx or tx interrupts.
8180                  * Don't allocate any resources for it.
8181                  */
8182                 if (!i && tg3_flag(tp, ENABLE_RSS))
8183                         continue;
8184
8185                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8186                                                    TG3_RX_RCB_RING_BYTES(tp),
8187                                                    &tnapi->rx_rcb_mapping,
8188                                                    GFP_KERNEL | __GFP_ZERO);
8189                 if (!tnapi->rx_rcb)
8190                         goto err_out;
8191         }
8192
8193         return 0;
8194
8195 err_out:
8196         tg3_mem_rx_release(tp);
8197         return -ENOMEM;
8198 }
8199
8200 /*
8201  * Must not be invoked with interrupt sources disabled and
8202  * the hardware shut down.
8203  */
8204 static void tg3_free_consistent(struct tg3 *tp)
8205 {
8206         int i;
8207
8208         for (i = 0; i < tp->irq_cnt; i++) {
8209                 struct tg3_napi *tnapi = &tp->napi[i];
8210
8211                 if (tnapi->hw_status) {
8212                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8213                                           tnapi->hw_status,
8214                                           tnapi->status_mapping);
8215                         tnapi->hw_status = NULL;
8216                 }
8217         }
8218
8219         tg3_mem_rx_release(tp);
8220         tg3_mem_tx_release(tp);
8221
8222         if (tp->hw_stats) {
8223                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8224                                   tp->hw_stats, tp->stats_mapping);
8225                 tp->hw_stats = NULL;
8226         }
8227 }
8228
8229 /*
8230  * Must not be invoked with interrupt sources disabled and
8231  * the hardware shut down.  Can sleep.
8232  */
8233 static int tg3_alloc_consistent(struct tg3 *tp)
8234 {
8235         int i;
8236
8237         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8238                                           sizeof(struct tg3_hw_stats),
8239                                           &tp->stats_mapping,
8240                                           GFP_KERNEL | __GFP_ZERO);
8241         if (!tp->hw_stats)
8242                 goto err_out;
8243
8244         for (i = 0; i < tp->irq_cnt; i++) {
8245                 struct tg3_napi *tnapi = &tp->napi[i];
8246                 struct tg3_hw_status *sblk;
8247
8248                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8249                                                       TG3_HW_STATUS_SIZE,
8250                                                       &tnapi->status_mapping,
8251                                                       GFP_KERNEL | __GFP_ZERO);
8252                 if (!tnapi->hw_status)
8253                         goto err_out;
8254
8255                 sblk = tnapi->hw_status;
8256
8257                 if (tg3_flag(tp, ENABLE_RSS)) {
8258                         u16 *prodptr = NULL;
8259
8260                         /*
8261                          * When RSS is enabled, the status block format changes
8262                          * slightly.  The "rx_jumbo_consumer", "reserved",
8263                          * and "rx_mini_consumer" members get mapped to the
8264                          * other three rx return ring producer indexes.
8265                          */
8266                         switch (i) {
8267                         case 1:
8268                                 prodptr = &sblk->idx[0].rx_producer;
8269                                 break;
8270                         case 2:
8271                                 prodptr = &sblk->rx_jumbo_consumer;
8272                                 break;
8273                         case 3:
8274                                 prodptr = &sblk->reserved;
8275                                 break;
8276                         case 4:
8277                                 prodptr = &sblk->rx_mini_consumer;
8278                                 break;
8279                         }
8280                         tnapi->rx_rcb_prod_idx = prodptr;
8281                 } else {
8282                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8283                 }
8284         }
8285
8286         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8287                 goto err_out;
8288
8289         return 0;
8290
8291 err_out:
8292         tg3_free_consistent(tp);
8293         return -ENOMEM;
8294 }
8295
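/* Note (illustrative): GFP_KERNEL | __GFP_ZERO asks the DMA API for
 * zeroed coherent memory; the contemporaneous helper
 * dma_zalloc_coherent() expresses the same request.
 */
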
8296 #define MAX_WAIT_CNT 1000
8297
8298 /* To stop a block, clear the enable bit and poll till it
8299  * clears.  tp->lock is held.
8300  */
8301 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8302 {
8303         unsigned int i;
8304         u32 val;
8305
8306         if (tg3_flag(tp, 5705_PLUS)) {
8307                 switch (ofs) {
8308                 case RCVLSC_MODE:
8309                 case DMAC_MODE:
8310                 case MBFREE_MODE:
8311                 case BUFMGR_MODE:
8312                 case MEMARB_MODE:
8313                         /* We can't enable/disable these bits of the
8314                          * 5705/5750, just say success.
8315                          * 5705/5750, so just say success.
8316                         return 0;
8317
8318                 default:
8319                         break;
8320                 }
8321         }
8322
8323         val = tr32(ofs);
8324         val &= ~enable_bit;
8325         tw32_f(ofs, val);
8326
8327         for (i = 0; i < MAX_WAIT_CNT; i++) {
8328                 udelay(100);
8329                 val = tr32(ofs);
8330                 if ((val & enable_bit) == 0)
8331                         break;
8332         }
8333
8334         if (i == MAX_WAIT_CNT && !silent) {
8335                 dev_err(&tp->pdev->dev,
8336                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8337                         ofs, enable_bit);
8338                 return -ENODEV;
8339         }
8340
8341         return 0;
8342 }
8343
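/* Illustrative usage (taken from tg3_abort_hw() below): each engine is
 * stopped by pairing its mode register with its enable bit, e.g.
 *
 *      err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
 */
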
8344 /* tp->lock is held. */
8345 static int tg3_abort_hw(struct tg3 *tp, int silent)
8346 {
8347         int i, err;
8348
8349         tg3_disable_ints(tp);
8350
8351         tp->rx_mode &= ~RX_MODE_ENABLE;
8352         tw32_f(MAC_RX_MODE, tp->rx_mode);
8353         udelay(10);
8354
8355         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8356         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8357         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8358         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8359         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8360         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8361
8362         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8363         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8364         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8365         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8366         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8367         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8368         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8369
8370         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8371         tw32_f(MAC_MODE, tp->mac_mode);
8372         udelay(40);
8373
8374         tp->tx_mode &= ~TX_MODE_ENABLE;
8375         tw32_f(MAC_TX_MODE, tp->tx_mode);
8376
8377         for (i = 0; i < MAX_WAIT_CNT; i++) {
8378                 udelay(100);
8379                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8380                         break;
8381         }
8382         if (i >= MAX_WAIT_CNT) {
8383                 dev_err(&tp->pdev->dev,
8384                         "%s timed out, TX_MODE_ENABLE will not clear "
8385                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8386                 err |= -ENODEV;
8387         }
8388
8389         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8390         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8391         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8392
8393         tw32(FTQ_RESET, 0xffffffff);
8394         tw32(FTQ_RESET, 0x00000000);
8395
8396         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8397         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8398
8399         for (i = 0; i < tp->irq_cnt; i++) {
8400                 struct tg3_napi *tnapi = &tp->napi[i];
8401                 if (tnapi->hw_status)
8402                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8403         }
8404
8405         return err;
8406 }
8407
8408 /* Save PCI command register before chip reset */
8409 static void tg3_save_pci_state(struct tg3 *tp)
8410 {
8411         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8412 }
8413
8414 /* Restore PCI state after chip reset */
8415 static void tg3_restore_pci_state(struct tg3 *tp)
8416 {
8417         u32 val;
8418
8419         /* Re-enable indirect register accesses. */
8420         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8421                                tp->misc_host_ctrl);
8422
8423         /* Set MAX PCI retry to zero. */
8424         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8425         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8426             tg3_flag(tp, PCIX_MODE))
8427                 val |= PCISTATE_RETRY_SAME_DMA;
8428         /* Allow reads and writes to the APE register and memory space. */
8429         if (tg3_flag(tp, ENABLE_APE))
8430                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8431                        PCISTATE_ALLOW_APE_SHMEM_WR |
8432                        PCISTATE_ALLOW_APE_PSPACE_WR;
8433         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8434
8435         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8436
8437         if (!tg3_flag(tp, PCI_EXPRESS)) {
8438                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8439                                       tp->pci_cacheline_sz);
8440                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8441                                       tp->pci_lat_timer);
8442         }
8443
8444         /* Make sure PCI-X relaxed ordering bit is clear. */
8445         if (tg3_flag(tp, PCIX_MODE)) {
8446                 u16 pcix_cmd;
8447
8448                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8449                                      &pcix_cmd);
8450                 pcix_cmd &= ~PCI_X_CMD_ERO;
8451                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8452                                       pcix_cmd);
8453         }
8454
8455         if (tg3_flag(tp, 5780_CLASS)) {
8456
8457                 /* Chip reset on 5780 will reset MSI enable bit,
8458                  * so need to restore it.
8459                  */
8460                 if (tg3_flag(tp, USING_MSI)) {
8461                         u16 ctrl;
8462
8463                         pci_read_config_word(tp->pdev,
8464                                              tp->msi_cap + PCI_MSI_FLAGS,
8465                                              &ctrl);
8466                         pci_write_config_word(tp->pdev,
8467                                               tp->msi_cap + PCI_MSI_FLAGS,
8468                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8469                         val = tr32(MSGINT_MODE);
8470                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8471                 }
8472         }
8473 }
8474
8475 /* tp->lock is held. */
8476 static int tg3_chip_reset(struct tg3 *tp)
8477 {
8478         u32 val;
8479         void (*write_op)(struct tg3 *, u32, u32);
8480         int i, err;
8481
8482         tg3_nvram_lock(tp);
8483
8484         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8485
8486         /* No matching tg3_nvram_unlock() after this because
8487          * chip reset below will undo the nvram lock.
8488          */
8489         tp->nvram_lock_cnt = 0;
8490
8491         /* GRC_MISC_CFG core clock reset will clear the memory
8492          * enable bit in PCI register 4 and the MSI enable bit
8493          * on some chips, so we save relevant registers here.
8494          */
8495         tg3_save_pci_state(tp);
8496
8497         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8498             tg3_flag(tp, 5755_PLUS))
8499                 tw32(GRC_FASTBOOT_PC, 0);
8500
8501         /*
8502          * We must avoid the readl() that normally takes place.
8503          * It locks up machines, causes machine checks, and other
8504          * fun things.  So, temporarily disable the 5701
8505          * hardware workaround while we do the reset.
8506          */
8507         write_op = tp->write32;
8508         if (write_op == tg3_write_flush_reg32)
8509                 tp->write32 = tg3_write32;
8510
8511         /* Prevent the irq handler from reading or writing PCI registers
8512          * during chip reset when the memory enable bit in the PCI command
8513          * register may be cleared.  The chip does not generate interrupt
8514          * at this time, but the irq handler may still be called due to irq
8515          * sharing or irqpoll.
8516          */
8517         tg3_flag_set(tp, CHIP_RESETTING);
8518         for (i = 0; i < tp->irq_cnt; i++) {
8519                 struct tg3_napi *tnapi = &tp->napi[i];
8520                 if (tnapi->hw_status) {
8521                         tnapi->hw_status->status = 0;
8522                         tnapi->hw_status->status_tag = 0;
8523                 }
8524                 tnapi->last_tag = 0;
8525                 tnapi->last_irq_tag = 0;
8526         }
8527         smp_mb();
8528
8529         for (i = 0; i < tp->irq_cnt; i++)
8530                 synchronize_irq(tp->napi[i].irq_vec);
8531
8532         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8533                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8534                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8535         }
8536
8537         /* do the reset */
8538         val = GRC_MISC_CFG_CORECLK_RESET;
8539
8540         if (tg3_flag(tp, PCI_EXPRESS)) {
8541                 /* Force PCIe 1.0a mode */
8542                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8543                     !tg3_flag(tp, 57765_PLUS) &&
8544                     tr32(TG3_PCIE_PHY_TSTCTL) ==
8545                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8546                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8547
8548                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8549                         tw32(GRC_MISC_CFG, (1 << 29));
8550                         val |= (1 << 29);
8551                 }
8552         }
8553
8554         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8555                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8556                 tw32(GRC_VCPU_EXT_CTRL,
8557                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8558         }
8559
8560         /* Manage gphy power for all CPMU-absent PCIe devices. */
8561         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8562                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8563
8564         tw32(GRC_MISC_CFG, val);
8565
8566         /* restore 5701 hardware bug workaround write method */
8567         tp->write32 = write_op;
8568
8569         /* Unfortunately, we have to delay before the PCI read back.
8570          * Some 575X chips will not even respond to a PCI cfg access
8571          * when the reset command is given to the chip.
8572          *
8573          * How do these hardware designers expect things to work
8574          * properly if the PCI write is posted for a long period
8575          * of time?  It is always necessary to have some method by
8576          * which a register read back can occur to push the write
8577          * out which does the reset.
8578          *
8579          * For most tg3 variants the trick below has worked.
8580          * Ho hum...
8581          */
8582         udelay(120);
8583
8584         /* Flush PCI posted writes.  The normal MMIO registers
8585          * are inaccessible at this time so this is the only
8586          * way to do this reliably (actually, this is no longer
8587          * the case, see above).  I tried to use indirect
8588          * register read/write but this upset some 5701 variants.
8589          */
8590         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8591
8592         udelay(120);
8593
8594         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8595                 u16 val16;
8596
8597                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8598                         int j;
8599                         u32 cfg_val;
8600
8601                         /* Wait for link training to complete.  */
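                              /* (5000 iterations of udelay(100) is roughly a
                               * 500 ms wait.)
                               */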
8602                         for (j = 0; j < 5000; j++)
8603                                 udelay(100);
8604
8605                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8606                         pci_write_config_dword(tp->pdev, 0xc4,
8607                                                cfg_val | (1 << 15));
8608                 }
8609
8610                 /* Clear the "no snoop" and "relaxed ordering" bits. */
8611                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8612                 /*
8613                  * Older PCIe devices only support the 128 byte
8614                  * MPS setting.  Enforce the restriction.
8615                  */
8616                 if (!tg3_flag(tp, CPMU_PRESENT))
8617                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8618                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8619
8620                 /* Clear error status */
8621                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8622                                       PCI_EXP_DEVSTA_CED |
8623                                       PCI_EXP_DEVSTA_NFED |
8624                                       PCI_EXP_DEVSTA_FED |
8625                                       PCI_EXP_DEVSTA_URD);
8626         }
8627
8628         tg3_restore_pci_state(tp);
8629
8630         tg3_flag_clear(tp, CHIP_RESETTING);
8631         tg3_flag_clear(tp, ERROR_PROCESSED);
8632
8633         val = 0;
8634         if (tg3_flag(tp, 5780_CLASS))
8635                 val = tr32(MEMARB_MODE);
8636         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8637
8638         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8639                 tg3_stop_fw(tp);
8640                 tw32(0x5000, 0x400);
8641         }
8642
8643         if (tg3_flag(tp, IS_SSB_CORE)) {
8644                 /*
8645                  * BCM4785: In order to avoid repercussions from using
8646                  * potentially defective internal ROM, stop the Rx RISC CPU,
8647                  * which is not required anyway.
8648                  */
8649                 tg3_stop_fw(tp);
8650                 tg3_halt_cpu(tp, RX_CPU_BASE);
8651         }
8652
8653         tw32(GRC_MODE, tp->grc_mode);
8654
8655         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8656                 val = tr32(0xc4);
8657
8658                 tw32(0xc4, val | (1 << 15));
8659         }
8660
8661         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8662             tg3_asic_rev(tp) == ASIC_REV_5705) {
8663                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8664                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8665                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8666                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8667         }
8668
8669         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8670                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8671                 val = tp->mac_mode;
8672         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8673                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8674                 val = tp->mac_mode;
8675         } else
8676                 val = 0;
8677
8678         tw32_f(MAC_MODE, val);
8679         udelay(40);
8680
8681         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8682
8683         err = tg3_poll_fw(tp);
8684         if (err)
8685                 return err;
8686
8687         tg3_mdio_start(tp);
8688
8689         if (tg3_flag(tp, PCI_EXPRESS) &&
8690             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8691             tg3_asic_rev(tp) != ASIC_REV_5785 &&
8692             !tg3_flag(tp, 57765_PLUS)) {
8693                 val = tr32(0x7c00);
8694
8695                 tw32(0x7c00, val | (1 << 25));
8696         }
8697
8698         if (tg3_asic_rev(tp) == ASIC_REV_5720) {
8699                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8700                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8701         }
8702
8703         /* Reprobe ASF enable state.  */
8704         tg3_flag_clear(tp, ENABLE_ASF);
8705         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8706         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8707         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8708                 u32 nic_cfg;
8709
8710                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8711                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8712                         tg3_flag_set(tp, ENABLE_ASF);
8713                         tp->last_event_jiffies = jiffies;
8714                         if (tg3_flag(tp, 5750_PLUS))
8715                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8716                 }
8717         }
8718
8719         return 0;
8720 }
8721
8722 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8723 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8724
8725 /* tp->lock is held. */
8726 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8727 {
8728         int err;
8729
8730         tg3_stop_fw(tp);
8731
8732         tg3_write_sig_pre_reset(tp, kind);
8733
8734         tg3_abort_hw(tp, silent);
8735         err = tg3_chip_reset(tp);
8736
8737         __tg3_set_mac_addr(tp, 0);
8738
8739         tg3_write_sig_legacy(tp, kind);
8740         tg3_write_sig_post_reset(tp, kind);
8741
8742         if (tp->hw_stats) {
8743                 /* Save the stats across chip resets... */
8744                 tg3_get_nstats(tp, &tp->net_stats_prev);
8745                 tg3_get_estats(tp, &tp->estats_prev);
8746
8747                 /* And make sure the next sample is new data */
8748                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8749         }
8750
8751         if (err)
8752                 return err;
8753
8754         return 0;
8755 }
8756
8757 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8758 {
8759         struct tg3 *tp = netdev_priv(dev);
8760         struct sockaddr *addr = p;
8761         int err = 0, skip_mac_1 = 0;
8762
8763         if (!is_valid_ether_addr(addr->sa_data))
8764                 return -EADDRNOTAVAIL;
8765
8766         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8767
8768         if (!netif_running(dev))
8769                 return 0;
8770
8771         if (tg3_flag(tp, ENABLE_ASF)) {
8772                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8773
8774                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8775                 addr0_low = tr32(MAC_ADDR_0_LOW);
8776                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8777                 addr1_low = tr32(MAC_ADDR_1_LOW);
8778
8779                 /* Skip MAC addr 1 if ASF is using it. */
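                      /* That is: MAC addr 1 is nonzero and differs from MAC
                       * addr 0, which presumably means the ASF firmware has
                       * claimed that address register for itself.
                       */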
8780                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8781                     !(addr1_high == 0 && addr1_low == 0))
8782                         skip_mac_1 = 1;
8783         }
8784         spin_lock_bh(&tp->lock);
8785         __tg3_set_mac_addr(tp, skip_mac_1);
8786         spin_unlock_bh(&tp->lock);
8787
8788         return err;
8789 }
8790
8791 /* tp->lock is held. */
8792 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8793                            dma_addr_t mapping, u32 maxlen_flags,
8794                            u32 nic_addr)
8795 {
8796         tg3_write_mem(tp,
8797                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8798                       ((u64) mapping >> 32));
8799         tg3_write_mem(tp,
8800                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8801                       ((u64) mapping & 0xffffffff));
8802         tg3_write_mem(tp,
8803                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8804                        maxlen_flags);
8805
8806         if (!tg3_flag(tp, 5705_PLUS))
8807                 tg3_write_mem(tp,
8808                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8809                               nic_addr);
8810 }
8811
8812
8813 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8814 {
8815         int i = 0;
8816
8817         if (!tg3_flag(tp, ENABLE_TSS)) {
8818                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8819                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8820                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8821         } else {
8822                 tw32(HOSTCC_TXCOL_TICKS, 0);
8823                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8824                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8825
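                      /* The per-vector TX coalescing registers appear to be
                       * laid out in 0x18-byte strides starting at the _VEC1
                       * offsets, hence the i * 0x18 addressing below.
                       */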
8826                 for (; i < tp->txq_cnt; i++) {
8827                         u32 reg;
8828
8829                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8830                         tw32(reg, ec->tx_coalesce_usecs);
8831                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8832                         tw32(reg, ec->tx_max_coalesced_frames);
8833                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8834                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8835                 }
8836         }
8837
8838         for (; i < tp->irq_max - 1; i++) {
8839                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8840                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8841                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8842         }
8843 }
8844
8845 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8846 {
8847         int i = 0;
8848         u32 limit = tp->rxq_cnt;
8849
8850         if (!tg3_flag(tp, ENABLE_RSS)) {
8851                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8852                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8853                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8854                 limit--;
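                      /* Vector 0 was just serviced by the legacy registers
                       * above, so one fewer per-vector register set is needed.
                       */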
8855         } else {
8856                 tw32(HOSTCC_RXCOL_TICKS, 0);
8857                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8858                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8859         }
8860
8861         for (; i < limit; i++) {
8862                 u32 reg;
8863
8864                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8865                 tw32(reg, ec->rx_coalesce_usecs);
8866                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8867                 tw32(reg, ec->rx_max_coalesced_frames);
8868                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8869                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8870         }
8871
8872         for (; i < tp->irq_max - 1; i++) {
8873                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8874                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8875                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8876         }
8877 }
8878
8879 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8880 {
8881         tg3_coal_tx_init(tp, ec);
8882         tg3_coal_rx_init(tp, ec);
8883
8884         if (!tg3_flag(tp, 5705_PLUS)) {
8885                 u32 val = ec->stats_block_coalesce_usecs;
8886
8887                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8888                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8889
8890                 if (!tp->link_up)
8891                         val = 0;
8892
8893                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8894         }
8895 }
8896
8897 /* tp->lock is held. */
8898 static void tg3_rings_reset(struct tg3 *tp)
8899 {
8900         int i;
8901         u32 stblk, txrcb, rxrcb, limit;
8902         struct tg3_napi *tnapi = &tp->napi[0];
8903
8904         /* Disable all transmit rings but the first. */
8905         if (!tg3_flag(tp, 5705_PLUS))
8906                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8907         else if (tg3_flag(tp, 5717_PLUS))
8908                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8909         else if (tg3_flag(tp, 57765_CLASS) ||
8910                  tg3_asic_rev(tp) == ASIC_REV_5762)
8911                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8912         else
8913                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8914
8915         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8916              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8917                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8918                               BDINFO_FLAGS_DISABLED);
8919
8920
8921         /* Disable all receive return rings but the first. */
8922         if (tg3_flag(tp, 5717_PLUS))
8923                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8924         else if (!tg3_flag(tp, 5705_PLUS))
8925                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8926         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
8927                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
8928                  tg3_flag(tp, 57765_CLASS))
8929                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8930         else
8931                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8932
8933         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8934              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8935                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8936                               BDINFO_FLAGS_DISABLED);
8937
8938         /* Disable interrupts */
8939         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8940         tp->napi[0].chk_msi_cnt = 0;
8941         tp->napi[0].last_rx_cons = 0;
8942         tp->napi[0].last_tx_cons = 0;
8943
8944         /* Zero mailbox registers. */
8945         if (tg3_flag(tp, SUPPORT_MSIX)) {
8946                 for (i = 1; i < tp->irq_max; i++) {
8947                         tp->napi[i].tx_prod = 0;
8948                         tp->napi[i].tx_cons = 0;
8949                         if (tg3_flag(tp, ENABLE_TSS))
8950                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8951                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8952                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8953                         tp->napi[i].chk_msi_cnt = 0;
8954                         tp->napi[i].last_rx_cons = 0;
8955                         tp->napi[i].last_tx_cons = 0;
8956                 }
8957                 if (!tg3_flag(tp, ENABLE_TSS))
8958                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8959         } else {
8960                 tp->napi[0].tx_prod = 0;
8961                 tp->napi[0].tx_cons = 0;
8962                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8963                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8964         }
8965
8966         /* Make sure the NIC-based send BD rings are disabled. */
8967         if (!tg3_flag(tp, 5705_PLUS)) {
8968                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8969                 for (i = 0; i < 16; i++)
8970                         tw32_tx_mbox(mbox + i * 8, 0);
8971         }
8972
8973         txrcb = NIC_SRAM_SEND_RCB;
8974         rxrcb = NIC_SRAM_RCV_RET_RCB;
8975
8976         /* Clear status block in ram. */
8977         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8978
8979         /* Set status block DMA address */
8980         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8981              ((u64) tnapi->status_mapping >> 32));
8982         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8983              ((u64) tnapi->status_mapping & 0xffffffff));
8984
8985         if (tnapi->tx_ring) {
8986                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8987                                (TG3_TX_RING_SIZE <<
8988                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8989                                NIC_SRAM_TX_BUFFER_DESC);
8990                 txrcb += TG3_BDINFO_SIZE;
8991         }
8992
8993         if (tnapi->rx_rcb) {
8994                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8995                                (tp->rx_ret_ring_mask + 1) <<
8996                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8997                 rxrcb += TG3_BDINFO_SIZE;
8998         }
8999
9000         stblk = HOSTCC_STATBLCK_RING1;
9001
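             /* The remaining vectors' status block address registers appear
              * to follow HOSTCC_STATBLCK_RING1 as consecutive 64-bit (8-byte)
              * pairs, hence stblk += 8 in the loop below.
              */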
9002         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9003                 u64 mapping = (u64)tnapi->status_mapping;
9004                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9005                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9006
9007                 /* Clear status block in ram. */
9008                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9009
9010                 if (tnapi->tx_ring) {
9011                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9012                                        (TG3_TX_RING_SIZE <<
9013                                         BDINFO_FLAGS_MAXLEN_SHIFT),
9014                                        NIC_SRAM_TX_BUFFER_DESC);
9015                         txrcb += TG3_BDINFO_SIZE;
9016                 }
9017
9018                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9019                                ((tp->rx_ret_ring_mask + 1) <<
9020                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
9021
9022                 stblk += 8;
9023                 rxrcb += TG3_BDINFO_SIZE;
9024         }
9025 }
9026
9027 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9028 {
9029         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9030
9031         if (!tg3_flag(tp, 5750_PLUS) ||
9032             tg3_flag(tp, 5780_CLASS) ||
9033             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9034             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9035             tg3_flag(tp, 57765_PLUS))
9036                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9037         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9038                  tg3_asic_rev(tp) == ASIC_REV_5787)
9039                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9040         else
9041                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9042
9043         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9044         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9045
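             /* The programmed threshold is presumably the ring depth at which
              * the chip starts fetching more RX buffer descriptors: the
              * smaller of the NIC-side limit (half the BD cache, capped at
              * rx_std_max_post) and 1/8 of the host ring (at least one).
              */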
9046         val = min(nic_rep_thresh, host_rep_thresh);
9047         tw32(RCVBDI_STD_THRESH, val);
9048
9049         if (tg3_flag(tp, 57765_PLUS))
9050                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9051
9052         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9053                 return;
9054
9055         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9056
9057         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9058
9059         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9060         tw32(RCVBDI_JUMBO_THRESH, val);
9061
9062         if (tg3_flag(tp, 57765_PLUS))
9063                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9064 }
9065
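     /* Bitwise CRC-32 using the reflected Ethernet polynomial 0xedb88320,
      * i.e. the same CRC the MAC computes over frames.  Used below to hash
      * multicast addresses into the MAC_HASH_REG_* filter registers.
      */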
9066 static inline u32 calc_crc(unsigned char *buf, int len)
9067 {
9068         u32 reg;
9069         u32 tmp;
9070         int j, k;
9071
9072         reg = 0xffffffff;
9073
9074         for (j = 0; j < len; j++) {
9075                 reg ^= buf[j];
9076
9077                 for (k = 0; k < 8; k++) {
9078                         tmp = reg & 0x01;
9079
9080                         reg >>= 1;
9081
9082                         if (tmp)
9083                                 reg ^= 0xedb88320;
9084                 }
9085         }
9086
9087         return ~reg;
9088 }
9089
9090 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9091 {
9092         /* accept or reject all multicast frames */
9093         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9094         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9095         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9096         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9097 }
9098
9099 static void __tg3_set_rx_mode(struct net_device *dev)
9100 {
9101         struct tg3 *tp = netdev_priv(dev);
9102         u32 rx_mode;
9103
9104         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9105                                   RX_MODE_KEEP_VLAN_TAG);
9106
9107 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9108         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9109          * flag clear.
9110          */
9111         if (!tg3_flag(tp, ENABLE_ASF))
9112                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9113 #endif
9114
9115         if (dev->flags & IFF_PROMISC) {
9116                 /* Promiscuous mode. */
9117                 rx_mode |= RX_MODE_PROMISC;
9118         } else if (dev->flags & IFF_ALLMULTI) {
9119                 /* Accept all multicast. */
9120                 tg3_set_multi(tp, 1);
9121         } else if (netdev_mc_empty(dev)) {
9122                 /* Reject all multicast. */
9123                 tg3_set_multi(tp, 0);
9124         } else {
9125                 /* Accept one or more multicast addresses. */
9126                 struct netdev_hw_addr *ha;
9127                 u32 mc_filter[4] = { 0, };
9128                 u32 regidx;
9129                 u32 bit;
9130                 u32 crc;
9131
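                      /* Hash each address with the Ethernet CRC, then use the
                       * inverted low 7 bits to pick one of 128 filter bits:
                       * bits 5-6 select one of the four 32-bit hash registers,
                       * bits 0-4 the bit within it.  E.g. a value of 0x23
                       * maps to register 1, bit 3.
                       */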
9132                 netdev_for_each_mc_addr(ha, dev) {
9133                         crc = calc_crc(ha->addr, ETH_ALEN);
9134                         bit = ~crc & 0x7f;
9135                         regidx = (bit & 0x60) >> 5;
9136                         bit &= 0x1f;
9137                         mc_filter[regidx] |= (1 << bit);
9138                 }
9139
9140                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9141                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9142                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9143                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9144         }
9145
9146         if (rx_mode != tp->rx_mode) {
9147                 tp->rx_mode = rx_mode;
9148                 tw32_f(MAC_RX_MODE, rx_mode);
9149                 udelay(10);
9150         }
9151 }
9152
9153 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9154 {
9155         int i;
9156
9157         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9158                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9159 }
9160
9161 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9162 {
9163         int i;
9164
9165         if (!tg3_flag(tp, SUPPORT_MSIX))
9166                 return;
9167
9168         if (tp->rxq_cnt == 1) {
9169                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9170                 return;
9171         }
9172
9173         /* Validate table against current IRQ count */
9174         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9175                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9176                         break;
9177         }
9178
9179         if (i != TG3_RSS_INDIR_TBL_SIZE)
9180                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9181 }
9182
9183 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9184 {
9185         int i = 0;
9186         u32 reg = MAC_RSS_INDIR_TBL_0;
9187
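             /* Pack the table into hardware registers: entries are 4 bits
              * wide, so eight of them fit in each 32-bit register, with the
              * first entry in the most significant nibble.
              */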
9188         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9189                 u32 val = tp->rss_ind_tbl[i];
9190                 i++;
9191                 for (; i % 8; i++) {
9192                         val <<= 4;
9193                         val |= tp->rss_ind_tbl[i];
9194                 }
9195                 tw32(reg, val);
9196                 reg += 4;
9197         }
9198 }
9199
9200 /* tp->lock is held. */
9201 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9202 {
9203         u32 val, rdmac_mode;
9204         int i, err, limit;
9205         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9206
9207         tg3_disable_ints(tp);
9208
9209         tg3_stop_fw(tp);
9210
9211         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9212
9213         if (tg3_flag(tp, INIT_COMPLETE))
9214                 tg3_abort_hw(tp, 1);
9215
9216         /* Enable MAC control of LPI */
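             /* (LPI is the low-power idle state defined by Energy Efficient
              * Ethernet, IEEE 802.3az.)
              */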
9217         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9218                 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9219                       TG3_CPMU_EEE_LNKIDL_UART_IDL;
9220                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9221                         val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9222
9223                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9224
9225                 tw32_f(TG3_CPMU_EEE_CTRL,
9226                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9227
9228                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9229                       TG3_CPMU_EEEMD_LPI_IN_TX |
9230                       TG3_CPMU_EEEMD_LPI_IN_RX |
9231                       TG3_CPMU_EEEMD_EEE_ENABLE;
9232
9233                 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9234                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9235
9236                 if (tg3_flag(tp, ENABLE_APE))
9237                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9238
9239                 tw32_f(TG3_CPMU_EEE_MODE, val);
9240
9241                 tw32_f(TG3_CPMU_EEE_DBTMR1,
9242                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9243                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9244
9245                 tw32_f(TG3_CPMU_EEE_DBTMR2,
9246                        TG3_CPMU_DBTMR2_APE_TX_2047US |
9247                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9248         }
9249
9250         if (reset_phy)
9251                 tg3_phy_reset(tp);
9252
9253         err = tg3_chip_reset(tp);
9254         if (err)
9255                 return err;
9256
9257         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9258
9259         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9260                 val = tr32(TG3_CPMU_CTRL);
9261                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9262                 tw32(TG3_CPMU_CTRL, val);
9263
9264                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9265                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9266                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9267                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9268
9269                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9270                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9271                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9272                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9273
9274                 val = tr32(TG3_CPMU_HST_ACC);
9275                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9276                 val |= CPMU_HST_ACC_MACCLK_6_25;
9277                 tw32(TG3_CPMU_HST_ACC, val);
9278         }
9279
9280         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9281                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9282                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9283                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9284                 tw32(PCIE_PWR_MGMT_THRESH, val);
9285
9286                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9287                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9288
9289                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9290
9291                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9292                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9293         }
9294
9295         if (tg3_flag(tp, L1PLLPD_EN)) {
9296                 u32 grc_mode = tr32(GRC_MODE);
9297
9298                 /* Access the lower 1K of PL PCIE block registers. */
9299                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9300                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9301
9302                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9303                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9304                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9305
9306                 tw32(GRC_MODE, grc_mode);
9307         }
9308
9309         if (tg3_flag(tp, 57765_CLASS)) {
9310                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9311                         u32 grc_mode = tr32(GRC_MODE);
9312
9313                         /* Access the lower 1K of PL PCIE block registers. */
9314                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9315                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9316
9317                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9318                                    TG3_PCIE_PL_LO_PHYCTL5);
9319                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9320                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9321
9322                         tw32(GRC_MODE, grc_mode);
9323                 }
9324
9325                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9326                         u32 grc_mode;
9327
9328                         /* Fix transmit hangs */
9329                         val = tr32(TG3_CPMU_PADRNG_CTL);
9330                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9331                         tw32(TG3_CPMU_PADRNG_CTL, val);
9332
9333                         grc_mode = tr32(GRC_MODE);
9334
9335                         /* Access the lower 1K of DL PCIE block registers. */
9336                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9337                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9338
9339                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9340                                    TG3_PCIE_DL_LO_FTSMAX);
9341                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9342                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9343                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9344
9345                         tw32(GRC_MODE, grc_mode);
9346                 }
9347
9348                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9349                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9350                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9351                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9352         }
9353
9354         /* This works around an issue with Athlon chipsets on
9355          * B3 tigon3 silicon.  This bit has no effect on any
9356          * other revision.  But do not set this on PCI Express
9357          * chips and don't even touch the clocks if the CPMU is present.
9358          */
9359         if (!tg3_flag(tp, CPMU_PRESENT)) {
9360                 if (!tg3_flag(tp, PCI_EXPRESS))
9361                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9362                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9363         }
9364
9365         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9366             tg3_flag(tp, PCIX_MODE)) {
9367                 val = tr32(TG3PCI_PCISTATE);
9368                 val |= PCISTATE_RETRY_SAME_DMA;
9369                 tw32(TG3PCI_PCISTATE, val);
9370         }
9371
9372         if (tg3_flag(tp, ENABLE_APE)) {
9373                 /* Allow reads and writes to the
9374                  * APE register and memory space.
9375                  */
9376                 val = tr32(TG3PCI_PCISTATE);
9377                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9378                        PCISTATE_ALLOW_APE_SHMEM_WR |
9379                        PCISTATE_ALLOW_APE_PSPACE_WR;
9380                 tw32(TG3PCI_PCISTATE, val);
9381         }
9382
9383         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9384                 /* Enable some hw fixes.  */
9385                 val = tr32(TG3PCI_MSI_DATA);
9386                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9387                 tw32(TG3PCI_MSI_DATA, val);
9388         }
9389
9390         /* Descriptor ring init may make accesses to the
9391          * NIC SRAM area to setup the TX descriptors, so we
9392          * can only do this after the hardware has been
9393          * successfully reset.
9394          */
9395         err = tg3_init_rings(tp);
9396         if (err)
9397                 return err;
9398
9399         if (tg3_flag(tp, 57765_PLUS)) {
9400                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9401                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9402                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9403                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9404                 if (!tg3_flag(tp, 57765_CLASS) &&
9405                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
9406                     tg3_asic_rev(tp) != ASIC_REV_5762)
9407                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9408                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9409         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9410                    tg3_asic_rev(tp) != ASIC_REV_5761) {
9411                 /* This value is determined during the probe-time DMA
9412                  * engine test, tg3_test_dma.
9413                  */
9414                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9415         }
9416
9417         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9418                           GRC_MODE_4X_NIC_SEND_RINGS |
9419                           GRC_MODE_NO_TX_PHDR_CSUM |
9420                           GRC_MODE_NO_RX_PHDR_CSUM);
9421         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9422
9423         /* Pseudo-header checksum is done by hardware logic and not
9424          * the offload processors, so make the chip do the pseudo-
9425          * header checksums on receive.  For transmit it is more
9426          * convenient to do the pseudo-header checksum in software
9427          * as Linux does that on transmit for us in all cases.
9428          */
9429         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9430
9431         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9432         if (tp->rxptpctl)
9433                 tw32(TG3_RX_PTP_CTL,
9434                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9435
9436         if (tg3_flag(tp, PTP_CAPABLE))
9437                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9438
9439         tw32(GRC_MODE, tp->grc_mode | val);
9440
9441         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
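             /* A prescaler value of 65 presumably divides the 66 MHz clock
              * down to a 1 MHz (i.e. 1 usec) timer tick: 66 MHz / (65 + 1).
              */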
9442         val = tr32(GRC_MISC_CFG);
9443         val &= ~0xff;
9444         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9445         tw32(GRC_MISC_CFG, val);
9446
9447         /* Initialize MBUF/DESC pool. */
9448         if (tg3_flag(tp, 5750_PLUS)) {
9449                 /* Do nothing.  */
9450         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9451                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9452                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9453                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9454                 else
9455                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9456                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9457                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9458         } else if (tg3_flag(tp, TSO_CAPABLE)) {
9459                 int fw_len;
9460
9461                 fw_len = tp->fw_len;
9462                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9463                 tw32(BUFMGR_MB_POOL_ADDR,
9464                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9465                 tw32(BUFMGR_MB_POOL_SIZE,
9466                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9467         }
9468
9469         if (tp->dev->mtu <= ETH_DATA_LEN) {
9470                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9471                      tp->bufmgr_config.mbuf_read_dma_low_water);
9472                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9473                      tp->bufmgr_config.mbuf_mac_rx_low_water);
9474                 tw32(BUFMGR_MB_HIGH_WATER,
9475                      tp->bufmgr_config.mbuf_high_water);
9476         } else {
9477                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9478                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9479                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9480                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9481                 tw32(BUFMGR_MB_HIGH_WATER,
9482                      tp->bufmgr_config.mbuf_high_water_jumbo);
9483         }
9484         tw32(BUFMGR_DMA_LOW_WATER,
9485              tp->bufmgr_config.dma_low_water);
9486         tw32(BUFMGR_DMA_HIGH_WATER,
9487              tp->bufmgr_config.dma_high_water);
9488
9489         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9490         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9491                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9492         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9493             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9494             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9495                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9496         tw32(BUFMGR_MODE, val);
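             /* Poll for up to ~20 ms (2000 iterations x 10 usec) for the
              * buffer manager to report itself enabled.
              */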
9497         for (i = 0; i < 2000; i++) {
9498                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9499                         break;
9500                 udelay(10);
9501         }
9502         if (i >= 2000) {
9503                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9504                 return -ENODEV;
9505         }
9506
9507         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9508                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9509
9510         tg3_setup_rxbd_thresholds(tp);
9511
9512         /* Initialize TG3_BDINFO's at:
9513          *  RCVDBDI_STD_BD:     standard eth size rx ring
9514          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9515          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
9516          *
9517          * like so:
9518          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9519          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9520          *                              ring attribute flags
9521          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9522          *
9523          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9524          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9525          *
9526          * The size of each ring is fixed in the firmware, but the location is
9527          * configurable.
9528          */
9529         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9530              ((u64) tpr->rx_std_mapping >> 32));
9531         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9532              ((u64) tpr->rx_std_mapping & 0xffffffff));
9533         if (!tg3_flag(tp, 5717_PLUS))
9534                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9535                      NIC_SRAM_RX_BUFFER_DESC);
9536
9537         /* Disable the mini ring */
9538         if (!tg3_flag(tp, 5705_PLUS))
9539                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9540                      BDINFO_FLAGS_DISABLED);
9541
9542         /* Program the jumbo buffer descriptor ring control
9543          * blocks on those devices that have them.
9544          */
9545         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9546             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9547
9548                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9549                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9550                              ((u64) tpr->rx_jmb_mapping >> 32));
9551                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9552                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9553                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9554                               BDINFO_FLAGS_MAXLEN_SHIFT;
9555                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9556                              val | BDINFO_FLAGS_USE_EXT_RECV);
9557                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9558                             tg3_flag(tp, 57765_CLASS) ||
9559                             tg3_asic_rev(tp) == ASIC_REV_5762)
9560                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9561                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9562                 } else {
9563                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9564                              BDINFO_FLAGS_DISABLED);
9565                 }
9566
9567                 if (tg3_flag(tp, 57765_PLUS)) {
9568                         val = TG3_RX_STD_RING_SIZE(tp);
9569                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9570                         val |= (TG3_RX_STD_DMA_SZ << 2);
9571                 } else
9572                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9573         } else
9574                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9575
9576         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9577
9578         tpr->rx_std_prod_idx = tp->rx_pending;
9579         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9580
9581         tpr->rx_jmb_prod_idx =
9582                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9583         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9584
9585         tg3_rings_reset(tp);
9586
9587         /* Initialize MAC address and backoff seed. */
9588         __tg3_set_mac_addr(tp, 0);
9589
9590         /* MTU + ethernet header + FCS + optional VLAN tag */
9591         tw32(MAC_RX_MTU_SIZE,
9592              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9593
9594         /* The slot time is changed by tg3_setup_phy if we
9595          * run at gigabit with half duplex.
9596          */
9597         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9598               (6 << TX_LENGTHS_IPG_SHIFT) |
9599               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9600
9601         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9602             tg3_asic_rev(tp) == ASIC_REV_5762)
9603                 val |= tr32(MAC_TX_LENGTHS) &
9604                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9605                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9606
9607         tw32(MAC_TX_LENGTHS, val);
9608
9609         /* Receive rules. */
9610         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9611         tw32(RCVLPC_CONFIG, 0x0181);
9612
9613         /* Calculate RDMAC_MODE setting early; we need it to determine
9614          * the RCVLPC_STATE_ENABLE mask.
9615          */
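             /* (RDMAC is the read DMA engine, which presumably pulls TX
              * buffers and descriptors from host memory into the NIC.)
              */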
9616         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9617                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9618                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9619                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9620                       RDMAC_MODE_LNGREAD_ENAB);
9621
9622         if (tg3_asic_rev(tp) == ASIC_REV_5717)
9623                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9624
9625         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9626             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9627             tg3_asic_rev(tp) == ASIC_REV_57780)
9628                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9629                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9630                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9631
9632         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9633             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9634                 if (tg3_flag(tp, TSO_CAPABLE) &&
9635                     tg3_asic_rev(tp) == ASIC_REV_5705) {
9636                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9637                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9638                            !tg3_flag(tp, IS_5788)) {
9639                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9640                 }
9641         }
9642
9643         if (tg3_flag(tp, PCI_EXPRESS))
9644                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9645
9646         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9647                 tp->dma_limit = 0;
9648                 if (tp->dev->mtu <= ETH_DATA_LEN) {
9649                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9650                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9651                 }
9652         }
9653
9654         if (tg3_flag(tp, HW_TSO_1) ||
9655             tg3_flag(tp, HW_TSO_2) ||
9656             tg3_flag(tp, HW_TSO_3))
9657                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9658
9659         if (tg3_flag(tp, 57765_PLUS) ||
9660             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9661             tg3_asic_rev(tp) == ASIC_REV_57780)
9662                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9663
9664         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9665             tg3_asic_rev(tp) == ASIC_REV_5762)
9666                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9667
9668         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9669             tg3_asic_rev(tp) == ASIC_REV_5784 ||
9670             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9671             tg3_asic_rev(tp) == ASIC_REV_57780 ||
9672             tg3_flag(tp, 57765_PLUS)) {
9673                 u32 tgtreg;
9674
9675                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9676                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9677                 else
9678                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
9679
9680                 val = tr32(tgtreg);
9681                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9682                     tg3_asic_rev(tp) == ASIC_REV_5762) {
9683                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9684                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9685                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9686                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9687                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9688                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9689                 }
9690                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9691         }
9692
9693         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9694             tg3_asic_rev(tp) == ASIC_REV_5720 ||
9695             tg3_asic_rev(tp) == ASIC_REV_5762) {
9696                 u32 tgtreg;
9697
9698                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9699                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9700                 else
9701                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9702
9703                 val = tr32(tgtreg);
9704                 tw32(tgtreg, val |
9705                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9706                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9707         }
9708
9709         /* Receive/send statistics. */
9710         if (tg3_flag(tp, 5750_PLUS)) {
9711                 val = tr32(RCVLPC_STATS_ENABLE);
9712                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9713                 tw32(RCVLPC_STATS_ENABLE, val);
9714         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9715                    tg3_flag(tp, TSO_CAPABLE)) {
9716                 val = tr32(RCVLPC_STATS_ENABLE);
9717                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9718                 tw32(RCVLPC_STATS_ENABLE, val);
9719         } else {
9720                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9721         }
9722         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9723         tw32(SNDDATAI_STATSENAB, 0xffffff);
9724         tw32(SNDDATAI_STATSCTRL,
9725              (SNDDATAI_SCTRL_ENABLE |
9726               SNDDATAI_SCTRL_FASTUPD));
9727
9728         /* Setup host coalescing engine. */
9729         tw32(HOSTCC_MODE, 0);
9730         for (i = 0; i < 2000; i++) {
9731                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9732                         break;
9733                 udelay(10);
9734         }
9735
9736         __tg3_set_coalesce(tp, &tp->coal);
9737
9738         if (!tg3_flag(tp, 5705_PLUS)) {
9739                 /* Status/statistics block address.  See tg3_timer,
9740                  * the tg3_periodic_fetch_stats call there, and
9741                  * tg3_get_stats to see how this works for 5705/5750 chips.
9742                  */
9743                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9744                      ((u64) tp->stats_mapping >> 32));
9745                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9746                      ((u64) tp->stats_mapping & 0xffffffff));
9747                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9748
9749                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9750
9751                 /* Clear statistics and status block memory areas */
9752                 for (i = NIC_SRAM_STATS_BLK;
9753                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9754                      i += sizeof(u32)) {
9755                         tg3_write_mem(tp, i, 0);
9756                         udelay(40);
9757                 }
9758         }
9759
9760         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9761
9762         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9763         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9764         if (!tg3_flag(tp, 5705_PLUS))
9765                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9766
9767         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9768                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9769                 /* reset to prevent losing the 1st rx packet intermittently */
9770                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9771                 udelay(10);
9772         }
9773
9774         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9775                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9776                         MAC_MODE_FHDE_ENABLE;
9777         if (tg3_flag(tp, ENABLE_APE))
9778                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9779         if (!tg3_flag(tp, 5705_PLUS) &&
9780             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9781             tg3_asic_rev(tp) != ASIC_REV_5700)
9782                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9783         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9784         udelay(40);
9785
9786         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9787          * If TG3_FLAG_IS_NIC is zero, we should read the
9788          * register to preserve the GPIO settings for LOMs. The GPIOs,
9789          * whether used as inputs or outputs, are set by boot code after
9790          * reset.
9791          */
9792         if (!tg3_flag(tp, IS_NIC)) {
9793                 u32 gpio_mask;
9794
9795                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9796                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9797                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9798
9799                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
9800                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9801                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9802
9803                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
9804                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9805
9806                 tp->grc_local_ctrl &= ~gpio_mask;
9807                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9808
9809                 /* GPIO1 must be driven high for eeprom write protect */
9810                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9811                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9812                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9813         }
9814         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9815         udelay(100);
9816
9817         if (tg3_flag(tp, USING_MSIX)) {
9818                 val = tr32(MSGINT_MODE);
9819                 val |= MSGINT_MODE_ENABLE;
9820                 if (tp->irq_cnt > 1)
9821                         val |= MSGINT_MODE_MULTIVEC_EN;
9822                 if (!tg3_flag(tp, 1SHOT_MSI))
9823                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9824                 tw32(MSGINT_MODE, val);
9825         }
9826
9827         if (!tg3_flag(tp, 5705_PLUS)) {
9828                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9829                 udelay(40);
9830         }
9831
9832         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9833                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9834                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9835                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9836                WDMAC_MODE_LNGREAD_ENAB);
9837
9838         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9839             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9840                 if (tg3_flag(tp, TSO_CAPABLE) &&
9841                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
9842                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
9843                         /* nothing */
9844                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9845                            !tg3_flag(tp, IS_5788)) {
9846                         val |= WDMAC_MODE_RX_ACCEL;
9847                 }
9848         }
9849
9850         /* Enable host coalescing bug fix */
9851         if (tg3_flag(tp, 5755_PLUS))
9852                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9853
9854         if (tg3_asic_rev(tp) == ASIC_REV_5785)
9855                 val |= WDMAC_MODE_BURST_ALL_DATA;
9856
9857         tw32_f(WDMAC_MODE, val);
9858         udelay(40);
9859
9860         if (tg3_flag(tp, PCIX_MODE)) {
9861                 u16 pcix_cmd;
9862
9863                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9864                                      &pcix_cmd);
9865                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
9866                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9867                         pcix_cmd |= PCI_X_CMD_READ_2K;
9868                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
9869                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9870                         pcix_cmd |= PCI_X_CMD_READ_2K;
9871                 }
9872                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9873                                       pcix_cmd);
9874         }
9875
9876         tw32_f(RDMAC_MODE, rdmac_mode);
9877         udelay(40);
9878
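        /* Note (inferred from the code below): on the 5719, if any RDMA
         * channel length register has been left programmed above the
         * current MTU, the LSO read DMA TX-length workaround bit is set.
         * tg3_periodic_fetch_stats() clears the workaround again once
         * more frames than TG3_NUM_RDMA_CHANNELS have been transmitted.
         */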
9879         if (tg3_asic_rev(tp) == ASIC_REV_5719) {
9880                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9881                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9882                                 break;
9883                 }
9884                 if (i < TG3_NUM_RDMA_CHANNELS) {
9885                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9886                         val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9887                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9888                         tg3_flag_set(tp, 5719_RDMA_BUG);
9889                 }
9890         }
9891
9892         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9893         if (!tg3_flag(tp, 5705_PLUS))
9894                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9895
9896         if (tg3_asic_rev(tp) == ASIC_REV_5761)
9897                 tw32(SNDDATAC_MODE,
9898                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9899         else
9900                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9901
9902         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9903         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9904         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9905         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9906                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9907         tw32(RCVDBDI_MODE, val);
9908         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9909         if (tg3_flag(tp, HW_TSO_1) ||
9910             tg3_flag(tp, HW_TSO_2) ||
9911             tg3_flag(tp, HW_TSO_3))
9912                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9913         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9914         if (tg3_flag(tp, ENABLE_TSS))
9915                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9916         tw32(SNDBDI_MODE, val);
9917         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9918
9919         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
9920                 err = tg3_load_5701_a0_firmware_fix(tp);
9921                 if (err)
9922                         return err;
9923         }
9924
9925         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9926                 /* Ignore any errors from the firmware download. If the
9927                  * download fails, the device will operate with EEE disabled.
9928                  */
9929                 tg3_load_57766_firmware(tp);
9930         }
9931
9932         if (tg3_flag(tp, TSO_CAPABLE)) {
9933                 err = tg3_load_tso_firmware(tp);
9934                 if (err)
9935                         return err;
9936         }
9937
9938         tp->tx_mode = TX_MODE_ENABLE;
9939
9940         if (tg3_flag(tp, 5755_PLUS) ||
9941             tg3_asic_rev(tp) == ASIC_REV_5906)
9942                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9943
9944         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9945             tg3_asic_rev(tp) == ASIC_REV_5762) {
9946                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9947                 tp->tx_mode &= ~val;
9948                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9949         }
9950
9951         tw32_f(MAC_TX_MODE, tp->tx_mode);
9952         udelay(100);
9953
9954         if (tg3_flag(tp, ENABLE_RSS)) {
9955                 tg3_rss_write_indir_tbl(tp);
9956
9957                 /* Set up the "secret" 40-byte RSS hash key (ten 32-bit registers). */
9958                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9959                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9960                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9961                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9962                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9963                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9964                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9965                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9966                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9967                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9968         }
9969
9970         tp->rx_mode = RX_MODE_ENABLE;
9971         if (tg3_flag(tp, 5755_PLUS))
9972                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9973
9974         if (tg3_flag(tp, ENABLE_RSS))
9975                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9976                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9977                                RX_MODE_RSS_IPV6_HASH_EN |
9978                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9979                                RX_MODE_RSS_IPV4_HASH_EN |
9980                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9981
9982         tw32_f(MAC_RX_MODE, tp->rx_mode);
9983         udelay(10);
9984
9985         tw32(MAC_LED_CTRL, tp->led_ctrl);
9986
9987         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9988         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9989                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9990                 udelay(10);
9991         }
9992         tw32_f(MAC_RX_MODE, tp->rx_mode);
9993         udelay(10);
9994
9995         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9996                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
9997                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9998                         /* Set drive transmission level to 1.2V  */
9999                         /* only if the signal pre-emphasis bit is not set  */
10000                         val = tr32(MAC_SERDES_CFG);
10001                         val &= 0xfffff000;
10002                         val |= 0x880;
10003                         tw32(MAC_SERDES_CFG, val);
10004                 }
10005                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10006                         tw32(MAC_SERDES_CFG, 0x616000);
10007         }
10008
10009         /* Prevent chip from dropping frames when flow control
10010          * is enabled.
10011          */
10012         if (tg3_flag(tp, 57765_CLASS))
10013                 val = 1;
10014         else
10015                 val = 2;
10016         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10017
10018         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10019             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10020                 /* Use hardware link auto-negotiation */
10021                 tg3_flag_set(tp, HW_AUTONEG);
10022         }
10023
10024         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10025             tg3_asic_rev(tp) == ASIC_REV_5714) {
10026                 u32 tmp;
10027
10028                 tmp = tr32(SERDES_RX_CTRL);
10029                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10030                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10031                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10032                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10033         }
10034
10035         if (!tg3_flag(tp, USE_PHYLIB)) {
10036                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10037                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10038
10039                 err = tg3_setup_phy(tp, 0);
10040                 if (err)
10041                         return err;
10042
10043                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10044                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10045                         u32 tmp;
10046
10047                         /* Clear CRC stats. */
10048                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10049                                 tg3_writephy(tp, MII_TG3_TEST1,
10050                                              tmp | MII_TG3_TEST1_CRC_EN);
10051                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10052                         }
10053                 }
10054         }
10055
10056         __tg3_set_rx_mode(tp->dev);
10057
10058         /* Initialize receive rules. */
10059         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10060         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10061         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10062         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10063
10064         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10065                 limit = 8;
10066         else
10067                 limit = 16;
10068         if (tg3_flag(tp, ENABLE_ASF))
10069                 limit -= 4;
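        /* With ASF enabled the top four rules (12-15) are left untouched,
         * presumably reserved for the management firmware.  The switch
         * below falls through deliberately, clearing every rule/value
         * pair from rule limit - 1 down to rule 4; rules 0 and 1 were
         * programmed above, 2 and 3 are intentionally left commented out.
         */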
10070         switch (limit) {
10071         case 16:
10072                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10073         case 15:
10074                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10075         case 14:
10076                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10077         case 13:
10078                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10079         case 12:
10080                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10081         case 11:
10082                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10083         case 10:
10084                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10085         case 9:
10086                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10087         case 8:
10088                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10089         case 7:
10090                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10091         case 6:
10092                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10093         case 5:
10094                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10095         case 4:
10096                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10097         case 3:
10098                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10099         case 2:
10100         case 1:
10101
10102         default:
10103                 break;
10104         }
10105
10106         if (tg3_flag(tp, ENABLE_APE))
10107                 /* Write our heartbeat update interval to APE. */
10108                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10109                                 APE_HOST_HEARTBEAT_INT_DISABLE);
10110
10111         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10112
10113         return 0;
10114 }
10115
10116 /* Called at device open time to get the chip ready for
10117  * packet processing.  Invoked with tp->lock held.
10118  */
10119 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
10120 {
10121         tg3_switch_clocks(tp);
10122
10123         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10124
10125         return tg3_reset_hw(tp, reset_phy);
10126 }
10127
10128 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10129 {
10130         int i;
10131
10132         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10133                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10134
10135                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10136                 off += len;
10137
10138                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10139                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10140                         memset(ocir, 0, TG3_OCIR_LEN);
10141         }
10142 }
10143
10144 /* sysfs attributes for hwmon */
10145 static ssize_t tg3_show_temp(struct device *dev,
10146                              struct device_attribute *devattr, char *buf)
10147 {
10148         struct pci_dev *pdev = to_pci_dev(dev);
10149         struct net_device *netdev = pci_get_drvdata(pdev);
10150         struct tg3 *tp = netdev_priv(netdev);
10151         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10152         u32 temperature;
10153
10154         spin_lock_bh(&tp->lock);
10155         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10156                                 sizeof(temperature));
10157         spin_unlock_bh(&tp->lock);
10158         return sprintf(buf, "%u\n", temperature);
10159 }
10160
10161
10162 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10163                           TG3_TEMP_SENSOR_OFFSET);
10164 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10165                           TG3_TEMP_CAUTION_OFFSET);
10166 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10167                           TG3_TEMP_MAX_OFFSET);
10168
10169 static struct attribute *tg3_attributes[] = {
10170         &sensor_dev_attr_temp1_input.dev_attr.attr,
10171         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10172         &sensor_dev_attr_temp1_max.dev_attr.attr,
10173         NULL
10174 };
10175
10176 static const struct attribute_group tg3_group = {
10177         .attrs = tg3_attributes,
10178 };
10179
10180 static void tg3_hwmon_close(struct tg3 *tp)
10181 {
10182         if (tp->hwmon_dev) {
10183                 hwmon_device_unregister(tp->hwmon_dev);
10184                 tp->hwmon_dev = NULL;
10185                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10186         }
10187 }
10188
10189 static void tg3_hwmon_open(struct tg3 *tp)
10190 {
10191         int i, err;
10192         u32 size = 0;
10193         struct pci_dev *pdev = tp->pdev;
10194         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10195
10196         tg3_sd_scan_scratchpad(tp, ocirs);
10197
10198         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10199                 if (!ocirs[i].src_data_length)
10200                         continue;
10201
10202                 size += ocirs[i].src_hdr_length;
10203                 size += ocirs[i].src_data_length;
10204         }
10205
10206         if (!size)
10207                 return;
10208
10209         /* Register hwmon sysfs hooks */
10210         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10211         if (err) {
10212                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10213                 return;
10214         }
10215
10216         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10217         if (IS_ERR(tp->hwmon_dev)) {
10218                 tp->hwmon_dev = NULL;
10219                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10220                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10221         }
10222 }
10223
10224
10225 #define TG3_STAT_ADD32(PSTAT, REG) \
10226 do {    u32 __val = tr32(REG); \
10227         (PSTAT)->low += __val; \
10228         if ((PSTAT)->low < __val) \
10229                 (PSTAT)->high += 1; \
10230 } while (0)
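
/* The "(PSTAT)->low < __val" test above is the usual unsigned wrap-around
 * carry check.  Worked example: with low = 0xfffffff0 and a register
 * readback of __val = 0x20, low += __val wraps to 0x10, and 0x10 < 0x20
 * flags the carry, so high is incremented and the 64-bit high:low
 * accumulator stays exact even though the hardware counter is 32 bits.
 */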
10231
10232 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10233 {
10234         struct tg3_hw_stats *sp = tp->hw_stats;
10235
10236         if (!tp->link_up)
10237                 return;
10238
10239         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10240         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10241         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10242         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10243         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10244         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10245         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10246         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10247         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10248         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10249         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10250         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10251         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10252         if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10253                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10254                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10255                 u32 val;
10256
10257                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10258                 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10259                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10260                 tg3_flag_clear(tp, 5719_RDMA_BUG);
10261         }
10262
10263         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10264         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10265         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10266         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10267         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10268         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10269         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10270         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10271         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10272         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10273         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10274         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10275         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10276         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10277
10278         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10279         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10280             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10281             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10282                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10283         } else {
10284                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10285                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10286                 if (val) {
10287                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10288                         sp->rx_discards.low += val;
10289                         if (sp->rx_discards.low < val)
10290                                 sp->rx_discards.high += 1;
10291                 }
10292                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10293         }
10294         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10295 }
10296
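/* Heuristic for lost MSIs: if a vector still has work pending but its
 * consumer indices have not moved since the previous timer tick, give it
 * one more tick of grace and then invoke the MSI handler by hand.
 */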
10297 static void tg3_chk_missed_msi(struct tg3 *tp)
10298 {
10299         u32 i;
10300
10301         for (i = 0; i < tp->irq_cnt; i++) {
10302                 struct tg3_napi *tnapi = &tp->napi[i];
10303
10304                 if (tg3_has_work(tnapi)) {
10305                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10306                             tnapi->last_tx_cons == tnapi->tx_cons) {
10307                                 if (tnapi->chk_msi_cnt < 1) {
10308                                         tnapi->chk_msi_cnt++;
10309                                         return;
10310                                 }
10311                                 tg3_msi(0, tnapi);
10312                         }
10313                 }
10314                 tnapi->chk_msi_cnt = 0;
10315                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10316                 tnapi->last_tx_cons = tnapi->tx_cons;
10317         }
10318 }
10319
10320 static void tg3_timer(unsigned long __opaque)
10321 {
10322         struct tg3 *tp = (struct tg3 *) __opaque;
10323
10324         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10325                 goto restart_timer;
10326
10327         spin_lock(&tp->lock);
10328
10329         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10330             tg3_flag(tp, 57765_CLASS))
10331                 tg3_chk_missed_msi(tp);
10332
10333         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10334                 /* BCM4785: Flush posted writes from GbE to host memory. */
10335                 tr32(HOSTCC_MODE);
10336         }
10337
10338         if (!tg3_flag(tp, TAGGED_STATUS)) {
10339                 /* All of this garbage is here because, when using
10340                  * non-tagged IRQ status, the mailbox/status_block
10341                  * protocol the chip uses with the cpu is race prone.
10342                  */
10343                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10344                         tw32(GRC_LOCAL_CTRL,
10345                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10346                 } else {
10347                         tw32(HOSTCC_MODE, tp->coalesce_mode |
10348                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10349                 }
10350
10351                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10352                         spin_unlock(&tp->lock);
10353                         tg3_reset_task_schedule(tp);
10354                         goto restart_timer;
10355                 }
10356         }
10357
10358         /* This part only runs once per second. */
10359         if (!--tp->timer_counter) {
10360                 if (tg3_flag(tp, 5705_PLUS))
10361                         tg3_periodic_fetch_stats(tp);
10362
10363                 if (tp->setlpicnt && !--tp->setlpicnt)
10364                         tg3_phy_eee_enable(tp);
10365
10366                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10367                         u32 mac_stat;
10368                         int phy_event;
10369
10370                         mac_stat = tr32(MAC_STATUS);
10371
10372                         phy_event = 0;
10373                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10374                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10375                                         phy_event = 1;
10376                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10377                                 phy_event = 1;
10378
10379                         if (phy_event)
10380                                 tg3_setup_phy(tp, 0);
10381                 } else if (tg3_flag(tp, POLL_SERDES)) {
10382                         u32 mac_stat = tr32(MAC_STATUS);
10383                         int need_setup = 0;
10384
10385                         if (tp->link_up &&
10386                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10387                                 need_setup = 1;
10388                         }
10389                         if (!tp->link_up &&
10390                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
10391                                          MAC_STATUS_SIGNAL_DET))) {
10392                                 need_setup = 1;
10393                         }
10394                         if (need_setup) {
10395                                 if (!tp->serdes_counter) {
10396                                         tw32_f(MAC_MODE,
10397                                              (tp->mac_mode &
10398                                               ~MAC_MODE_PORT_MODE_MASK));
10399                                         udelay(40);
10400                                         tw32_f(MAC_MODE, tp->mac_mode);
10401                                         udelay(40);
10402                                 }
10403                                 tg3_setup_phy(tp, 0);
10404                         }
10405                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10406                            tg3_flag(tp, 5780_CLASS)) {
10407                         tg3_serdes_parallel_detect(tp);
10408                 }
10409
10410                 tp->timer_counter = tp->timer_multiplier;
10411         }
10412
10413         /* Heartbeat is only sent once every 2 seconds.
10414          *
10415          * The heartbeat is to tell the ASF firmware that the host
10416          * driver is still alive.  In the event that the OS crashes,
10417          * ASF needs to reset the hardware to free up the FIFO space
10418          * that may be filled with rx packets destined for the host.
10419          * If the FIFO is full, ASF will no longer function properly.
10420          *
10421          * Unintended resets have been reported on real time kernels
10422          * where the timer doesn't run on time.  Netpoll will also have
10423          * the same problem.
10424          *
10425          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10426          * to check the ring condition when the heartbeat is expiring
10427          * before doing the reset.  This will prevent most unintended
10428          * resets.
10429          */
10430         if (!--tp->asf_counter) {
10431                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10432                         tg3_wait_for_event_ack(tp);
10433
10434                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10435                                       FWCMD_NICDRV_ALIVE3);
10436                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10437                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10438                                       TG3_FW_UPDATE_TIMEOUT_SEC);
10439
10440                         tg3_generate_fw_event(tp);
10441                 }
10442                 tp->asf_counter = tp->asf_multiplier;
10443         }
10444
10445         spin_unlock(&tp->lock);
10446
10447 restart_timer:
10448         tp->timer.expires = jiffies + tp->timer_offset;
10449         add_timer(&tp->timer);
10450 }
10451
10452 static void tg3_timer_init(struct tg3 *tp)
10453 {
10454         if (tg3_flag(tp, TAGGED_STATUS) &&
10455             tg3_asic_rev(tp) != ASIC_REV_5717 &&
10456             !tg3_flag(tp, 57765_CLASS))
10457                 tp->timer_offset = HZ;
10458         else
10459                 tp->timer_offset = HZ / 10;
10460
10461         BUG_ON(tp->timer_offset > HZ);
10462
10463         tp->timer_multiplier = (HZ / tp->timer_offset);
10464         tp->asf_multiplier = (HZ / tp->timer_offset) *
10465                              TG3_FW_UPDATE_FREQ_SEC;
10466
10467         init_timer(&tp->timer);
10468         tp->timer.data = (unsigned long) tp;
10469         tp->timer.function = tg3_timer;
10470 }
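
/* Worked example of the cadence above: on the tagged-status path
 * timer_offset = HZ, so tg3_timer() runs once a second and
 * timer_multiplier = 1, i.e. the "once per second" block runs on every
 * tick.  On the HZ / 10 path the timer fires ten times a second,
 * timer_multiplier = 10, and asf_multiplier = 10 * TG3_FW_UPDATE_FREQ_SEC
 * ticks elapse between heartbeats.
 */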
10471
10472 static void tg3_timer_start(struct tg3 *tp)
10473 {
10474         tp->asf_counter   = tp->asf_multiplier;
10475         tp->timer_counter = tp->timer_multiplier;
10476
10477         tp->timer.expires = jiffies + tp->timer_offset;
10478         add_timer(&tp->timer);
10479 }
10480
10481 static void tg3_timer_stop(struct tg3 *tp)
10482 {
10483         del_timer_sync(&tp->timer);
10484 }
10485
10486 /* Restart hardware after configuration changes, self-test, etc.
10487  * Invoked with tp->lock held.
10488  */
10489 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10490         __releases(tp->lock)
10491         __acquires(tp->lock)
10492 {
10493         int err;
10494
10495         err = tg3_init_hw(tp, reset_phy);
10496         if (err) {
10497                 netdev_err(tp->dev,
10498                            "Failed to re-initialize device, aborting\n");
10499                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10500                 tg3_full_unlock(tp);
10501                 tg3_timer_stop(tp);
10502                 tp->irq_sync = 0;
10503                 tg3_napi_enable(tp);
10504                 dev_close(tp->dev);
10505                 tg3_full_lock(tp, 0);
10506         }
10507         return err;
10508 }
10509
10510 static void tg3_reset_task(struct work_struct *work)
10511 {
10512         struct tg3 *tp = container_of(work, struct tg3, reset_task);
10513         int err;
10514
10515         tg3_full_lock(tp, 0);
10516
10517         if (!netif_running(tp->dev)) {
10518                 tg3_flag_clear(tp, RESET_TASK_PENDING);
10519                 tg3_full_unlock(tp);
10520                 return;
10521         }
10522
10523         tg3_full_unlock(tp);
10524
10525         tg3_phy_stop(tp);
10526
10527         tg3_netif_stop(tp);
10528
10529         tg3_full_lock(tp, 1);
10530
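        /* A TX recovery was requested; the hang may have been caused by
         * the host bridge reordering posted mailbox writes (this is an
         * assumption encoded in the MBOX_WRITE_REORDER flag), so fall
         * back to the mailbox write methods that are safe in that case.
         */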
10531         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10532                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10533                 tp->write32_rx_mbox = tg3_write_flush_reg32;
10534                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10535                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10536         }
10537
10538         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10539         err = tg3_init_hw(tp, 1);
10540         if (err)
10541                 goto out;
10542
10543         tg3_netif_start(tp);
10544
10545 out:
10546         tg3_full_unlock(tp);
10547
10548         if (!err)
10549                 tg3_phy_start(tp);
10550
10551         tg3_flag_clear(tp, RESET_TASK_PENDING);
10552 }
10553
10554 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10555 {
10556         irq_handler_t fn;
10557         unsigned long flags;
10558         char *name;
10559         struct tg3_napi *tnapi = &tp->napi[irq_num];
10560
10561         if (tp->irq_cnt == 1)
10562                 name = tp->dev->name;
10563         else {
10564                 name = &tnapi->irq_lbl[0];
10565                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10566                 name[IFNAMSIZ-1] = 0;
10567         }
10568
10569         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10570                 fn = tg3_msi;
10571                 if (tg3_flag(tp, 1SHOT_MSI))
10572                         fn = tg3_msi_1shot;
10573                 flags = 0;
10574         } else {
10575                 fn = tg3_interrupt;
10576                 if (tg3_flag(tp, TAGGED_STATUS))
10577                         fn = tg3_interrupt_tagged;
10578                 flags = IRQF_SHARED;
10579         }
10580
10581         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10582 }
10583
10584 static int tg3_test_interrupt(struct tg3 *tp)
10585 {
10586         struct tg3_napi *tnapi = &tp->napi[0];
10587         struct net_device *dev = tp->dev;
10588         int err, i, intr_ok = 0;
10589         u32 val;
10590
10591         if (!netif_running(dev))
10592                 return -ENODEV;
10593
10594         tg3_disable_ints(tp);
10595
10596         free_irq(tnapi->irq_vec, tnapi);
10597
10598         /*
10599          * Turn off MSI one shot mode.  Otherwise this test has no
10600          * observable way to know whether the interrupt was delivered.
10601          */
10602         if (tg3_flag(tp, 57765_PLUS)) {
10603                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10604                 tw32(MSGINT_MODE, val);
10605         }
10606
10607         err = request_irq(tnapi->irq_vec, tg3_test_isr,
10608                           IRQF_SHARED, dev->name, tnapi);
10609         if (err)
10610                 return err;
10611
10612         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10613         tg3_enable_ints(tp);
10614
10615         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10616                tnapi->coal_now);
10617
10618         for (i = 0; i < 5; i++) {
10619                 u32 int_mbox, misc_host_ctrl;
10620
10621                 int_mbox = tr32_mailbox(tnapi->int_mbox);
10622                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10623
10624                 if ((int_mbox != 0) ||
10625                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10626                         intr_ok = 1;
10627                         break;
10628                 }
10629
10630                 if (tg3_flag(tp, 57765_PLUS) &&
10631                     tnapi->hw_status->status_tag != tnapi->last_tag)
10632                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10633
10634                 msleep(10);
10635         }
10636
10637         tg3_disable_ints(tp);
10638
10639         free_irq(tnapi->irq_vec, tnapi);
10640
10641         err = tg3_request_irq(tp, 0);
10642
10643         if (err)
10644                 return err;
10645
10646         if (intr_ok) {
10647                 /* Reenable MSI one shot mode. */
10648                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10649                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10650                         tw32(MSGINT_MODE, val);
10651                 }
10652                 return 0;
10653         }
10654
10655         return -EIO;
10656 }
10657
10658 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
10659  * INTx mode is successfully restored.
10660  */
10661 static int tg3_test_msi(struct tg3 *tp)
10662 {
10663         int err;
10664         u16 pci_cmd;
10665
10666         if (!tg3_flag(tp, USING_MSI))
10667                 return 0;
10668
10669         /* Turn off SERR reporting in case MSI terminates with Master
10670          * Abort.
10671          */
10672         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10673         pci_write_config_word(tp->pdev, PCI_COMMAND,
10674                               pci_cmd & ~PCI_COMMAND_SERR);
10675
10676         err = tg3_test_interrupt(tp);
10677
10678         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10679
10680         if (!err)
10681                 return 0;
10682
10683         /* other failures */
10684         if (err != -EIO)
10685                 return err;
10686
10687         /* MSI test failed, go back to INTx mode */
10688         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10689                     "to INTx mode. Please report this failure to the PCI "
10690                     "maintainer and include system chipset information\n");
10691
10692         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10693
10694         pci_disable_msi(tp->pdev);
10695
10696         tg3_flag_clear(tp, USING_MSI);
10697         tp->napi[0].irq_vec = tp->pdev->irq;
10698
10699         err = tg3_request_irq(tp, 0);
10700         if (err)
10701                 return err;
10702
10703         /* Need to reset the chip because the MSI cycle may have terminated
10704          * with Master Abort.
10705          */
10706         tg3_full_lock(tp, 1);
10707
10708         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10709         err = tg3_init_hw(tp, 1);
10710
10711         tg3_full_unlock(tp);
10712
10713         if (err)
10714                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10715
10716         return err;
10717 }
10718
10719 static int tg3_request_firmware(struct tg3 *tp)
10720 {
10721         const struct tg3_firmware_hdr *fw_hdr;
10722
10723         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10724                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10725                            tp->fw_needed);
10726                 return -ENOENT;
10727         }
10728
10729         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
10730
10731         /* Firmware blob starts with version numbers, followed by
10732          * start address and _full_ length including BSS sections
10733          * (which must be longer than the actual data, of course).
10734          */
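        /* Sketch of the layout that implies; the authoritative definition
         * is struct tg3_firmware_hdr in tg3.h, roughly:
         *
         *      __be32 version;
         *      __be32 base_addr;
         *      __be32 len;     (full length, including BSS)
         */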
10735
10736         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
10737         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
10738                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10739                            tp->fw_len, tp->fw_needed);
10740                 release_firmware(tp->fw);
10741                 tp->fw = NULL;
10742                 return -EINVAL;
10743         }
10744
10745         /* We no longer need firmware; we have it. */
10746         tp->fw_needed = NULL;
10747         return 0;
10748 }
10749
10750 static u32 tg3_irq_count(struct tg3 *tp)
10751 {
10752         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10753
10754         if (irq_cnt > 1) {
10755                 /* We want as many rx rings enabled as there are cpus.
10756                  * In multiqueue MSI-X mode, the first MSI-X vector
10757                  * only deals with link interrupts, etc, so we add
10758                  * one to the number of vectors we are requesting.
10759                  */
10760                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10761         }
10762
10763         return irq_cnt;
10764 }
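
/* Example: with rxq_cnt = 4, txq_cnt = 1 and irq_max >= 5, this returns
 * min(4 + 1, irq_max) = 5: four rx vectors plus the extra vector 0 that
 * services link and other non-ring interrupts.
 */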
10765
10766 static bool tg3_enable_msix(struct tg3 *tp)
10767 {
10768         int i, rc;
10769         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10770
10771         tp->txq_cnt = tp->txq_req;
10772         tp->rxq_cnt = tp->rxq_req;
10773         if (!tp->rxq_cnt)
10774                 tp->rxq_cnt = netif_get_num_default_rss_queues();
10775         if (tp->rxq_cnt > tp->rxq_max)
10776                 tp->rxq_cnt = tp->rxq_max;
10777
10778         /* Disable multiple TX rings by default.  Simple round-robin hardware
10779          * scheduling of the TX rings can cause starvation of rings with
10780          * small packets when other rings have TSO or jumbo packets.
10781          */
10782         if (!tp->txq_req)
10783                 tp->txq_cnt = 1;
10784
10785         tp->irq_cnt = tg3_irq_count(tp);
10786
10787         for (i = 0; i < tp->irq_max; i++) {
10788                 msix_ent[i].entry  = i;
10789                 msix_ent[i].vector = 0;
10790         }
10791
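        /* pci_enable_msix() returns 0 on success, a negative errno on
         * failure, or, when fewer vectors are available than requested,
         * the number that could be allocated; the positive case is
         * retried below with the reduced count.
         */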
10792         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10793         if (rc < 0) {
10794                 return false;
10795         } else if (rc != 0) {
10796                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10797                         return false;
10798                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10799                               tp->irq_cnt, rc);
10800                 tp->irq_cnt = rc;
10801                 tp->rxq_cnt = max(rc - 1, 1);
10802                 if (tp->txq_cnt)
10803                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10804         }
10805
10806         for (i = 0; i < tp->irq_max; i++)
10807                 tp->napi[i].irq_vec = msix_ent[i].vector;
10808
10809         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10810                 pci_disable_msix(tp->pdev);
10811                 return false;
10812         }
10813
10814         if (tp->irq_cnt == 1)
10815                 return true;
10816
10817         tg3_flag_set(tp, ENABLE_RSS);
10818
10819         if (tp->txq_cnt > 1)
10820                 tg3_flag_set(tp, ENABLE_TSS);
10821
10822         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10823
10824         return true;
10825 }
10826
10827 static void tg3_ints_init(struct tg3 *tp)
10828 {
10829         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10830             !tg3_flag(tp, TAGGED_STATUS)) {
10831                 /* All MSI supporting chips should support tagged
10832                  * status.  Assert that this is the case.
10833                  */
10834                 netdev_warn(tp->dev,
10835                             "MSI without TAGGED_STATUS? Not using MSI\n");
10836                 goto defcfg;
10837         }
10838
10839         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10840                 tg3_flag_set(tp, USING_MSIX);
10841         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10842                 tg3_flag_set(tp, USING_MSI);
10843
10844         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10845                 u32 msi_mode = tr32(MSGINT_MODE);
10846                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10847                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10848                 if (!tg3_flag(tp, 1SHOT_MSI))
10849                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10850                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10851         }
10852 defcfg:
10853         if (!tg3_flag(tp, USING_MSIX)) {
10854                 tp->irq_cnt = 1;
10855                 tp->napi[0].irq_vec = tp->pdev->irq;
10856         }
10857
10858         if (tp->irq_cnt == 1) {
10859                 tp->txq_cnt = 1;
10860                 tp->rxq_cnt = 1;
10861                 netif_set_real_num_tx_queues(tp->dev, 1);
10862                 netif_set_real_num_rx_queues(tp->dev, 1);
10863         }
10864 }
10865
10866 static void tg3_ints_fini(struct tg3 *tp)
10867 {
10868         if (tg3_flag(tp, USING_MSIX))
10869                 pci_disable_msix(tp->pdev);
10870         else if (tg3_flag(tp, USING_MSI))
10871                 pci_disable_msi(tp->pdev);
10872         tg3_flag_clear(tp, USING_MSI);
10873         tg3_flag_clear(tp, USING_MSIX);
10874         tg3_flag_clear(tp, ENABLE_RSS);
10875         tg3_flag_clear(tp, ENABLE_TSS);
10876 }
10877
10878 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
10879                      bool init)
10880 {
10881         struct net_device *dev = tp->dev;
10882         int i, err;
10883
10884         /*
10885          * Set up interrupts first so we know how
10886          * many NAPI resources to allocate
10887          */
10888         tg3_ints_init(tp);
10889
10890         tg3_rss_check_indir_tbl(tp);
10891
10892         /* The placement of this call is tied
10893          * to the setup and use of Host TX descriptors.
10894          */
10895         err = tg3_alloc_consistent(tp);
10896         if (err)
10897                 goto err_out1;
10898
10899         tg3_napi_init(tp);
10900
10901         tg3_napi_enable(tp);
10902
10903         for (i = 0; i < tp->irq_cnt; i++) {
10904                 struct tg3_napi *tnapi = &tp->napi[i];
10905                 err = tg3_request_irq(tp, i);
10906                 if (err) {
10907                         for (i--; i >= 0; i--) {
10908                                 tnapi = &tp->napi[i];
10909                                 free_irq(tnapi->irq_vec, tnapi);
10910                         }
10911                         goto err_out2;
10912                 }
10913         }
10914
10915         tg3_full_lock(tp, 0);
10916
10917         err = tg3_init_hw(tp, reset_phy);
10918         if (err) {
10919                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10920                 tg3_free_rings(tp);
10921         }
10922
10923         tg3_full_unlock(tp);
10924
10925         if (err)
10926                 goto err_out3;
10927
10928         if (test_irq && tg3_flag(tp, USING_MSI)) {
10929                 err = tg3_test_msi(tp);
10930
10931                 if (err) {
10932                         tg3_full_lock(tp, 0);
10933                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10934                         tg3_free_rings(tp);
10935                         tg3_full_unlock(tp);
10936
10937                         goto err_out2;
10938                 }
10939
10940                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10941                         u32 val = tr32(PCIE_TRANSACTION_CFG);
10942
10943                         tw32(PCIE_TRANSACTION_CFG,
10944                              val | PCIE_TRANS_CFG_1SHOT_MSI);
10945                 }
10946         }
10947
10948         tg3_phy_start(tp);
10949
10950         tg3_hwmon_open(tp);
10951
10952         tg3_full_lock(tp, 0);
10953
10954         tg3_timer_start(tp);
10955         tg3_flag_set(tp, INIT_COMPLETE);
10956         tg3_enable_ints(tp);
10957
10958         if (init)
10959                 tg3_ptp_init(tp);
10960         else
10961                 tg3_ptp_resume(tp);
10962
10963
10964         tg3_full_unlock(tp);
10965
10966         netif_tx_start_all_queues(dev);
10967
10968         /*
10969          * Reset the loopback feature if it was turned on while the device
10970          * was down, and make sure it is configured properly now.
10971          */
10972         if (dev->features & NETIF_F_LOOPBACK)
10973                 tg3_set_loopback(dev, dev->features);
10974
10975         return 0;
10976
10977 err_out3:
10978         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10979                 struct tg3_napi *tnapi = &tp->napi[i];
10980                 free_irq(tnapi->irq_vec, tnapi);
10981         }
10982
10983 err_out2:
10984         tg3_napi_disable(tp);
10985         tg3_napi_fini(tp);
10986         tg3_free_consistent(tp);
10987
10988 err_out1:
10989         tg3_ints_fini(tp);
10990
10991         return err;
10992 }
10993
10994 static void tg3_stop(struct tg3 *tp)
10995 {
10996         int i;
10997
10998         tg3_reset_task_cancel(tp);
10999         tg3_netif_stop(tp);
11000
11001         tg3_timer_stop(tp);
11002
11003         tg3_hwmon_close(tp);
11004
11005         tg3_phy_stop(tp);
11006
11007         tg3_full_lock(tp, 1);
11008
11009         tg3_disable_ints(tp);
11010
11011         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11012         tg3_free_rings(tp);
11013         tg3_flag_clear(tp, INIT_COMPLETE);
11014
11015         tg3_full_unlock(tp);
11016
11017         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11018                 struct tg3_napi *tnapi = &tp->napi[i];
11019                 free_irq(tnapi->irq_vec, tnapi);
11020         }
11021
11022         tg3_ints_fini(tp);
11023
11024         tg3_napi_fini(tp);
11025
11026         tg3_free_consistent(tp);
11027 }
11028
11029 static int tg3_open(struct net_device *dev)
11030 {
11031         struct tg3 *tp = netdev_priv(dev);
11032         int err;
11033
11034         if (tp->fw_needed) {
11035                 err = tg3_request_firmware(tp);
11036                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11037                         if (err) {
11038                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11039                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11040                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11041                                 netdev_warn(tp->dev, "EEE capability restored\n");
11042                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11043                         }
11044                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11045                         if (err)
11046                                 return err;
11047                 } else if (err) {
11048                         netdev_warn(tp->dev, "TSO capability disabled\n");
11049                         tg3_flag_clear(tp, TSO_CAPABLE);
11050                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11051                         netdev_notice(tp->dev, "TSO capability restored\n");
11052                         tg3_flag_set(tp, TSO_CAPABLE);
11053                 }
11054         }
11055
11056         tg3_carrier_off(tp);
11057
11058         err = tg3_power_up(tp);
11059         if (err)
11060                 return err;
11061
11062         tg3_full_lock(tp, 0);
11063
11064         tg3_disable_ints(tp);
11065         tg3_flag_clear(tp, INIT_COMPLETE);
11066
11067         tg3_full_unlock(tp);
11068
11069         err = tg3_start(tp, true, true, true);
11070         if (err) {
11071                 tg3_frob_aux_power(tp, false);
11072                 pci_set_power_state(tp->pdev, PCI_D3hot);
11073         }
11074
11075         if (tg3_flag(tp, PTP_CAPABLE)) {
11076                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11077                                                    &tp->pdev->dev);
11078                 if (IS_ERR(tp->ptp_clock))
11079                         tp->ptp_clock = NULL;
11080         }
11081
11082         return err;
11083 }
11084
11085 static int tg3_close(struct net_device *dev)
11086 {
11087         struct tg3 *tp = netdev_priv(dev);
11088
11089         tg3_ptp_fini(tp);
11090
11091         tg3_stop(tp);
11092
11093         /* Clear stats across close / open calls */
11094         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11095         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11096
11097         tg3_power_down(tp);
11098
11099         tg3_carrier_off(tp);
11100
11101         return 0;
11102 }
11103
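/* Reassemble one 64-bit statistic from its 32-bit halves; e.g. high
 * 0x00000001 and low 0x00000005 yield 0x0000000100000005.
 */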
11104 static inline u64 get_stat64(tg3_stat64_t *val)
11105 {
11106        return ((u64)val->high << 32) | ((u64)val->low);
11107 }
11108
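/* On 5700 / 5701 with a copper PHY, FCS errors are counted by the PHY
 * itself: enabling MII_TG3_TEST1_CRC_EN exposes a CRC counter that is
 * (apparently) cleared by the read, so it is accumulated into
 * tp->phy_crc_errors here.  Everything else reports FCS errors through
 * the normal hardware statistics block.
 */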
11109 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11110 {
11111         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11112
11113         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11114             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11115              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11116                 u32 val;
11117
11118                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11119                         tg3_writephy(tp, MII_TG3_TEST1,
11120                                      val | MII_TG3_TEST1_CRC_EN);
11121                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11122                 } else
11123                         val = 0;
11124
11125                 tp->phy_crc_errors += val;
11126
11127                 return tp->phy_crc_errors;
11128         }
11129
11130         return get_stat64(&hw_stats->rx_fcs_errors);
11131 }
11132
11133 #define ESTAT_ADD(member) \
11134         estats->member =        old_estats->member + \
11135                                 get_stat64(&hw_stats->member)
11136
11137 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11138 {
11139         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11140         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11141
11142         ESTAT_ADD(rx_octets);
11143         ESTAT_ADD(rx_fragments);
11144         ESTAT_ADD(rx_ucast_packets);
11145         ESTAT_ADD(rx_mcast_packets);
11146         ESTAT_ADD(rx_bcast_packets);
11147         ESTAT_ADD(rx_fcs_errors);
11148         ESTAT_ADD(rx_align_errors);
11149         ESTAT_ADD(rx_xon_pause_rcvd);
11150         ESTAT_ADD(rx_xoff_pause_rcvd);
11151         ESTAT_ADD(rx_mac_ctrl_rcvd);
11152         ESTAT_ADD(rx_xoff_entered);
11153         ESTAT_ADD(rx_frame_too_long_errors);
11154         ESTAT_ADD(rx_jabbers);
11155         ESTAT_ADD(rx_undersize_packets);
11156         ESTAT_ADD(rx_in_length_errors);
11157         ESTAT_ADD(rx_out_length_errors);
11158         ESTAT_ADD(rx_64_or_less_octet_packets);
11159         ESTAT_ADD(rx_65_to_127_octet_packets);
11160         ESTAT_ADD(rx_128_to_255_octet_packets);
11161         ESTAT_ADD(rx_256_to_511_octet_packets);
11162         ESTAT_ADD(rx_512_to_1023_octet_packets);
11163         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11164         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11165         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11166         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11167         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11168
11169         ESTAT_ADD(tx_octets);
11170         ESTAT_ADD(tx_collisions);
11171         ESTAT_ADD(tx_xon_sent);
11172         ESTAT_ADD(tx_xoff_sent);
11173         ESTAT_ADD(tx_flow_control);
11174         ESTAT_ADD(tx_mac_errors);
11175         ESTAT_ADD(tx_single_collisions);
11176         ESTAT_ADD(tx_mult_collisions);
11177         ESTAT_ADD(tx_deferred);
11178         ESTAT_ADD(tx_excessive_collisions);
11179         ESTAT_ADD(tx_late_collisions);
11180         ESTAT_ADD(tx_collide_2times);
11181         ESTAT_ADD(tx_collide_3times);
11182         ESTAT_ADD(tx_collide_4times);
11183         ESTAT_ADD(tx_collide_5times);
11184         ESTAT_ADD(tx_collide_6times);
11185         ESTAT_ADD(tx_collide_7times);
11186         ESTAT_ADD(tx_collide_8times);
11187         ESTAT_ADD(tx_collide_9times);
11188         ESTAT_ADD(tx_collide_10times);
11189         ESTAT_ADD(tx_collide_11times);
11190         ESTAT_ADD(tx_collide_12times);
11191         ESTAT_ADD(tx_collide_13times);
11192         ESTAT_ADD(tx_collide_14times);
11193         ESTAT_ADD(tx_collide_15times);
11194         ESTAT_ADD(tx_ucast_packets);
11195         ESTAT_ADD(tx_mcast_packets);
11196         ESTAT_ADD(tx_bcast_packets);
11197         ESTAT_ADD(tx_carrier_sense_errors);
11198         ESTAT_ADD(tx_discards);
11199         ESTAT_ADD(tx_errors);
11200
11201         ESTAT_ADD(dma_writeq_full);
11202         ESTAT_ADD(dma_write_prioq_full);
11203         ESTAT_ADD(rxbds_empty);
11204         ESTAT_ADD(rx_discards);
11205         ESTAT_ADD(rx_errors);
11206         ESTAT_ADD(rx_threshold_hit);
11207
11208         ESTAT_ADD(dma_readq_full);
11209         ESTAT_ADD(dma_read_prioq_full);
11210         ESTAT_ADD(tx_comp_queue_full);
11211
11212         ESTAT_ADD(ring_set_send_prod_index);
11213         ESTAT_ADD(ring_status_update);
11214         ESTAT_ADD(nic_irqs);
11215         ESTAT_ADD(nic_avoided_irqs);
11216         ESTAT_ADD(nic_tx_threshold_hit);
11217
11218         ESTAT_ADD(mbuf_lwm_thresh_hit);
11219 }
11220
11221 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11222 {
11223         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11224         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11225
11226         stats->rx_packets = old_stats->rx_packets +
11227                 get_stat64(&hw_stats->rx_ucast_packets) +
11228                 get_stat64(&hw_stats->rx_mcast_packets) +
11229                 get_stat64(&hw_stats->rx_bcast_packets);
11230
11231         stats->tx_packets = old_stats->tx_packets +
11232                 get_stat64(&hw_stats->tx_ucast_packets) +
11233                 get_stat64(&hw_stats->tx_mcast_packets) +
11234                 get_stat64(&hw_stats->tx_bcast_packets);
11235
11236         stats->rx_bytes = old_stats->rx_bytes +
11237                 get_stat64(&hw_stats->rx_octets);
11238         stats->tx_bytes = old_stats->tx_bytes +
11239                 get_stat64(&hw_stats->tx_octets);
11240
11241         stats->rx_errors = old_stats->rx_errors +
11242                 get_stat64(&hw_stats->rx_errors);
11243         stats->tx_errors = old_stats->tx_errors +
11244                 get_stat64(&hw_stats->tx_errors) +
11245                 get_stat64(&hw_stats->tx_mac_errors) +
11246                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11247                 get_stat64(&hw_stats->tx_discards);
11248
11249         stats->multicast = old_stats->multicast +
11250                 get_stat64(&hw_stats->rx_mcast_packets);
11251         stats->collisions = old_stats->collisions +
11252                 get_stat64(&hw_stats->tx_collisions);
11253
11254         stats->rx_length_errors = old_stats->rx_length_errors +
11255                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11256                 get_stat64(&hw_stats->rx_undersize_packets);
11257
11258         stats->rx_over_errors = old_stats->rx_over_errors +
11259                 get_stat64(&hw_stats->rxbds_empty);
11260         stats->rx_frame_errors = old_stats->rx_frame_errors +
11261                 get_stat64(&hw_stats->rx_align_errors);
11262         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11263                 get_stat64(&hw_stats->tx_discards);
11264         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11265                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11266
11267         stats->rx_crc_errors = old_stats->rx_crc_errors +
11268                 tg3_calc_crc_errors(tp);
11269
11270         stats->rx_missed_errors = old_stats->rx_missed_errors +
11271                 get_stat64(&hw_stats->rx_discards);
11272
11273         stats->rx_dropped = tp->rx_dropped;
11274         stats->tx_dropped = tp->tx_dropped;
11275 }
11276
11277 static int tg3_get_regs_len(struct net_device *dev)
11278 {
11279         return TG3_REG_BLK_SIZE;
11280 }
11281
11282 static void tg3_get_regs(struct net_device *dev,
11283                 struct ethtool_regs *regs, void *_p)
11284 {
11285         struct tg3 *tp = netdev_priv(dev);
11286
11287         regs->version = 0;
11288
11289         memset(_p, 0, TG3_REG_BLK_SIZE);
11290
11291         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11292                 return;
11293
11294         tg3_full_lock(tp, 0);
11295
11296         tg3_dump_legacy_regs(tp, (u32 *)_p);
11297
11298         tg3_full_unlock(tp);
11299 }
11300
11301 static int tg3_get_eeprom_len(struct net_device *dev)
11302 {
11303         struct tg3 *tp = netdev_priv(dev);
11304
11305         return tp->nvram_size;
11306 }
11307
11308 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11309 {
11310         struct tg3 *tp = netdev_priv(dev);
11311         int ret;
11312         u8  *pd;
11313         u32 i, offset, len, b_offset, b_count;
11314         __be32 val;
11315
11316         if (tg3_flag(tp, NO_NVRAM))
11317                 return -EINVAL;
11318
11319         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11320                 return -EAGAIN;
11321
11322         offset = eeprom->offset;
11323         len = eeprom->len;
11324         eeprom->len = 0;
11325
11326         eeprom->magic = TG3_EEPROM_MAGIC;
11327
11328         if (offset & 3) {
11329                 /* adjustments to start on required 4 byte boundary */
11330                 b_offset = offset & 3;
11331                 b_count = 4 - b_offset;
11332                 if (b_count > len) {
11333                         /* i.e. offset=1 len=2 */
11334                         b_count = len;
11335                 }
11336                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11337                 if (ret)
11338                         return ret;
11339                 memcpy(data, ((char *)&val) + b_offset, b_count);
11340                 len -= b_count;
11341                 offset += b_count;
11342                 eeprom->len += b_count;
11343         }
11344
11345         /* read bytes up to the last 4 byte boundary */
11346         pd = &data[eeprom->len];
11347         for (i = 0; i < (len - (len & 3)); i += 4) {
11348                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11349                 if (ret) {
11350                         eeprom->len += i;
11351                         return ret;
11352                 }
11353                 memcpy(pd + i, &val, 4);
11354         }
11355         eeprom->len += i;
11356
11357         if (len & 3) {
11358                 /* read last bytes not ending on 4 byte boundary */
11359                 pd = &data[eeprom->len];
11360                 b_count = len & 3;
11361                 b_offset = offset + len - b_count;
11362                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11363                 if (ret)
11364                         return ret;
11365                 memcpy(pd, &val, b_count);
11366                 eeprom->len += b_count;
11367         }
11368         return 0;
11369 }
11370
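/* Worked example of the alignment handling in tg3_get_eeprom() (which backs
 * 'ethtool -e <dev>'): a request with offset = 0x7 and len = 10 is served as
 * one be32 read at 0x4 (keeping the last byte), two aligned be32 reads at
 * 0x8 and 0xc, and one be32 read at 0x10 (keeping the first byte), for
 * 1 + 8 + 1 = 10 bytes in total.  eeprom->len tracks the running count so a
 * partial failure still reports how much was copied.
 */
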
11371 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11372 {
11373         struct tg3 *tp = netdev_priv(dev);
11374         int ret;
11375         u32 offset, len, b_offset, odd_len;
11376         u8 *buf;
11377         __be32 start, end;
11378
11379         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11380                 return -EAGAIN;
11381
11382         if (tg3_flag(tp, NO_NVRAM) ||
11383             eeprom->magic != TG3_EEPROM_MAGIC)
11384                 return -EINVAL;
11385
11386         offset = eeprom->offset;
11387         len = eeprom->len;
11388
11389         if ((b_offset = (offset & 3))) {
11390                 /* adjustments to start on required 4 byte boundary */
11391                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11392                 if (ret)
11393                         return ret;
11394                 len += b_offset;
11395                 offset &= ~3;
11396                 if (len < 4)
11397                         len = 4;
11398         }
11399
11400         odd_len = 0;
11401         if (len & 3) {
11402                 /* adjustments to end on required 4 byte boundary */
11403                 odd_len = 1;
11404                 len = (len + 3) & ~3;
11405                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11406                 if (ret)
11407                         return ret;
11408         }
11409
11410         buf = data;
11411         if (b_offset || odd_len) {
11412                 buf = kmalloc(len, GFP_KERNEL);
11413                 if (!buf)
11414                         return -ENOMEM;
11415                 if (b_offset)
11416                         memcpy(buf, &start, 4);
11417                 if (odd_len)
11418                         memcpy(buf+len-4, &end, 4);
11419                 memcpy(buf + b_offset, data, eeprom->len);
11420         }
11421
11422         ret = tg3_nvram_write_block(tp, offset, len, buf);
11423
11424         if (buf != data)
11425                 kfree(buf);
11426
11427         return ret;
11428 }
11429
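/* tg3_set_eeprom() (behind 'ethtool -E', which must supply the matching
 * magic) turns an unaligned write into a read-modify-write on whole 32-bit
 * words.  E.g. offset = 0x7, len = 2: the word at 0x4 is read into 'start',
 * the word at 0x8 into 'end', the two user bytes are spliced in at byte
 * offset 3 of an 8-byte bounce buffer, and the buffer is written back at
 * offset 0x4.
 */
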
11430 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11431 {
11432         struct tg3 *tp = netdev_priv(dev);
11433
11434         if (tg3_flag(tp, USE_PHYLIB)) {
11435                 struct phy_device *phydev;
11436                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11437                         return -EAGAIN;
11438                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11439                 return phy_ethtool_gset(phydev, cmd);
11440         }
11441
11442         cmd->supported = (SUPPORTED_Autoneg);
11443
11444         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11445                 cmd->supported |= (SUPPORTED_1000baseT_Half |
11446                                    SUPPORTED_1000baseT_Full);
11447
11448         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11449                 cmd->supported |= (SUPPORTED_100baseT_Half |
11450                                   SUPPORTED_100baseT_Full |
11451                                   SUPPORTED_10baseT_Half |
11452                                   SUPPORTED_10baseT_Full |
11453                                   SUPPORTED_TP);
11454                 cmd->port = PORT_TP;
11455         } else {
11456                 cmd->supported |= SUPPORTED_FIBRE;
11457                 cmd->port = PORT_FIBRE;
11458         }
11459
11460         cmd->advertising = tp->link_config.advertising;
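        /* Fold the flow-control mode into the advertised bits using the
         * standard 802.3 pause encoding:
         *   rx + tx  ->  Pause
         *   rx only  ->  Pause | Asym_Pause
         *   tx only  ->  Asym_Pause
         */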
11461         if (tg3_flag(tp, PAUSE_AUTONEG)) {
11462                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11463                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11464                                 cmd->advertising |= ADVERTISED_Pause;
11465                         } else {
11466                                 cmd->advertising |= ADVERTISED_Pause |
11467                                                     ADVERTISED_Asym_Pause;
11468                         }
11469                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11470                         cmd->advertising |= ADVERTISED_Asym_Pause;
11471                 }
11472         }
11473         if (netif_running(dev) && tp->link_up) {
11474                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11475                 cmd->duplex = tp->link_config.active_duplex;
11476                 cmd->lp_advertising = tp->link_config.rmt_adv;
11477                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11478                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11479                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11480                         else
11481                                 cmd->eth_tp_mdix = ETH_TP_MDI;
11482                 }
11483         } else {
11484                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11485                 cmd->duplex = DUPLEX_UNKNOWN;
11486                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11487         }
11488         cmd->phy_address = tp->phy_addr;
11489         cmd->transceiver = XCVR_INTERNAL;
11490         cmd->autoneg = tp->link_config.autoneg;
11491         cmd->maxtxpkt = 0;
11492         cmd->maxrxpkt = 0;
11493         return 0;
11494 }
11495
11496 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11497 {
11498         struct tg3 *tp = netdev_priv(dev);
11499         u32 speed = ethtool_cmd_speed(cmd);
11500
11501         if (tg3_flag(tp, USE_PHYLIB)) {
11502                 struct phy_device *phydev;
11503                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11504                         return -EAGAIN;
11505                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11506                 return phy_ethtool_sset(phydev, cmd);
11507         }
11508
11509         if (cmd->autoneg != AUTONEG_ENABLE &&
11510             cmd->autoneg != AUTONEG_DISABLE)
11511                 return -EINVAL;
11512
11513         if (cmd->autoneg == AUTONEG_DISABLE &&
11514             cmd->duplex != DUPLEX_FULL &&
11515             cmd->duplex != DUPLEX_HALF)
11516                 return -EINVAL;
11517
11518         if (cmd->autoneg == AUTONEG_ENABLE) {
11519                 u32 mask = ADVERTISED_Autoneg |
11520                            ADVERTISED_Pause |
11521                            ADVERTISED_Asym_Pause;
11522
11523                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11524                         mask |= ADVERTISED_1000baseT_Half |
11525                                 ADVERTISED_1000baseT_Full;
11526
11527                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11528                         mask |= ADVERTISED_100baseT_Half |
11529                                 ADVERTISED_100baseT_Full |
11530                                 ADVERTISED_10baseT_Half |
11531                                 ADVERTISED_10baseT_Full |
11532                                 ADVERTISED_TP;
11533                 else
11534                         mask |= ADVERTISED_FIBRE;
11535
11536                 if (cmd->advertising & ~mask)
11537                         return -EINVAL;
11538
11539                 mask &= (ADVERTISED_1000baseT_Half |
11540                          ADVERTISED_1000baseT_Full |
11541                          ADVERTISED_100baseT_Half |
11542                          ADVERTISED_100baseT_Full |
11543                          ADVERTISED_10baseT_Half |
11544                          ADVERTISED_10baseT_Full);
11545
11546                 cmd->advertising &= mask;
11547         } else {
11548                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11549                         if (speed != SPEED_1000)
11550                                 return -EINVAL;
11551
11552                         if (cmd->duplex != DUPLEX_FULL)
11553                                 return -EINVAL;
11554                 } else {
11555                         if (speed != SPEED_100 &&
11556                             speed != SPEED_10)
11557                                 return -EINVAL;
11558                 }
11559         }
11560
11561         tg3_full_lock(tp, 0);
11562
11563         tp->link_config.autoneg = cmd->autoneg;
11564         if (cmd->autoneg == AUTONEG_ENABLE) {
11565                 tp->link_config.advertising = (cmd->advertising |
11566                                               ADVERTISED_Autoneg);
11567                 tp->link_config.speed = SPEED_UNKNOWN;
11568                 tp->link_config.duplex = DUPLEX_UNKNOWN;
11569         } else {
11570                 tp->link_config.advertising = 0;
11571                 tp->link_config.speed = speed;
11572                 tp->link_config.duplex = cmd->duplex;
11573         }
11574
11575         tg3_warn_mgmt_link_flap(tp);
11576
11577         if (netif_running(dev))
11578                 tg3_setup_phy(tp, 1);
11579
11580         tg3_full_unlock(tp);
11581
11582         return 0;
11583 }
11584
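/* This is the 'ethtool -s' entry point.  With autoneg enabled only the
 * advertised modes are recorded (speed and duplex stay unknown until the
 * negotiation resolves); with autoneg disabled a fixed speed/duplex pair is
 * forced, and serdes parts only accept 1000/full.  Either way the
 * management-traffic warning fires before the link is reconfigured.
 */
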
11585 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11586 {
11587         struct tg3 *tp = netdev_priv(dev);
11588
11589         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11590         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11591         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11592         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11593 }
11594
11595 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11596 {
11597         struct tg3 *tp = netdev_priv(dev);
11598
11599         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11600                 wol->supported = WAKE_MAGIC;
11601         else
11602                 wol->supported = 0;
11603         wol->wolopts = 0;
11604         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11605                 wol->wolopts = WAKE_MAGIC;
11606         memset(&wol->sopass, 0, sizeof(wol->sopass));
11607 }
11608
11609 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11610 {
11611         struct tg3 *tp = netdev_priv(dev);
11612         struct device *dp = &tp->pdev->dev;
11613
11614         if (wol->wolopts & ~WAKE_MAGIC)
11615                 return -EINVAL;
11616         if ((wol->wolopts & WAKE_MAGIC) &&
11617             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11618                 return -EINVAL;
11619
11620         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11621
11622         spin_lock_bh(&tp->lock);
11623         if (device_may_wakeup(dp))
11624                 tg3_flag_set(tp, WOL_ENABLE);
11625         else
11626                 tg3_flag_clear(tp, WOL_ENABLE);
11627         spin_unlock_bh(&tp->lock);
11628
11629         return 0;
11630 }
11631
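/* Only magic-packet wake is supported ('ethtool -s <dev> wol g'); any other
 * bit in wolopts is rejected, and the WOL_ENABLE flag is kept in sync with
 * the device wakeup state under tp->lock.
 */
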
11632 static u32 tg3_get_msglevel(struct net_device *dev)
11633 {
11634         struct tg3 *tp = netdev_priv(dev);
11635         return tp->msg_enable;
11636 }
11637
11638 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11639 {
11640         struct tg3 *tp = netdev_priv(dev);
11641         tp->msg_enable = value;
11642 }
11643
11644 static int tg3_nway_reset(struct net_device *dev)
11645 {
11646         struct tg3 *tp = netdev_priv(dev);
11647         int r;
11648
11649         if (!netif_running(dev))
11650                 return -EAGAIN;
11651
11652         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11653                 return -EINVAL;
11654
11655         tg3_warn_mgmt_link_flap(tp);
11656
11657         if (tg3_flag(tp, USE_PHYLIB)) {
11658                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11659                         return -EAGAIN;
11660                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11661         } else {
11662                 u32 bmcr;
11663
11664                 spin_lock_bh(&tp->lock);
11665                 r = -EINVAL;
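                /* BMCR is read twice here; the first result is discarded
                 * and only the second read is checked.
                 */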
11666                 tg3_readphy(tp, MII_BMCR, &bmcr);
11667                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11668                     ((bmcr & BMCR_ANENABLE) ||
11669                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11670                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11671                                                    BMCR_ANENABLE);
11672                         r = 0;
11673                 }
11674                 spin_unlock_bh(&tp->lock);
11675         }
11676
11677         return r;
11678 }
11679
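/* Backs 'ethtool -r <dev>': restart autonegotiation.  This only makes sense
 * when autoneg is enabled (or parallel detection is active), hence the
 * BMCR_ANENABLE check in the non-phylib path, and it also triggers the
 * management link-flap warning.
 */
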
11680 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11681 {
11682         struct tg3 *tp = netdev_priv(dev);
11683
11684         ering->rx_max_pending = tp->rx_std_ring_mask;
11685         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11686                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11687         else
11688                 ering->rx_jumbo_max_pending = 0;
11689
11690         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11691
11692         ering->rx_pending = tp->rx_pending;
11693         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11694                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11695         else
11696                 ering->rx_jumbo_pending = 0;
11697
11698         ering->tx_pending = tp->napi[0].tx_pending;
11699 }
11700
11701 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11702 {
11703         struct tg3 *tp = netdev_priv(dev);
11704         int i, irq_sync = 0, err = 0;
11705
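        /* Backs 'ethtool -G <dev>'.  Reject sizes the hardware cannot
         * express: rx counts are capped by the ring masks, and tx_pending
         * must exceed MAX_SKB_FRAGS (typically 17), or three times that on
         * chips with the TSO_BUG workaround, so a maximally fragmented skb
         * can always be queued.
         */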
11706         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11707             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11708             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11709             (ering->tx_pending <= MAX_SKB_FRAGS) ||
11710             (tg3_flag(tp, TSO_BUG) &&
11711              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11712                 return -EINVAL;
11713
11714         if (netif_running(dev)) {
11715                 tg3_phy_stop(tp);
11716                 tg3_netif_stop(tp);
11717                 irq_sync = 1;
11718         }
11719
11720         tg3_full_lock(tp, irq_sync);
11721
11722         tp->rx_pending = ering->rx_pending;
11723
11724         if (tg3_flag(tp, MAX_RXPEND_64) &&
11725             tp->rx_pending > 63)
11726                 tp->rx_pending = 63;
11727         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11728
11729         for (i = 0; i < tp->irq_max; i++)
11730                 tp->napi[i].tx_pending = ering->tx_pending;
11731
11732         if (netif_running(dev)) {
11733                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11734                 err = tg3_restart_hw(tp, 0);
11735                 if (!err)
11736                         tg3_netif_start(tp);
11737         }
11738
11739         tg3_full_unlock(tp);
11740
11741         if (irq_sync && !err)
11742                 tg3_phy_start(tp);
11743
11744         return err;
11745 }
11746
11747 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11748 {
11749         struct tg3 *tp = netdev_priv(dev);
11750
11751         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11752
11753         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11754                 epause->rx_pause = 1;
11755         else
11756                 epause->rx_pause = 0;
11757
11758         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11759                 epause->tx_pause = 1;
11760         else
11761                 epause->tx_pause = 0;
11762 }
11763
11764 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11765 {
11766         struct tg3 *tp = netdev_priv(dev);
11767         int err = 0;
11768
11769         if (tp->link_config.autoneg == AUTONEG_ENABLE)
11770                 tg3_warn_mgmt_link_flap(tp);
11771
11772         if (tg3_flag(tp, USE_PHYLIB)) {
11773                 u32 newadv;
11774                 struct phy_device *phydev;
11775
11776                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11777
11778                 if (!(phydev->supported & SUPPORTED_Pause) ||
11779                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11780                      (epause->rx_pause != epause->tx_pause)))
11781                         return -EINVAL;
11782
11783                 tp->link_config.flowctrl = 0;
11784                 if (epause->rx_pause) {
11785                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11786
11787                         if (epause->tx_pause) {
11788                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11789                                 newadv = ADVERTISED_Pause;
11790                         } else
11791                                 newadv = ADVERTISED_Pause |
11792                                          ADVERTISED_Asym_Pause;
11793                 } else if (epause->tx_pause) {
11794                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11795                         newadv = ADVERTISED_Asym_Pause;
11796                 } else
11797                         newadv = 0;
11798
11799                 if (epause->autoneg)
11800                         tg3_flag_set(tp, PAUSE_AUTONEG);
11801                 else
11802                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11803
11804                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11805                         u32 oldadv = phydev->advertising &
11806                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11807                         if (oldadv != newadv) {
11808                                 phydev->advertising &=
11809                                         ~(ADVERTISED_Pause |
11810                                           ADVERTISED_Asym_Pause);
11811                                 phydev->advertising |= newadv;
11812                                 if (phydev->autoneg) {
11813                                         /*
11814                                          * Always renegotiate the link to
11815                                          * inform our link partner of our
11816                                          * flow control settings, even if the
11817                                          * flow control is forced.  Let
11818                                          * tg3_adjust_link() do the final
11819                                          * flow control setup.
11820                                          */
11821                                         return phy_start_aneg(phydev);
11822                                 }
11823                         }
11824
11825                         if (!epause->autoneg)
11826                                 tg3_setup_flow_control(tp, 0, 0);
11827                 } else {
11828                         tp->link_config.advertising &=
11829                                         ~(ADVERTISED_Pause |
11830                                           ADVERTISED_Asym_Pause);
11831                         tp->link_config.advertising |= newadv;
11832                 }
11833         } else {
11834                 int irq_sync = 0;
11835
11836                 if (netif_running(dev)) {
11837                         tg3_netif_stop(tp);
11838                         irq_sync = 1;
11839                 }
11840
11841                 tg3_full_lock(tp, irq_sync);
11842
11843                 if (epause->autoneg)
11844                         tg3_flag_set(tp, PAUSE_AUTONEG);
11845                 else
11846                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11847                 if (epause->rx_pause)
11848                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11849                 else
11850                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11851                 if (epause->tx_pause)
11852                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11853                 else
11854                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11855
11856                 if (netif_running(dev)) {
11857                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11858                         err = tg3_restart_hw(tp, 0);
11859                         if (!err)
11860                                 tg3_netif_start(tp);
11861                 }
11862
11863                 tg3_full_unlock(tp);
11864         }
11865
11866         return err;
11867 }
11868
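/* Backs 'ethtool -A <dev>'.  In the phylib case the requested rx/tx pause
 * mode is translated into Pause/Asym_Pause advertisement bits and, if the
 * PHY is autonegotiating, the link is renegotiated so the partner learns the
 * new settings; otherwise the MAC is halted and restarted with the new flow
 * control flags.
 */
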
11869 static int tg3_get_sset_count(struct net_device *dev, int sset)
11870 {
11871         switch (sset) {
11872         case ETH_SS_TEST:
11873                 return TG3_NUM_TEST;
11874         case ETH_SS_STATS:
11875                 return TG3_NUM_STATS;
11876         default:
11877                 return -EOPNOTSUPP;
11878         }
11879 }
11880
11881 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11882                          u32 *rules __always_unused)
11883 {
11884         struct tg3 *tp = netdev_priv(dev);
11885
11886         if (!tg3_flag(tp, SUPPORT_MSIX))
11887                 return -EOPNOTSUPP;
11888
11889         switch (info->cmd) {
11890         case ETHTOOL_GRXRINGS:
11891                 if (netif_running(tp->dev))
11892                         info->data = tp->rxq_cnt;
11893                 else {
11894                         info->data = num_online_cpus();
11895                         if (info->data > TG3_RSS_MAX_NUM_QS)
11896                                 info->data = TG3_RSS_MAX_NUM_QS;
11897                 }
11898
11899                 /* The first interrupt vector only
11900                  * handles link interrupts.
11901                  */
11902                 info->data -= 1;
11903                 return 0;
11904
11905         default:
11906                 return -EOPNOTSUPP;
11907         }
11908 }
11909
11910 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11911 {
11912         u32 size = 0;
11913         struct tg3 *tp = netdev_priv(dev);
11914
11915         if (tg3_flag(tp, SUPPORT_MSIX))
11916                 size = TG3_RSS_INDIR_TBL_SIZE;
11917
11918         return size;
11919 }
11920
11921 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11922 {
11923         struct tg3 *tp = netdev_priv(dev);
11924         int i;
11925
11926         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11927                 indir[i] = tp->rss_ind_tbl[i];
11928
11929         return 0;
11930 }
11931
11932 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11933 {
11934         struct tg3 *tp = netdev_priv(dev);
11935         size_t i;
11936
11937         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11938                 tp->rss_ind_tbl[i] = indir[i];
11939
11940         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11941                 return 0;
11942
11943         /* It is legal to write the indirection
11944          * table while the device is running.
11945          */
11946         tg3_full_lock(tp, 0);
11947         tg3_rss_write_indir_tbl(tp);
11948         tg3_full_unlock(tp);
11949
11950         return 0;
11951 }
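/* The RSS indirection table ('ethtool -x' / 'ethtool -X') maps hash buckets
 * to rx queue indices.  Writes are cached in tp->rss_ind_tbl and, when RSS
 * is active, pushed to the chip under the full lock; as noted above this is
 * safe while traffic is flowing.
 */
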
11952
11953 static void tg3_get_channels(struct net_device *dev,
11954                              struct ethtool_channels *channel)
11955 {
11956         struct tg3 *tp = netdev_priv(dev);
11957         u32 deflt_qs = netif_get_num_default_rss_queues();
11958
11959         channel->max_rx = tp->rxq_max;
11960         channel->max_tx = tp->txq_max;
11961
11962         if (netif_running(dev)) {
11963                 channel->rx_count = tp->rxq_cnt;
11964                 channel->tx_count = tp->txq_cnt;
11965         } else {
11966                 if (tp->rxq_req)
11967                         channel->rx_count = tp->rxq_req;
11968                 else
11969                         channel->rx_count = min(deflt_qs, tp->rxq_max);
11970
11971                 if (tp->txq_req)
11972                         channel->tx_count = tp->txq_req;
11973                 else
11974                         channel->tx_count = min(deflt_qs, tp->txq_max);
11975         }
11976 }
11977
11978 static int tg3_set_channels(struct net_device *dev,
11979                             struct ethtool_channels *channel)
11980 {
11981         struct tg3 *tp = netdev_priv(dev);
11982
11983         if (!tg3_flag(tp, SUPPORT_MSIX))
11984                 return -EOPNOTSUPP;
11985
11986         if (channel->rx_count > tp->rxq_max ||
11987             channel->tx_count > tp->txq_max)
11988                 return -EINVAL;
11989
11990         tp->rxq_req = channel->rx_count;
11991         tp->txq_req = channel->tx_count;
11992
11993         if (!netif_running(dev))
11994                 return 0;
11995
11996         tg3_stop(tp);
11997
11998         tg3_carrier_off(tp);
11999
12000         tg3_start(tp, true, false, false);
12001
12002         return 0;
12003 }
12004
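/* Backs 'ethtool -L <dev>'.  Changing the queue counts requires a full
 * stop/start cycle so the MSI-X vectors and rings can be resized; the
 * requested counts are validated against rxq_max/txq_max first.
 */
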
12005 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12006 {
12007         switch (stringset) {
12008         case ETH_SS_STATS:
12009                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12010                 break;
12011         case ETH_SS_TEST:
12012                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12013                 break;
12014         default:
12015                 WARN_ON(1);     /* we need a WARN() */
12016                 break;
12017         }
12018 }
12019
12020 static int tg3_set_phys_id(struct net_device *dev,
12021                             enum ethtool_phys_id_state state)
12022 {
12023         struct tg3 *tp = netdev_priv(dev);
12024
12025         if (!netif_running(tp->dev))
12026                 return -EAGAIN;
12027
12028         switch (state) {
12029         case ETHTOOL_ID_ACTIVE:
12030                 return 1;       /* cycle on/off once per second */
12031
12032         case ETHTOOL_ID_ON:
12033                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12034                      LED_CTRL_1000MBPS_ON |
12035                      LED_CTRL_100MBPS_ON |
12036                      LED_CTRL_10MBPS_ON |
12037                      LED_CTRL_TRAFFIC_OVERRIDE |
12038                      LED_CTRL_TRAFFIC_BLINK |
12039                      LED_CTRL_TRAFFIC_LED);
12040                 break;
12041
12042         case ETHTOOL_ID_OFF:
12043                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12044                      LED_CTRL_TRAFFIC_OVERRIDE);
12045                 break;
12046
12047         case ETHTOOL_ID_INACTIVE:
12048                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12049                 break;
12050         }
12051
12052         return 0;
12053 }
12054
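/* Backs 'ethtool -p <dev>'.  Returning 1 for ETHTOOL_ID_ACTIVE asks the
 * ethtool core to drive one ON/OFF cycle per second; ON forces all the
 * speed and traffic LEDs on via the override bits, and INACTIVE restores
 * the saved LED control value.
 */
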
12055 static void tg3_get_ethtool_stats(struct net_device *dev,
12056                                    struct ethtool_stats *estats, u64 *tmp_stats)
12057 {
12058         struct tg3 *tp = netdev_priv(dev);
12059
12060         if (tp->hw_stats)
12061                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12062         else
12063                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12064 }
12065
12066 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12067 {
12068         int i;
12069         __be32 *buf;
12070         u32 offset = 0, len = 0;
12071         u32 magic, val;
12072
12073         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12074                 return NULL;
12075
12076         if (magic == TG3_EEPROM_MAGIC) {
12077                 for (offset = TG3_NVM_DIR_START;
12078                      offset < TG3_NVM_DIR_END;
12079                      offset += TG3_NVM_DIRENT_SIZE) {
12080                         if (tg3_nvram_read(tp, offset, &val))
12081                                 return NULL;
12082
12083                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12084                             TG3_NVM_DIRTYPE_EXTVPD)
12085                                 break;
12086                 }
12087
12088                 if (offset != TG3_NVM_DIR_END) {
12089                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12090                         if (tg3_nvram_read(tp, offset + 4, &offset))
12091                                 return NULL;
12092
12093                         offset = tg3_nvram_logical_addr(tp, offset);
12094                 }
12095         }
12096
12097         if (!offset || !len) {
12098                 offset = TG3_NVM_VPD_OFF;
12099                 len = TG3_NVM_VPD_LEN;
12100         }
12101
12102         buf = kmalloc(len, GFP_KERNEL);
12103         if (buf == NULL)
12104                 return NULL;
12105
12106         if (magic == TG3_EEPROM_MAGIC) {
12107                 for (i = 0; i < len; i += 4) {
12108                         /* The data is in little-endian format in NVRAM.
12109                          * Use the big-endian read routines to preserve
12110                          * the byte order as it exists in NVRAM.
12111                          */
12112                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12113                                 goto error;
12114                 }
12115         } else {
12116                 u8 *ptr;
12117                 ssize_t cnt;
12118                 unsigned int pos = 0;
12119
12120                 ptr = (u8 *)&buf[0];
12121                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12122                         cnt = pci_read_vpd(tp->pdev, pos,
12123                                            len - pos, ptr);
12124                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
12125                                 cnt = 0;
12126                         else if (cnt < 0)
12127                                 goto error;
12128                 }
12129                 if (pos != len)
12130                         goto error;
12131         }
12132
12133         *vpdlen = len;
12134
12135         return buf;
12136
12137 error:
12138         kfree(buf);
12139         return NULL;
12140 }
12141
12142 #define NVRAM_TEST_SIZE 0x100
12143 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12144 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12145 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12146 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12147 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12148 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12149 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12150 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12151
12152 static int tg3_test_nvram(struct tg3 *tp)
12153 {
12154         u32 csum, magic, len;
12155         __be32 *buf;
12156         int i, j, k, err = 0, size;
12157
12158         if (tg3_flag(tp, NO_NVRAM))
12159                 return 0;
12160
12161         if (tg3_nvram_read(tp, 0, &magic) != 0)
12162                 return -EIO;
12163
12164         if (magic == TG3_EEPROM_MAGIC)
12165                 size = NVRAM_TEST_SIZE;
12166         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12167                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12168                     TG3_EEPROM_SB_FORMAT_1) {
12169                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12170                         case TG3_EEPROM_SB_REVISION_0:
12171                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12172                                 break;
12173                         case TG3_EEPROM_SB_REVISION_2:
12174                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12175                                 break;
12176                         case TG3_EEPROM_SB_REVISION_3:
12177                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12178                                 break;
12179                         case TG3_EEPROM_SB_REVISION_4:
12180                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12181                                 break;
12182                         case TG3_EEPROM_SB_REVISION_5:
12183                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12184                                 break;
12185                         case TG3_EEPROM_SB_REVISION_6:
12186                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12187                                 break;
12188                         default:
12189                                 return -EIO;
12190                         }
12191                 } else
12192                         return 0;
12193         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12194                 size = NVRAM_SELFBOOT_HW_SIZE;
12195         else
12196                 return -EIO;
12197
12198         buf = kmalloc(size, GFP_KERNEL);
12199         if (buf == NULL)
12200                 return -ENOMEM;
12201
12202         err = -EIO;
12203         for (i = 0, j = 0; i < size; i += 4, j++) {
12204                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12205                 if (err)
12206                         break;
12207         }
12208         if (i < size)
12209                 goto out;
12210
12211         /* Selfboot format */
12212         magic = be32_to_cpu(buf[0]);
12213         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12214             TG3_EEPROM_MAGIC_FW) {
12215                 u8 *buf8 = (u8 *) buf, csum8 = 0;
12216
12217                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12218                     TG3_EEPROM_SB_REVISION_2) {
12219                         /* For rev 2, the csum doesn't include the MBA. */
12220                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12221                                 csum8 += buf8[i];
12222                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12223                                 csum8 += buf8[i];
12224                 } else {
12225                         for (i = 0; i < size; i++)
12226                                 csum8 += buf8[i];
12227                 }
12228
12229                 if (csum8 == 0) {
12230                         err = 0;
12231                         goto out;
12232                 }
12233
12234                 err = -EIO;
12235                 goto out;
12236         }
12237
12238         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12239             TG3_EEPROM_MAGIC_HW) {
12240                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12241                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12242                 u8 *buf8 = (u8 *) buf;
12243
12244                 /* Separate the parity bits and the data bytes.  */
12245                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12246                         if ((i == 0) || (i == 8)) {
12247                                 int l;
12248                                 u8 msk;
12249
12250                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12251                                         parity[k++] = buf8[i] & msk;
12252                                 i++;
12253                         } else if (i == 16) {
12254                                 int l;
12255                                 u8 msk;
12256
12257                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12258                                         parity[k++] = buf8[i] & msk;
12259                                 i++;
12260
12261                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12262                                         parity[k++] = buf8[i] & msk;
12263                                 i++;
12264                         }
12265                         data[j++] = buf8[i];
12266                 }
12267
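                /* Verify odd parity: a data byte with an even number of set
                 * bits must have its parity bit set, and one with an odd
                 * count must not.  E.g. data 0x03 (two bits set) needs
                 * parity != 0, data 0x07 (three bits set) needs parity == 0.
                 */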
12268                 err = -EIO;
12269                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12270                         u8 hw8 = hweight8(data[i]);
12271
12272                         if ((hw8 & 0x1) && parity[i])
12273                                 goto out;
12274                         else if (!(hw8 & 0x1) && !parity[i])
12275                                 goto out;
12276                 }
12277                 err = 0;
12278                 goto out;
12279         }
12280
12281         err = -EIO;
12282
12283         /* Bootstrap checksum at offset 0x10 */
12284         csum = calc_crc((unsigned char *) buf, 0x10);
12285         if (csum != le32_to_cpu(buf[0x10/4]))
12286                 goto out;
12287
12288         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12289         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12290         if (csum != le32_to_cpu(buf[0xfc/4]))
12291                 goto out;
12292
12293         kfree(buf);
12294
12295         buf = tg3_vpd_readblock(tp, &len);
12296         if (!buf)
12297                 return -ENOMEM;
12298
12299         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12300         if (i > 0) {
12301                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12302                 if (j < 0)
12303                         goto out;
12304
12305                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12306                         goto out;
12307
12308                 i += PCI_VPD_LRDT_TAG_SIZE;
12309                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12310                                               PCI_VPD_RO_KEYWORD_CHKSUM);
12311                 if (j > 0) {
12312                         u8 csum8 = 0;
12313
12314                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
12315
12316                         for (i = 0; i <= j; i++)
12317                                 csum8 += ((u8 *)buf)[i];
12318
12319                         if (csum8)
12320                                 goto out;
12321                 }
12322         }
12323
12324         err = 0;
12325
12326 out:
12327         kfree(buf);
12328         return err;
12329 }
12330
12331 #define TG3_SERDES_TIMEOUT_SEC  2
12332 #define TG3_COPPER_TIMEOUT_SEC  6
12333
12334 static int tg3_test_link(struct tg3 *tp)
12335 {
12336         int i, max;
12337
12338         if (!netif_running(tp->dev))
12339                 return -ENODEV;
12340
12341         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12342                 max = TG3_SERDES_TIMEOUT_SEC;
12343         else
12344                 max = TG3_COPPER_TIMEOUT_SEC;
12345
12346         for (i = 0; i < max; i++) {
12347                 if (tp->link_up)
12348                         return 0;
12349
12350                 if (msleep_interruptible(1000))
12351                         break;
12352         }
12353
12354         return -EIO;
12355 }
12356
12357 /* Only test the commonly used registers */
12358 static int tg3_test_registers(struct tg3 *tp)
12359 {
12360         int i, is_5705, is_5750;
12361         u32 offset, read_mask, write_mask, val, save_val, read_val;
12362         static struct {
12363                 u16 offset;
12364                 u16 flags;
12365 #define TG3_FL_5705     0x1
12366 #define TG3_FL_NOT_5705 0x2
12367 #define TG3_FL_NOT_5788 0x4
12368 #define TG3_FL_NOT_5750 0x8
12369                 u32 read_mask;
12370                 u32 write_mask;
12371         } reg_tbl[] = {
12372                 /* MAC Control Registers */
12373                 { MAC_MODE, TG3_FL_NOT_5705,
12374                         0x00000000, 0x00ef6f8c },
12375                 { MAC_MODE, TG3_FL_5705,
12376                         0x00000000, 0x01ef6b8c },
12377                 { MAC_STATUS, TG3_FL_NOT_5705,
12378                         0x03800107, 0x00000000 },
12379                 { MAC_STATUS, TG3_FL_5705,
12380                         0x03800100, 0x00000000 },
12381                 { MAC_ADDR_0_HIGH, 0x0000,
12382                         0x00000000, 0x0000ffff },
12383                 { MAC_ADDR_0_LOW, 0x0000,
12384                         0x00000000, 0xffffffff },
12385                 { MAC_RX_MTU_SIZE, 0x0000,
12386                         0x00000000, 0x0000ffff },
12387                 { MAC_TX_MODE, 0x0000,
12388                         0x00000000, 0x00000070 },
12389                 { MAC_TX_LENGTHS, 0x0000,
12390                         0x00000000, 0x00003fff },
12391                 { MAC_RX_MODE, TG3_FL_NOT_5705,
12392                         0x00000000, 0x000007fc },
12393                 { MAC_RX_MODE, TG3_FL_5705,
12394                         0x00000000, 0x000007dc },
12395                 { MAC_HASH_REG_0, 0x0000,
12396                         0x00000000, 0xffffffff },
12397                 { MAC_HASH_REG_1, 0x0000,
12398                         0x00000000, 0xffffffff },
12399                 { MAC_HASH_REG_2, 0x0000,
12400                         0x00000000, 0xffffffff },
12401                 { MAC_HASH_REG_3, 0x0000,
12402                         0x00000000, 0xffffffff },
12403
12404                 /* Receive Data and Receive BD Initiator Control Registers. */
12405                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12406                         0x00000000, 0xffffffff },
12407                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12408                         0x00000000, 0xffffffff },
12409                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12410                         0x00000000, 0x00000003 },
12411                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12412                         0x00000000, 0xffffffff },
12413                 { RCVDBDI_STD_BD+0, 0x0000,
12414                         0x00000000, 0xffffffff },
12415                 { RCVDBDI_STD_BD+4, 0x0000,
12416                         0x00000000, 0xffffffff },
12417                 { RCVDBDI_STD_BD+8, 0x0000,
12418                         0x00000000, 0xffff0002 },
12419                 { RCVDBDI_STD_BD+0xc, 0x0000,
12420                         0x00000000, 0xffffffff },
12421
12422                 /* Receive BD Initiator Control Registers. */
12423                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12424                         0x00000000, 0xffffffff },
12425                 { RCVBDI_STD_THRESH, TG3_FL_5705,
12426                         0x00000000, 0x000003ff },
12427                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12428                         0x00000000, 0xffffffff },
12429
12430                 /* Host Coalescing Control Registers. */
12431                 { HOSTCC_MODE, TG3_FL_NOT_5705,
12432                         0x00000000, 0x00000004 },
12433                 { HOSTCC_MODE, TG3_FL_5705,
12434                         0x00000000, 0x000000f6 },
12435                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12436                         0x00000000, 0xffffffff },
12437                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12438                         0x00000000, 0x000003ff },
12439                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12440                         0x00000000, 0xffffffff },
12441                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12442                         0x00000000, 0x000003ff },
12443                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12444                         0x00000000, 0xffffffff },
12445                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12446                         0x00000000, 0x000000ff },
12447                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12448                         0x00000000, 0xffffffff },
12449                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12450                         0x00000000, 0x000000ff },
12451                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12452                         0x00000000, 0xffffffff },
12453                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12454                         0x00000000, 0xffffffff },
12455                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12456                         0x00000000, 0xffffffff },
12457                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12458                         0x00000000, 0x000000ff },
12459                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12460                         0x00000000, 0xffffffff },
12461                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12462                         0x00000000, 0x000000ff },
12463                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12464                         0x00000000, 0xffffffff },
12465                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12466                         0x00000000, 0xffffffff },
12467                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12468                         0x00000000, 0xffffffff },
12469                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12470                         0x00000000, 0xffffffff },
12471                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12472                         0x00000000, 0xffffffff },
12473                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12474                         0xffffffff, 0x00000000 },
12475                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12476                         0xffffffff, 0x00000000 },
12477
12478                 /* Buffer Manager Control Registers. */
12479                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12480                         0x00000000, 0x007fff80 },
12481                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12482                         0x00000000, 0x007fffff },
12483                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12484                         0x00000000, 0x0000003f },
12485                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12486                         0x00000000, 0x000001ff },
12487                 { BUFMGR_MB_HIGH_WATER, 0x0000,
12488                         0x00000000, 0x000001ff },
12489                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12490                         0xffffffff, 0x00000000 },
12491                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12492                         0xffffffff, 0x00000000 },
12493
12494                 /* Mailbox Registers */
12495                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12496                         0x00000000, 0x000001ff },
12497                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12498                         0x00000000, 0x000001ff },
12499                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12500                         0x00000000, 0x000007ff },
12501                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12502                         0x00000000, 0x000001ff },
12503
12504                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12505         };
12506
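        /* Each entry's read_mask selects bits that must survive any write,
         * and write_mask selects bits that must be writable; a mask pair of
         * 0xffffffff/0x00000000 therefore marks a read-only register, such
         * as the NIC-address mailboxes above.
         */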
12507         is_5705 = is_5750 = 0;
12508         if (tg3_flag(tp, 5705_PLUS)) {
12509                 is_5705 = 1;
12510                 if (tg3_flag(tp, 5750_PLUS))
12511                         is_5750 = 1;
12512         }
12513
12514         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12515                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12516                         continue;
12517
12518                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12519                         continue;
12520
12521                 if (tg3_flag(tp, IS_5788) &&
12522                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
12523                         continue;
12524
12525                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12526                         continue;
12527
12528                 offset = (u32) reg_tbl[i].offset;
12529                 read_mask = reg_tbl[i].read_mask;
12530                 write_mask = reg_tbl[i].write_mask;
12531
12532                 /* Save the original register content */
12533                 save_val = tr32(offset);
12534
12535                 /* Determine the read-only value. */
12536                 read_val = save_val & read_mask;
12537
12538                 /* Write zero to the register, then make sure the read-only bits
12539                  * are not changed and the read/write bits are all zeros.
12540                  */
12541                 tw32(offset, 0);
12542
12543                 val = tr32(offset);
12544
12545                 /* Test the read-only and read/write bits. */
12546                 if (((val & read_mask) != read_val) || (val & write_mask))
12547                         goto out;
12548
12549                 /* Write ones to all the bits defined by RdMask and WrMask, then
12550                  * make sure the read-only bits are not changed and the
12551                  * read/write bits are all ones.
12552                  */
12553                 tw32(offset, read_mask | write_mask);
12554
12555                 val = tr32(offset);
12556
12557                 /* Test the read-only bits. */
12558                 if ((val & read_mask) != read_val)
12559                         goto out;
12560
12561                 /* Test the read/write bits. */
12562                 if ((val & write_mask) != write_mask)
12563                         goto out;
12564
12565                 tw32(offset, save_val);
12566         }
12567
12568         return 0;
12569
12570 out:
12571         if (netif_msg_hw(tp))
12572                 netdev_err(tp->dev,
12573                            "Register test failed at offset %x\n", offset);
12574         tw32(offset, save_val);
12575         return -EIO;
12576 }
12577
12578 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12579 {
12580         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12581         int i;
12582         u32 j;
12583
12584         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12585                 for (j = 0; j < len; j += 4) {
12586                         u32 val;
12587
12588                         tg3_write_mem(tp, offset + j, test_pattern[i]);
12589                         tg3_read_mem(tp, offset + j, &val);
12590                         if (val != test_pattern[i])
12591                                 return -EIO;
12592                 }
12593         }
12594         return 0;
12595 }
12596
12597 static int tg3_test_memory(struct tg3 *tp)
12598 {
12599         static struct mem_entry {
12600                 u32 offset;
12601                 u32 len;
12602         } mem_tbl_570x[] = {
12603                 { 0x00000000, 0x00b50},
12604                 { 0x00002000, 0x1c000},
12605                 { 0xffffffff, 0x00000}
12606         }, mem_tbl_5705[] = {
12607                 { 0x00000100, 0x0000c},
12608                 { 0x00000200, 0x00008},
12609                 { 0x00004000, 0x00800},
12610                 { 0x00006000, 0x01000},
12611                 { 0x00008000, 0x02000},
12612                 { 0x00010000, 0x0e000},
12613                 { 0xffffffff, 0x00000}
12614         }, mem_tbl_5755[] = {
12615                 { 0x00000200, 0x00008},
12616                 { 0x00004000, 0x00800},
12617                 { 0x00006000, 0x00800},
12618                 { 0x00008000, 0x02000},
12619                 { 0x00010000, 0x0c000},
12620                 { 0xffffffff, 0x00000}
12621         }, mem_tbl_5906[] = {
12622                 { 0x00000200, 0x00008},
12623                 { 0x00004000, 0x00400},
12624                 { 0x00006000, 0x00400},
12625                 { 0x00008000, 0x01000},
12626                 { 0x00010000, 0x01000},
12627                 { 0xffffffff, 0x00000}
12628         }, mem_tbl_5717[] = {
12629                 { 0x00000200, 0x00008},
12630                 { 0x00010000, 0x0a000},
12631                 { 0x00020000, 0x13c00},
12632                 { 0xffffffff, 0x00000}
12633         }, mem_tbl_57765[] = {
12634                 { 0x00000200, 0x00008},
12635                 { 0x00004000, 0x00800},
12636                 { 0x00006000, 0x09800},
12637                 { 0x00010000, 0x0a000},
12638                 { 0xffffffff, 0x00000}
12639         };
12640         struct mem_entry *mem_tbl;
12641         int err = 0;
12642         int i;
12643
12644         if (tg3_flag(tp, 5717_PLUS))
12645                 mem_tbl = mem_tbl_5717;
12646         else if (tg3_flag(tp, 57765_CLASS) ||
12647                  tg3_asic_rev(tp) == ASIC_REV_5762)
12648                 mem_tbl = mem_tbl_57765;
12649         else if (tg3_flag(tp, 5755_PLUS))
12650                 mem_tbl = mem_tbl_5755;
12651         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
12652                 mem_tbl = mem_tbl_5906;
12653         else if (tg3_flag(tp, 5705_PLUS))
12654                 mem_tbl = mem_tbl_5705;
12655         else
12656                 mem_tbl = mem_tbl_570x;
12657
12658         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12659                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12660                 if (err)
12661                         break;
12662         }
12663
12664         return err;
12665 }
12666
12667 #define TG3_TSO_MSS             500
12668
12669 #define TG3_TSO_IP_HDR_LEN      20
12670 #define TG3_TSO_TCP_HDR_LEN     20
12671 #define TG3_TSO_TCP_OPT_LEN     12
12672
12673 static const u8 tg3_tso_header[] = {
12674 0x08, 0x00,
12675 0x45, 0x00, 0x00, 0x00,
12676 0x00, 0x00, 0x40, 0x00,
12677 0x40, 0x06, 0x00, 0x00,
12678 0x0a, 0x00, 0x00, 0x01,
12679 0x0a, 0x00, 0x00, 0x02,
12680 0x0d, 0x00, 0xe0, 0x00,
12681 0x00, 0x00, 0x01, 0x00,
12682 0x00, 0x00, 0x02, 0x00,
12683 0x80, 0x10, 0x10, 0x00,
12684 0x14, 0x09, 0x00, 0x00,
12685 0x01, 0x01, 0x08, 0x0a,
12686 0x11, 0x11, 0x11, 0x11,
12687 0x11, 0x11, 0x11, 0x11,
12688 };
12689
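/* Decoding the template above: 0x08 0x00 is the IPv4 ethertype, followed by
 * a 20-byte IPv4 header (protocol 6/TCP, 10.0.0.1 -> 10.0.0.2) and a
 * 32-byte TCP header whose data offset of 8 words covers 12 bytes of
 * options (NOP, NOP, timestamp).  The lengths match the TG3_TSO_*_LEN
 * constants above.
 */
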
12690 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12691 {
12692         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12693         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12694         u32 budget;
12695         struct sk_buff *skb;
12696         u8 *tx_data, *rx_data;
12697         dma_addr_t map;
12698         int num_pkts, tx_len, rx_len, i, err;
12699         struct tg3_rx_buffer_desc *desc;
12700         struct tg3_napi *tnapi, *rnapi;
12701         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12702
12703         tnapi = &tp->napi[0];
12704         rnapi = &tp->napi[0];
12705         if (tp->irq_cnt > 1) {
12706                 if (tg3_flag(tp, ENABLE_RSS))
12707                         rnapi = &tp->napi[1];
12708                 if (tg3_flag(tp, ENABLE_TSS))
12709                         tnapi = &tp->napi[1];
12710         }
12711         coal_now = tnapi->coal_now | rnapi->coal_now;
12712
12713         err = -EIO;
12714
12715         tx_len = pktsz;
12716         skb = netdev_alloc_skb(tp->dev, tx_len);
12717         if (!skb)
12718                 return -ENOMEM;
12719
12720         tx_data = skb_put(skb, tx_len);
12721         memcpy(tx_data, tp->dev->dev_addr, 6);
12722         memset(tx_data + 6, 0x0, 8);    /* zero source MAC + EtherType */
12723
12724         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12725
12726         if (tso_loopback) {
12727                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12728
12729                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12730                               TG3_TSO_TCP_OPT_LEN;
12731
12732                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12733                        sizeof(tg3_tso_header));
12734                 mss = TG3_TSO_MSS;
12735
12736                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12737                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12738
12739                 /* Set the total length field in the IP header */
12740                 iph->tot_len = htons((u16)(mss + hdr_len));
12741
12742                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12743                               TXD_FLAG_CPU_POST_DMA);
12744
12745                 if (tg3_flag(tp, HW_TSO_1) ||
12746                     tg3_flag(tp, HW_TSO_2) ||
12747                     tg3_flag(tp, HW_TSO_3)) {
12748                         struct tcphdr *th;
12749                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12750                         th = (struct tcphdr *)&tx_data[val];
12751                         th->check = 0;
12752                 } else
12753                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
12754
12755                 if (tg3_flag(tp, HW_TSO_3)) {
12756                         mss |= (hdr_len & 0xc) << 12;
12757                         if (hdr_len & 0x10)
12758                                 base_flags |= 0x00000010;
12759                         base_flags |= (hdr_len & 0x3e0) << 5;
12760                 } else if (tg3_flag(tp, HW_TSO_2))
12761                         mss |= hdr_len << 9;
12762                 else if (tg3_flag(tp, HW_TSO_1) ||
12763                          tg3_asic_rev(tp) == ASIC_REV_5705) {
12764                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12765                 } else {
12766                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12767                 }
12768
12769                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12770         } else {
12771                 num_pkts = 1;
12772                 data_off = ETH_HLEN;
12773
12774                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12775                     tx_len > VLAN_ETH_FRAME_LEN)
12776                         base_flags |= TXD_FLAG_JMB_PKT;
12777         }
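        /* Worked example for the TSO path: hdr_len = 20 + 20 + 12 = 52
         * (0x34).  With HW_TSO_2 the header length is folded into the upper
         * bits of mss: 500 | (52 << 9) = 0x69f4.  With HW_TSO_3 it is split
         * between mss and base_flags: (0x34 & 0xc) << 12 = 0x4000 goes into
         * mss, while bit 0x10 and (0x34 & 0x3e0) << 5 = 0x400 go into
         * base_flags.
         */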
12778
12779         for (i = data_off; i < tx_len; i++)
12780                 tx_data[i] = (u8) (i & 0xff);
12781
12782         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12783         if (pci_dma_mapping_error(tp->pdev, map)) {
12784                 dev_kfree_skb(skb);
12785                 return -EIO;
12786         }
12787
12788         val = tnapi->tx_prod;
12789         tnapi->tx_buffers[val].skb = skb;
12790         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12791
12792         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12793                rnapi->coal_now);
12794
12795         udelay(10);
12796
12797         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12798
12799         budget = tg3_tx_avail(tnapi);
12800         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12801                             base_flags | TXD_FLAG_END, mss, 0)) {
12802                 tnapi->tx_buffers[val].skb = NULL;
12803                 dev_kfree_skb(skb);
12804                 return -EIO;
12805         }
12806
12807         tnapi->tx_prod++;
12808
12809         /* Sync BD data before updating mailbox */
12810         wmb();
12811
12812         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12813         tr32_mailbox(tnapi->prodmbox);
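        /* The mailbox read-back above is not just for show: it flushes the
         * posted write, guaranteeing the NIC has seen the new producer
         * index before the polling loop below starts its 350 usec budget.
         */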
12814
12815         udelay(10);
12816
12817         /* Poll for up to 350 usec (35 * 10 usec) to allow enough time on some 10/100 Mbps devices.  */
12818         for (i = 0; i < 35; i++) {
12819                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12820                        coal_now);
12821
12822                 udelay(10);
12823
12824                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12825                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12826                 if ((tx_idx == tnapi->tx_prod) &&
12827                     (rx_idx == (rx_start_idx + num_pkts)))
12828                         break;
12829         }
12830
12831         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12832         dev_kfree_skb(skb);
12833
12834         if (tx_idx != tnapi->tx_prod)
12835                 goto out;
12836
12837         if (rx_idx != rx_start_idx + num_pkts)
12838                 goto out;
12839
12840         val = data_off;
12841         while (rx_idx != rx_start_idx) {
12842                 desc = &rnapi->rx_rcb[rx_start_idx++];
12843                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12844                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12845
12846                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12847                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12848                         goto out;
12849
12850                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12851                          - ETH_FCS_LEN;
12852
12853                 if (!tso_loopback) {
12854                         if (rx_len != tx_len)
12855                                 goto out;
12856
12857                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12858                                 if (opaque_key != RXD_OPAQUE_RING_STD)
12859                                         goto out;
12860                         } else {
12861                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12862                                         goto out;
12863                         }
12864                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12865                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12866                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
12867                         goto out;
12868                 }
12869
12870                 if (opaque_key == RXD_OPAQUE_RING_STD) {
12871                         rx_data = tpr->rx_std_buffers[desc_idx].data;
12872                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12873                                              mapping);
12874                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12875                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12876                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12877                                              mapping);
12878                 } else
12879                         goto out;
12880
12881                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12882                                             PCI_DMA_FROMDEVICE);
12883
12884                 rx_data += TG3_RX_OFFSET(tp);
12885                 for (i = data_off; i < rx_len; i++, val++) {
12886                         if (*(rx_data + i) != (u8) (val & 0xff))
12887                                 goto out;
12888                 }
12889         }
12890
12891         err = 0;
12892
12893         /* tg3_free_rings will unmap and free the rx_data */
12894 out:
12895         return err;
12896 }
12897
12898 #define TG3_STD_LOOPBACK_FAILED         1
12899 #define TG3_JMB_LOOPBACK_FAILED         2
12900 #define TG3_TSO_LOOPBACK_FAILED         4
12901 #define TG3_LOOPBACK_FAILED \
12902         (TG3_STD_LOOPBACK_FAILED | \
12903          TG3_JMB_LOOPBACK_FAILED | \
12904          TG3_TSO_LOOPBACK_FAILED)
12905
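/* Each data[] slot filled in below packs the three sub-test results as a
 * bitmask; e.g. a value of 5 (TG3_STD_LOOPBACK_FAILED |
 * TG3_TSO_LOOPBACK_FAILED) means the standard and TSO loopbacks failed
 * while the jumbo-frame loopback passed.
 */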
12906 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12907 {
12908         int err = -EIO;
12909         u32 eee_cap;
12910         u32 jmb_pkt_sz = 9000;
12911
12912         if (tp->dma_limit)
12913                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12914
12915         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12916         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
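        /* EEE capability is masked off for the duration of the tests and
         * restored at the "done" label; presumably EEE low-power idle on
         * the link would otherwise disturb the loopback timing.
         */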
12917
12918         if (!netif_running(tp->dev)) {
12919                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12920                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12921                 if (do_extlpbk)
12922                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12923                 goto done;
12924         }
12925
12926         err = tg3_reset_hw(tp, 1);
12927         if (err) {
12928                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12929                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12930                 if (do_extlpbk)
12931                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12932                 goto done;
12933         }
12934
12935         if (tg3_flag(tp, ENABLE_RSS)) {
12936                 int i;
12937
12938                 /* Reroute all rx packets to the 1st queue */
12939                 for (i = MAC_RSS_INDIR_TBL_0;
12940                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12941                         tw32(i, 0x0);
12942         }
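        /* With the indirection table zeroed, every RSS hash value selects
         * the first RX ring (serviced by tp->napi[1], the rnapi chosen in
         * tg3_run_loopback()), so the test packet cannot land on an
         * unwatched queue.
         */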
12943
12944         /* HW errata - mac loopback fails in some cases on 5780.
12945          * Normal traffic and PHY loopback are not affected by
12946          * errata.  Also, the MAC loopback test is deprecated for
12947          * all newer ASIC revisions.
12948          */
12949         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
12950             !tg3_flag(tp, CPMU_PRESENT)) {
12951                 tg3_mac_loopback(tp, true);
12952
12953                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12954                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12955
12956                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12957                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12958                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12959
12960                 tg3_mac_loopback(tp, false);
12961         }
12962
12963         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12964             !tg3_flag(tp, USE_PHYLIB)) {
12965                 int i;
12966
12967                 tg3_phy_lpbk_set(tp, 0, false);
12968
12969                 /* Wait for link */
12970                 for (i = 0; i < 100; i++) {
12971                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12972                                 break;
12973                         mdelay(1);
12974                 }
12975
12976                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12977                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12978                 if (tg3_flag(tp, TSO_CAPABLE) &&
12979                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12980                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
12981                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12982                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12983                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12984
12985                 if (do_extlpbk) {
12986                         tg3_phy_lpbk_set(tp, 0, true);
12987
12988                         /* All link indications report up, but the hardware
12989                          * isn't really ready for about 20 msec.  Double it
12990                          * to be sure.
12991                          */
12992                         mdelay(40);
12993
12994                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12995                                 data[TG3_EXT_LOOPB_TEST] |=
12996                                                         TG3_STD_LOOPBACK_FAILED;
12997                         if (tg3_flag(tp, TSO_CAPABLE) &&
12998                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12999                                 data[TG3_EXT_LOOPB_TEST] |=
13000                                                         TG3_TSO_LOOPBACK_FAILED;
13001                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13002                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13003                                 data[TG3_EXT_LOOPB_TEST] |=
13004                                                         TG3_JMB_LOOPBACK_FAILED;
13005                 }
13006
13007                 /* Re-enable gphy autopowerdown. */
13008                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13009                         tg3_phy_toggle_apd(tp, true);
13010         }
13011
13012         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13013                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13014
13015 done:
13016         tp->phy_flags |= eee_cap;
13017
13018         return err;
13019 }
13020
13021 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13022                           u64 *data)
13023 {
13024         struct tg3 *tp = netdev_priv(dev);
13025         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13026
13027         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
13028             tg3_power_up(tp)) {
13029                 etest->flags |= ETH_TEST_FL_FAILED;
13030                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13031                 return;
13032         }
13033
13034         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13035
13036         if (tg3_test_nvram(tp) != 0) {
13037                 etest->flags |= ETH_TEST_FL_FAILED;
13038                 data[TG3_NVRAM_TEST] = 1;
13039         }
13040         if (!doextlpbk && tg3_test_link(tp)) {
13041                 etest->flags |= ETH_TEST_FL_FAILED;
13042                 data[TG3_LINK_TEST] = 1;
13043         }
13044         if (etest->flags & ETH_TEST_FL_OFFLINE) {
13045                 int err, err2 = 0, irq_sync = 0;
13046
13047                 if (netif_running(dev)) {
13048                         tg3_phy_stop(tp);
13049                         tg3_netif_stop(tp);
13050                         irq_sync = 1;
13051                 }
13052
13053                 tg3_full_lock(tp, irq_sync);
13054                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13055                 err = tg3_nvram_lock(tp);
13056                 tg3_halt_cpu(tp, RX_CPU_BASE);
13057                 if (!tg3_flag(tp, 5705_PLUS))
13058                         tg3_halt_cpu(tp, TX_CPU_BASE);
13059                 if (!err)
13060                         tg3_nvram_unlock(tp);
13061
13062                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13063                         tg3_phy_reset(tp);
13064
13065                 if (tg3_test_registers(tp) != 0) {
13066                         etest->flags |= ETH_TEST_FL_FAILED;
13067                         data[TG3_REGISTER_TEST] = 1;
13068                 }
13069
13070                 if (tg3_test_memory(tp) != 0) {
13071                         etest->flags |= ETH_TEST_FL_FAILED;
13072                         data[TG3_MEMORY_TEST] = 1;
13073                 }
13074
13075                 if (doextlpbk)
13076                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13077
13078                 if (tg3_test_loopback(tp, data, doextlpbk))
13079                         etest->flags |= ETH_TEST_FL_FAILED;
13080
13081                 tg3_full_unlock(tp);
13082
13083                 if (tg3_test_interrupt(tp) != 0) {
13084                         etest->flags |= ETH_TEST_FL_FAILED;
13085                         data[TG3_INTERRUPT_TEST] = 1;
13086                 }
13087
13088                 tg3_full_lock(tp, 0);
13089
13090                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13091                 if (netif_running(dev)) {
13092                         tg3_flag_set(tp, INIT_COMPLETE);
13093                         err2 = tg3_restart_hw(tp, 1);
13094                         if (!err2)
13095                                 tg3_netif_start(tp);
13096                 }
13097
13098                 tg3_full_unlock(tp);
13099
13100                 if (irq_sync && !err2)
13101                         tg3_phy_start(tp);
13102         }
13103         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13104                 tg3_power_down(tp);
13105
13106 }
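/* The function above is wired up as .self_test in tg3_ethtool_ops below;
 * "ethtool -t eth0 offline" sets ETH_TEST_FL_OFFLINE and runs the
 * register, memory, loopback and interrupt tests, while "ethtool -t eth0
 * external_lb" additionally requests the external loopback pass via
 * ETH_TEST_FL_EXTERNAL_LB.
 */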
13107
13108 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13109                               struct ifreq *ifr, int cmd)
13110 {
13111         struct tg3 *tp = netdev_priv(dev);
13112         struct hwtstamp_config stmpconf;
13113
13114         if (!tg3_flag(tp, PTP_CAPABLE))
13115                 return -EINVAL;
13116
13117         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13118                 return -EFAULT;
13119
13120         if (stmpconf.flags)
13121                 return -EINVAL;
13122
13123         switch (stmpconf.tx_type) {
13124         case HWTSTAMP_TX_ON:
13125                 tg3_flag_set(tp, TX_TSTAMP_EN);
13126                 break;
13127         case HWTSTAMP_TX_OFF:
13128                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13129                 break;
13130         default:
13131                 return -ERANGE;
13132         }
13133
13134         switch (stmpconf.rx_filter) {
13135         case HWTSTAMP_FILTER_NONE:
13136                 tp->rxptpctl = 0;
13137                 break;
13138         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13139                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13140                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13141                 break;
13142         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13143                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13144                                TG3_RX_PTP_CTL_SYNC_EVNT;
13145                 break;
13146         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13147                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13148                                TG3_RX_PTP_CTL_DELAY_REQ;
13149                 break;
13150         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13151                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13152                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13153                 break;
13154         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13155                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13156                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13157                 break;
13158         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13159                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13160                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13161                 break;
13162         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13163                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13164                                TG3_RX_PTP_CTL_SYNC_EVNT;
13165                 break;
13166         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13167                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13168                                TG3_RX_PTP_CTL_SYNC_EVNT;
13169                 break;
13170         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13171                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13172                                TG3_RX_PTP_CTL_SYNC_EVNT;
13173                 break;
13174         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13175                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13176                                TG3_RX_PTP_CTL_DELAY_REQ;
13177                 break;
13178         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13179                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13180                                TG3_RX_PTP_CTL_DELAY_REQ;
13181                 break;
13182         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13183                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13184                                TG3_RX_PTP_CTL_DELAY_REQ;
13185                 break;
13186         default:
13187                 return -ERANGE;
13188         }
13189
13190         if (netif_running(dev) && tp->rxptpctl)
13191                 tw32(TG3_RX_PTP_CTL,
13192                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13193
13194         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13195                 -EFAULT : 0;
13196 }
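/* A minimal user-space sketch of driving the ioctl above (hypothetical
 * setup; needs <sys/ioctl.h>, <net/if.h>, <linux/sockios.h> and
 * <linux/net_tstamp.h>):
 *
 *      struct hwtstamp_config cfg = { 0 };
 *      struct ifreq ifr = { 0 };
 *      int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *      cfg.tx_type = HWTSTAMP_TX_ON;
 *      cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *      ifr.ifr_data = (void *)&cfg;
 *      ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * Per the SIOCSHWTSTAMP convention, the configuration the driver actually
 * accepted is echoed back through ifr_data, as the copy_to_user() above
 * does.
 */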
13197
13198 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13199 {
13200         struct mii_ioctl_data *data = if_mii(ifr);
13201         struct tg3 *tp = netdev_priv(dev);
13202         int err;
13203
13204         if (tg3_flag(tp, USE_PHYLIB)) {
13205                 struct phy_device *phydev;
13206                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13207                         return -EAGAIN;
13208                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13209                 return phy_mii_ioctl(phydev, ifr, cmd);
13210         }
13211
13212         switch (cmd) {
13213         case SIOCGMIIPHY:
13214                 data->phy_id = tp->phy_addr;
13215
13216                 /* fallthru */
13217         case SIOCGMIIREG: {
13218                 u32 mii_regval;
13219
13220                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13221                         break;                  /* We have no PHY */
13222
13223                 if (!netif_running(dev))
13224                         return -EAGAIN;
13225
13226                 spin_lock_bh(&tp->lock);
13227                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13228                                     data->reg_num & 0x1f, &mii_regval);
13229                 spin_unlock_bh(&tp->lock);
13230
13231                 data->val_out = mii_regval;
13232
13233                 return err;
13234         }
13235
13236         case SIOCSMIIREG:
13237                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13238                         break;                  /* We have no PHY */
13239
13240                 if (!netif_running(dev))
13241                         return -EAGAIN;
13242
13243                 spin_lock_bh(&tp->lock);
13244                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13245                                      data->reg_num & 0x1f, data->val_in);
13246                 spin_unlock_bh(&tp->lock);
13247
13248                 return err;
13249
13250         case SIOCSHWTSTAMP:
13251                 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13252
13253         default:
13254                 /* do nothing */
13255                 break;
13256         }
13257         return -EOPNOTSUPP;
13258 }
13259
13260 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13261 {
13262         struct tg3 *tp = netdev_priv(dev);
13263
13264         memcpy(ec, &tp->coal, sizeof(*ec));
13265         return 0;
13266 }
13267
13268 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13269 {
13270         struct tg3 *tp = netdev_priv(dev);
13271         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13272         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13273
13274         if (!tg3_flag(tp, 5705_PLUS)) {
13275                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13276                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13277                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13278                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13279         }
13280
13281         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13282             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13283             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13284             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13285             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13286             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13287             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13288             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13289             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13290             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13291                 return -EINVAL;
13292
13293         /* No rx interrupts will be generated if both are zero */
13294         if ((ec->rx_coalesce_usecs == 0) &&
13295             (ec->rx_max_coalesced_frames == 0))
13296                 return -EINVAL;
13297
13298         /* No tx interrupts will be generated if both are zero */
13299         if ((ec->tx_coalesce_usecs == 0) &&
13300             (ec->tx_max_coalesced_frames == 0))
13301                 return -EINVAL;
13302
13303         /* Only copy relevant parameters, ignore all others. */
13304         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13305         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13306         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13307         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13308         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13309         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13310         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13311         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13312         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13313
13314         if (netif_running(dev)) {
13315                 tg3_full_lock(tp, 0);
13316                 __tg3_set_coalesce(tp, &tp->coal);
13317                 tg3_full_unlock(tp);
13318         }
13319         return 0;
13320 }
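/* These map onto the standard ethtool coalescing knobs: e.g. "ethtool -C
 * eth0 rx-usecs 20 rx-frames 5" sets rx_coalesce_usecs and
 * rx_max_coalesced_frames above.  Fields this driver does not copy (such
 * as the adaptive-coalescing parameters) are silently ignored.
 */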
13321
13322 static const struct ethtool_ops tg3_ethtool_ops = {
13323         .get_settings           = tg3_get_settings,
13324         .set_settings           = tg3_set_settings,
13325         .get_drvinfo            = tg3_get_drvinfo,
13326         .get_regs_len           = tg3_get_regs_len,
13327         .get_regs               = tg3_get_regs,
13328         .get_wol                = tg3_get_wol,
13329         .set_wol                = tg3_set_wol,
13330         .get_msglevel           = tg3_get_msglevel,
13331         .set_msglevel           = tg3_set_msglevel,
13332         .nway_reset             = tg3_nway_reset,
13333         .get_link               = ethtool_op_get_link,
13334         .get_eeprom_len         = tg3_get_eeprom_len,
13335         .get_eeprom             = tg3_get_eeprom,
13336         .set_eeprom             = tg3_set_eeprom,
13337         .get_ringparam          = tg3_get_ringparam,
13338         .set_ringparam          = tg3_set_ringparam,
13339         .get_pauseparam         = tg3_get_pauseparam,
13340         .set_pauseparam         = tg3_set_pauseparam,
13341         .self_test              = tg3_self_test,
13342         .get_strings            = tg3_get_strings,
13343         .set_phys_id            = tg3_set_phys_id,
13344         .get_ethtool_stats      = tg3_get_ethtool_stats,
13345         .get_coalesce           = tg3_get_coalesce,
13346         .set_coalesce           = tg3_set_coalesce,
13347         .get_sset_count         = tg3_get_sset_count,
13348         .get_rxnfc              = tg3_get_rxnfc,
13349         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
13350         .get_rxfh_indir         = tg3_get_rxfh_indir,
13351         .set_rxfh_indir         = tg3_set_rxfh_indir,
13352         .get_channels           = tg3_get_channels,
13353         .set_channels           = tg3_set_channels,
13354         .get_ts_info            = tg3_get_ts_info,
13355 };
13356
13357 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13358                                                 struct rtnl_link_stats64 *stats)
13359 {
13360         struct tg3 *tp = netdev_priv(dev);
13361
13362         spin_lock_bh(&tp->lock);
13363         if (!tp->hw_stats) {
13364                 spin_unlock_bh(&tp->lock);
13365                 return &tp->net_stats_prev;
13366         }
13367
13368         tg3_get_nstats(tp, stats);
13369         spin_unlock_bh(&tp->lock);
13370
13371         return stats;
13372 }
13373
13374 static void tg3_set_rx_mode(struct net_device *dev)
13375 {
13376         struct tg3 *tp = netdev_priv(dev);
13377
13378         if (!netif_running(dev))
13379                 return;
13380
13381         tg3_full_lock(tp, 0);
13382         __tg3_set_rx_mode(dev);
13383         tg3_full_unlock(tp);
13384 }
13385
13386 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13387                                int new_mtu)
13388 {
13389         dev->mtu = new_mtu;
13390
13391         if (new_mtu > ETH_DATA_LEN) {
13392                 if (tg3_flag(tp, 5780_CLASS)) {
13393                         netdev_update_features(dev);
13394                         tg3_flag_clear(tp, TSO_CAPABLE);
13395                 } else {
13396                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
13397                 }
13398         } else {
13399                 if (tg3_flag(tp, 5780_CLASS)) {
13400                         tg3_flag_set(tp, TSO_CAPABLE);
13401                         netdev_update_features(dev);
13402                 }
13403                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13404         }
13405 }
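/* Note the 5780-class path above toggles TSO_CAPABLE around
 * netdev_update_features() instead of touching the jumbo ring flag, which
 * suggests jumbo frames and TSO cannot be enabled together on those
 * chips; the features callback re-evaluates the offload flags against the
 * already-updated dev->mtu.
 */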
13406
13407 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13408 {
13409         struct tg3 *tp = netdev_priv(dev);
13410         int err, reset_phy = 0;
13411
13412         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13413                 return -EINVAL;
13414
13415         if (!netif_running(dev)) {
13416                 /* The new MTU will simply take effect the next
13417                  * time the device is brought up.
13418                  */
13419                 tg3_set_mtu(dev, tp, new_mtu);
13420                 return 0;
13421         }
13422
13423         tg3_phy_stop(tp);
13424
13425         tg3_netif_stop(tp);
13426
13427         tg3_full_lock(tp, 1);
13428
13429         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13430
13431         tg3_set_mtu(dev, tp, new_mtu);
13432
13433         /* Reset the PHY, otherwise the read DMA engine will be left in a
13434          * mode that breaks all DMA requests down to 256 bytes.
13435          */
13436         if (tg3_asic_rev(tp) == ASIC_REV_57766)
13437                 reset_phy = 1;
13438
13439         err = tg3_restart_hw(tp, reset_phy);
13440
13441         if (!err)
13442                 tg3_netif_start(tp);
13443
13444         tg3_full_unlock(tp);
13445
13446         if (!err)
13447                 tg3_phy_start(tp);
13448
13449         return err;
13450 }
13451
13452 static const struct net_device_ops tg3_netdev_ops = {
13453         .ndo_open               = tg3_open,
13454         .ndo_stop               = tg3_close,
13455         .ndo_start_xmit         = tg3_start_xmit,
13456         .ndo_get_stats64        = tg3_get_stats64,
13457         .ndo_validate_addr      = eth_validate_addr,
13458         .ndo_set_rx_mode        = tg3_set_rx_mode,
13459         .ndo_set_mac_address    = tg3_set_mac_addr,
13460         .ndo_do_ioctl           = tg3_ioctl,
13461         .ndo_tx_timeout         = tg3_tx_timeout,
13462         .ndo_change_mtu         = tg3_change_mtu,
13463         .ndo_fix_features       = tg3_fix_features,
13464         .ndo_set_features       = tg3_set_features,
13465 #ifdef CONFIG_NET_POLL_CONTROLLER
13466         .ndo_poll_controller    = tg3_poll_controller,
13467 #endif
13468 };
13469
13470 static void tg3_get_eeprom_size(struct tg3 *tp)
13471 {
13472         u32 cursize, val, magic;
13473
13474         tp->nvram_size = EEPROM_CHIP_SIZE;
13475
13476         if (tg3_nvram_read(tp, 0, &magic) != 0)
13477                 return;
13478
13479         if ((magic != TG3_EEPROM_MAGIC) &&
13480             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13481             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13482                 return;
13483
13484         /*
13485          * Size the chip by reading offsets at increasing powers of two.
13486          * When we encounter our validation signature, we know the addressing
13487          * has wrapped around, and thus have our chip size.
13488          */
13489         cursize = 0x10;
13490
13491         while (cursize < tp->nvram_size) {
13492                 if (tg3_nvram_read(tp, cursize, &val) != 0)
13493                         return;
13494
13495                 if (val == magic)
13496                         break;
13497
13498                 cursize <<= 1;
13499         }
13500
13501         tp->nvram_size = cursize;
13502 }
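/* Example: on a 4 KB part the reads at 0x10, 0x20, ... return ordinary
 * data, but the read at cursize == 0x1000 wraps around to offset 0 and
 * returns the magic signature again, so nvram_size becomes 0x1000.
 */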
13503
13504 static void tg3_get_nvram_size(struct tg3 *tp)
13505 {
13506         u32 val;
13507
13508         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13509                 return;
13510
13511         /* Selfboot format */
13512         if (val != TG3_EEPROM_MAGIC) {
13513                 tg3_get_eeprom_size(tp);
13514                 return;
13515         }
13516
13517         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13518                 if (val != 0) {
13519                         /* This is confusing.  We want to operate on the
13520                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
13521                          * call will read from NVRAM and byteswap the data
13522                          * according to the byteswapping settings for all
13523                          * other register accesses.  This ensures the data we
13524                          * want will always reside in the lower 16-bits.
13525                          * However, the data in NVRAM is in LE format, which
13526                          * means the data from the NVRAM read will always be
13527                          * opposite the endianness of the CPU.  The 16-bit
13528                          * byteswap then brings the data to CPU endianness.
13529                          */
13530                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13531                         return;
13532                 }
13533         }
13534         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13535 }
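/* The size word at offset 0xf2 is in kilobytes: a stored value of 512,
 * for instance, yields 512 * 1024 bytes, i.e. TG3_NVRAM_SIZE_512KB.
 */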
13536
13537 static void tg3_get_nvram_info(struct tg3 *tp)
13538 {
13539         u32 nvcfg1;
13540
13541         nvcfg1 = tr32(NVRAM_CFG1);
13542         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13543                 tg3_flag_set(tp, FLASH);
13544         } else {
13545                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13546                 tw32(NVRAM_CFG1, nvcfg1);
13547         }
13548
13549         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13550             tg3_flag(tp, 5780_CLASS)) {
13551                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13552                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13553                         tp->nvram_jedecnum = JEDEC_ATMEL;
13554                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13555                         tg3_flag_set(tp, NVRAM_BUFFERED);
13556                         break;
13557                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13558                         tp->nvram_jedecnum = JEDEC_ATMEL;
13559                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13560                         break;
13561                 case FLASH_VENDOR_ATMEL_EEPROM:
13562                         tp->nvram_jedecnum = JEDEC_ATMEL;
13563                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13564                         tg3_flag_set(tp, NVRAM_BUFFERED);
13565                         break;
13566                 case FLASH_VENDOR_ST:
13567                         tp->nvram_jedecnum = JEDEC_ST;
13568                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13569                         tg3_flag_set(tp, NVRAM_BUFFERED);
13570                         break;
13571                 case FLASH_VENDOR_SAIFUN:
13572                         tp->nvram_jedecnum = JEDEC_SAIFUN;
13573                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13574                         break;
13575                 case FLASH_VENDOR_SST_SMALL:
13576                 case FLASH_VENDOR_SST_LARGE:
13577                         tp->nvram_jedecnum = JEDEC_SST;
13578                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13579                         break;
13580                 }
13581         } else {
13582                 tp->nvram_jedecnum = JEDEC_ATMEL;
13583                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13584                 tg3_flag_set(tp, NVRAM_BUFFERED);
13585         }
13586 }
13587
13588 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13589 {
13590         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13591         case FLASH_5752PAGE_SIZE_256:
13592                 tp->nvram_pagesize = 256;
13593                 break;
13594         case FLASH_5752PAGE_SIZE_512:
13595                 tp->nvram_pagesize = 512;
13596                 break;
13597         case FLASH_5752PAGE_SIZE_1K:
13598                 tp->nvram_pagesize = 1024;
13599                 break;
13600         case FLASH_5752PAGE_SIZE_2K:
13601                 tp->nvram_pagesize = 2048;
13602                 break;
13603         case FLASH_5752PAGE_SIZE_4K:
13604                 tp->nvram_pagesize = 4096;
13605                 break;
13606         case FLASH_5752PAGE_SIZE_264:
13607                 tp->nvram_pagesize = 264;
13608                 break;
13609         case FLASH_5752PAGE_SIZE_528:
13610                 tp->nvram_pagesize = 528;
13611                 break;
13612         }
13613 }
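/* 264 and 528 are the "power of two plus overhead" page sizes of Atmel
 * AT45DB-style DataFlash (256 + 8 and 512 + 16 bytes).  Callers such as
 * tg3_get_57780_nvram_info() use this to decide whether linear-to-page
 * address translation is needed, setting NO_NVRAM_ADDR_TRANS for any
 * other page size.
 */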
13614
13615 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13616 {
13617         u32 nvcfg1;
13618
13619         nvcfg1 = tr32(NVRAM_CFG1);
13620
13621         /* NVRAM protection for TPM */
13622         if (nvcfg1 & (1 << 27))
13623                 tg3_flag_set(tp, PROTECTED_NVRAM);
13624
13625         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13626         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13627         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13628                 tp->nvram_jedecnum = JEDEC_ATMEL;
13629                 tg3_flag_set(tp, NVRAM_BUFFERED);
13630                 break;
13631         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13632                 tp->nvram_jedecnum = JEDEC_ATMEL;
13633                 tg3_flag_set(tp, NVRAM_BUFFERED);
13634                 tg3_flag_set(tp, FLASH);
13635                 break;
13636         case FLASH_5752VENDOR_ST_M45PE10:
13637         case FLASH_5752VENDOR_ST_M45PE20:
13638         case FLASH_5752VENDOR_ST_M45PE40:
13639                 tp->nvram_jedecnum = JEDEC_ST;
13640                 tg3_flag_set(tp, NVRAM_BUFFERED);
13641                 tg3_flag_set(tp, FLASH);
13642                 break;
13643         }
13644
13645         if (tg3_flag(tp, FLASH)) {
13646                 tg3_nvram_get_pagesize(tp, nvcfg1);
13647         } else {
13648                 /* For eeprom, set pagesize to maximum eeprom size */
13649                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13650
13651                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13652                 tw32(NVRAM_CFG1, nvcfg1);
13653         }
13654 }
13655
13656 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13657 {
13658         u32 nvcfg1, protect = 0;
13659
13660         nvcfg1 = tr32(NVRAM_CFG1);
13661
13662         /* NVRAM protection for TPM */
13663         if (nvcfg1 & (1 << 27)) {
13664                 tg3_flag_set(tp, PROTECTED_NVRAM);
13665                 protect = 1;
13666         }
13667
13668         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13669         switch (nvcfg1) {
13670         case FLASH_5755VENDOR_ATMEL_FLASH_1:
13671         case FLASH_5755VENDOR_ATMEL_FLASH_2:
13672         case FLASH_5755VENDOR_ATMEL_FLASH_3:
13673         case FLASH_5755VENDOR_ATMEL_FLASH_5:
13674                 tp->nvram_jedecnum = JEDEC_ATMEL;
13675                 tg3_flag_set(tp, NVRAM_BUFFERED);
13676                 tg3_flag_set(tp, FLASH);
13677                 tp->nvram_pagesize = 264;
13678                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13679                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13680                         tp->nvram_size = (protect ? 0x3e200 :
13681                                           TG3_NVRAM_SIZE_512KB);
13682                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13683                         tp->nvram_size = (protect ? 0x1f200 :
13684                                           TG3_NVRAM_SIZE_256KB);
13685                 else
13686                         tp->nvram_size = (protect ? 0x1f200 :
13687                                           TG3_NVRAM_SIZE_128KB);
13688                 break;
13689         case FLASH_5752VENDOR_ST_M45PE10:
13690         case FLASH_5752VENDOR_ST_M45PE20:
13691         case FLASH_5752VENDOR_ST_M45PE40:
13692                 tp->nvram_jedecnum = JEDEC_ST;
13693                 tg3_flag_set(tp, NVRAM_BUFFERED);
13694                 tg3_flag_set(tp, FLASH);
13695                 tp->nvram_pagesize = 256;
13696                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13697                         tp->nvram_size = (protect ?
13698                                           TG3_NVRAM_SIZE_64KB :
13699                                           TG3_NVRAM_SIZE_128KB);
13700                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13701                         tp->nvram_size = (protect ?
13702                                           TG3_NVRAM_SIZE_64KB :
13703                                           TG3_NVRAM_SIZE_256KB);
13704                 else
13705                         tp->nvram_size = (protect ?
13706                                           TG3_NVRAM_SIZE_128KB :
13707                                           TG3_NVRAM_SIZE_512KB);
13708                 break;
13709         }
13710 }
13711
13712 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13713 {
13714         u32 nvcfg1;
13715
13716         nvcfg1 = tr32(NVRAM_CFG1);
13717
13718         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13719         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13720         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13721         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13722         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13723                 tp->nvram_jedecnum = JEDEC_ATMEL;
13724                 tg3_flag_set(tp, NVRAM_BUFFERED);
13725                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13726
13727                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13728                 tw32(NVRAM_CFG1, nvcfg1);
13729                 break;
13730         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13731         case FLASH_5755VENDOR_ATMEL_FLASH_1:
13732         case FLASH_5755VENDOR_ATMEL_FLASH_2:
13733         case FLASH_5755VENDOR_ATMEL_FLASH_3:
13734                 tp->nvram_jedecnum = JEDEC_ATMEL;
13735                 tg3_flag_set(tp, NVRAM_BUFFERED);
13736                 tg3_flag_set(tp, FLASH);
13737                 tp->nvram_pagesize = 264;
13738                 break;
13739         case FLASH_5752VENDOR_ST_M45PE10:
13740         case FLASH_5752VENDOR_ST_M45PE20:
13741         case FLASH_5752VENDOR_ST_M45PE40:
13742                 tp->nvram_jedecnum = JEDEC_ST;
13743                 tg3_flag_set(tp, NVRAM_BUFFERED);
13744                 tg3_flag_set(tp, FLASH);
13745                 tp->nvram_pagesize = 256;
13746                 break;
13747         }
13748 }
13749
13750 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13751 {
13752         u32 nvcfg1, protect = 0;
13753
13754         nvcfg1 = tr32(NVRAM_CFG1);
13755
13756         /* NVRAM protection for TPM */
13757         if (nvcfg1 & (1 << 27)) {
13758                 tg3_flag_set(tp, PROTECTED_NVRAM);
13759                 protect = 1;
13760         }
13761
13762         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13763         switch (nvcfg1) {
13764         case FLASH_5761VENDOR_ATMEL_ADB021D:
13765         case FLASH_5761VENDOR_ATMEL_ADB041D:
13766         case FLASH_5761VENDOR_ATMEL_ADB081D:
13767         case FLASH_5761VENDOR_ATMEL_ADB161D:
13768         case FLASH_5761VENDOR_ATMEL_MDB021D:
13769         case FLASH_5761VENDOR_ATMEL_MDB041D:
13770         case FLASH_5761VENDOR_ATMEL_MDB081D:
13771         case FLASH_5761VENDOR_ATMEL_MDB161D:
13772                 tp->nvram_jedecnum = JEDEC_ATMEL;
13773                 tg3_flag_set(tp, NVRAM_BUFFERED);
13774                 tg3_flag_set(tp, FLASH);
13775                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13776                 tp->nvram_pagesize = 256;
13777                 break;
13778         case FLASH_5761VENDOR_ST_A_M45PE20:
13779         case FLASH_5761VENDOR_ST_A_M45PE40:
13780         case FLASH_5761VENDOR_ST_A_M45PE80:
13781         case FLASH_5761VENDOR_ST_A_M45PE16:
13782         case FLASH_5761VENDOR_ST_M_M45PE20:
13783         case FLASH_5761VENDOR_ST_M_M45PE40:
13784         case FLASH_5761VENDOR_ST_M_M45PE80:
13785         case FLASH_5761VENDOR_ST_M_M45PE16:
13786                 tp->nvram_jedecnum = JEDEC_ST;
13787                 tg3_flag_set(tp, NVRAM_BUFFERED);
13788                 tg3_flag_set(tp, FLASH);
13789                 tp->nvram_pagesize = 256;
13790                 break;
13791         }
13792
13793         if (protect) {
13794                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13795         } else {
13796                 switch (nvcfg1) {
13797                 case FLASH_5761VENDOR_ATMEL_ADB161D:
13798                 case FLASH_5761VENDOR_ATMEL_MDB161D:
13799                 case FLASH_5761VENDOR_ST_A_M45PE16:
13800                 case FLASH_5761VENDOR_ST_M_M45PE16:
13801                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13802                         break;
13803                 case FLASH_5761VENDOR_ATMEL_ADB081D:
13804                 case FLASH_5761VENDOR_ATMEL_MDB081D:
13805                 case FLASH_5761VENDOR_ST_A_M45PE80:
13806                 case FLASH_5761VENDOR_ST_M_M45PE80:
13807                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13808                         break;
13809                 case FLASH_5761VENDOR_ATMEL_ADB041D:
13810                 case FLASH_5761VENDOR_ATMEL_MDB041D:
13811                 case FLASH_5761VENDOR_ST_A_M45PE40:
13812                 case FLASH_5761VENDOR_ST_M_M45PE40:
13813                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13814                         break;
13815                 case FLASH_5761VENDOR_ATMEL_ADB021D:
13816                 case FLASH_5761VENDOR_ATMEL_MDB021D:
13817                 case FLASH_5761VENDOR_ST_A_M45PE20:
13818                 case FLASH_5761VENDOR_ST_M_M45PE20:
13819                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13820                         break;
13821                 }
13822         }
13823 }
13824
13825 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13826 {
13827         tp->nvram_jedecnum = JEDEC_ATMEL;
13828         tg3_flag_set(tp, NVRAM_BUFFERED);
13829         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13830 }
13831
13832 static void tg3_get_57780_nvram_info(struct tg3 *tp)
13833 {
13834         u32 nvcfg1;
13835
13836         nvcfg1 = tr32(NVRAM_CFG1);
13837
13838         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13839         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13840         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13841                 tp->nvram_jedecnum = JEDEC_ATMEL;
13842                 tg3_flag_set(tp, NVRAM_BUFFERED);
13843                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13844
13845                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13846                 tw32(NVRAM_CFG1, nvcfg1);
13847                 return;
13848         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13849         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13850         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13851         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13852         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13853         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13854         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13855                 tp->nvram_jedecnum = JEDEC_ATMEL;
13856                 tg3_flag_set(tp, NVRAM_BUFFERED);
13857                 tg3_flag_set(tp, FLASH);
13858
13859                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13860                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13861                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13862                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13863                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13864                         break;
13865                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13866                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13867                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13868                         break;
13869                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13870                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13871                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13872                         break;
13873                 }
13874                 break;
13875         case FLASH_5752VENDOR_ST_M45PE10:
13876         case FLASH_5752VENDOR_ST_M45PE20:
13877         case FLASH_5752VENDOR_ST_M45PE40:
13878                 tp->nvram_jedecnum = JEDEC_ST;
13879                 tg3_flag_set(tp, NVRAM_BUFFERED);
13880                 tg3_flag_set(tp, FLASH);
13881
13882                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13883                 case FLASH_5752VENDOR_ST_M45PE10:
13884                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13885                         break;
13886                 case FLASH_5752VENDOR_ST_M45PE20:
13887                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13888                         break;
13889                 case FLASH_5752VENDOR_ST_M45PE40:
13890                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13891                         break;
13892                 }
13893                 break;
13894         default:
13895                 tg3_flag_set(tp, NO_NVRAM);
13896                 return;
13897         }
13898
13899         tg3_nvram_get_pagesize(tp, nvcfg1);
13900         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13901                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13902 }
13903
13904
13905 static void tg3_get_5717_nvram_info(struct tg3 *tp)
13906 {
13907         u32 nvcfg1;
13908
13909         nvcfg1 = tr32(NVRAM_CFG1);
13910
13911         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13912         case FLASH_5717VENDOR_ATMEL_EEPROM:
13913         case FLASH_5717VENDOR_MICRO_EEPROM:
13914                 tp->nvram_jedecnum = JEDEC_ATMEL;
13915                 tg3_flag_set(tp, NVRAM_BUFFERED);
13916                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13917
13918                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13919                 tw32(NVRAM_CFG1, nvcfg1);
13920                 return;
13921         case FLASH_5717VENDOR_ATMEL_MDB011D:
13922         case FLASH_5717VENDOR_ATMEL_ADB011B:
13923         case FLASH_5717VENDOR_ATMEL_ADB011D:
13924         case FLASH_5717VENDOR_ATMEL_MDB021D:
13925         case FLASH_5717VENDOR_ATMEL_ADB021B:
13926         case FLASH_5717VENDOR_ATMEL_ADB021D:
13927         case FLASH_5717VENDOR_ATMEL_45USPT:
13928                 tp->nvram_jedecnum = JEDEC_ATMEL;
13929                 tg3_flag_set(tp, NVRAM_BUFFERED);
13930                 tg3_flag_set(tp, FLASH);
13931
13932                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13933                 case FLASH_5717VENDOR_ATMEL_MDB021D:
13934                         /* Detect size with tg3_nvram_get_size() */
13935                         break;
13936                 case FLASH_5717VENDOR_ATMEL_ADB021B:
13937                 case FLASH_5717VENDOR_ATMEL_ADB021D:
13938                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13939                         break;
13940                 default:
13941                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13942                         break;
13943                 }
13944                 break;
13945         case FLASH_5717VENDOR_ST_M_M25PE10:
13946         case FLASH_5717VENDOR_ST_A_M25PE10:
13947         case FLASH_5717VENDOR_ST_M_M45PE10:
13948         case FLASH_5717VENDOR_ST_A_M45PE10:
13949         case FLASH_5717VENDOR_ST_M_M25PE20:
13950         case FLASH_5717VENDOR_ST_A_M25PE20:
13951         case FLASH_5717VENDOR_ST_M_M45PE20:
13952         case FLASH_5717VENDOR_ST_A_M45PE20:
13953         case FLASH_5717VENDOR_ST_25USPT:
13954         case FLASH_5717VENDOR_ST_45USPT:
13955                 tp->nvram_jedecnum = JEDEC_ST;
13956                 tg3_flag_set(tp, NVRAM_BUFFERED);
13957                 tg3_flag_set(tp, FLASH);
13958
13959                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13960                 case FLASH_5717VENDOR_ST_M_M25PE20:
13961                 case FLASH_5717VENDOR_ST_M_M45PE20:
13962                         /* Detect size with tg3_nvram_get_size() */
13963                         break;
13964                 case FLASH_5717VENDOR_ST_A_M25PE20:
13965                 case FLASH_5717VENDOR_ST_A_M45PE20:
13966                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13967                         break;
13968                 default:
13969                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13970                         break;
13971                 }
13972                 break;
13973         default:
13974                 tg3_flag_set(tp, NO_NVRAM);
13975                 return;
13976         }
13977
13978         tg3_nvram_get_pagesize(tp, nvcfg1);
13979         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13980                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13981 }
13982
13983 static void tg3_get_5720_nvram_info(struct tg3 *tp)
13984 {
13985         u32 nvcfg1, nvmpinstrp;
13986
13987         nvcfg1 = tr32(NVRAM_CFG1);
13988         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13989
13990         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
13991                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
13992                         tg3_flag_set(tp, NO_NVRAM);
13993                         return;
13994                 }
13995
13996                 switch (nvmpinstrp) {
13997                 case FLASH_5762_EEPROM_HD:
13998                         nvmpinstrp = FLASH_5720_EEPROM_HD;
13999                         break;
14000                 case FLASH_5762_EEPROM_LD:
14001                         nvmpinstrp = FLASH_5720_EEPROM_LD;
14002                         break;
14003                 case FLASH_5720VENDOR_M_ST_M45PE20:
14004                         /* This pinstrap supports multiple sizes, so force it
14005                          * to read the actual size from location 0xf0.
14006                          */
14007                         nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14008                         break;
14009                 }
14010         }
14011
14012         switch (nvmpinstrp) {
14013         case FLASH_5720_EEPROM_HD:
14014         case FLASH_5720_EEPROM_LD:
14015                 tp->nvram_jedecnum = JEDEC_ATMEL;
14016                 tg3_flag_set(tp, NVRAM_BUFFERED);
14017
14018                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14019                 tw32(NVRAM_CFG1, nvcfg1);
14020                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14021                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14022                 else
14023                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14024                 return;
14025         case FLASH_5720VENDOR_M_ATMEL_DB011D:
14026         case FLASH_5720VENDOR_A_ATMEL_DB011B:
14027         case FLASH_5720VENDOR_A_ATMEL_DB011D:
14028         case FLASH_5720VENDOR_M_ATMEL_DB021D:
14029         case FLASH_5720VENDOR_A_ATMEL_DB021B:
14030         case FLASH_5720VENDOR_A_ATMEL_DB021D:
14031         case FLASH_5720VENDOR_M_ATMEL_DB041D:
14032         case FLASH_5720VENDOR_A_ATMEL_DB041B:
14033         case FLASH_5720VENDOR_A_ATMEL_DB041D:
14034         case FLASH_5720VENDOR_M_ATMEL_DB081D:
14035         case FLASH_5720VENDOR_A_ATMEL_DB081D:
14036         case FLASH_5720VENDOR_ATMEL_45USPT:
14037                 tp->nvram_jedecnum = JEDEC_ATMEL;
14038                 tg3_flag_set(tp, NVRAM_BUFFERED);
14039                 tg3_flag_set(tp, FLASH);
14040
14041                 switch (nvmpinstrp) {
14042                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14043                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14044                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14045                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14046                         break;
14047                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14048                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14049                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14050                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14051                         break;
14052                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14053                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14054                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14055                         break;
14056                 default:
14057                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14058                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14059                         break;
14060                 }
14061                 break;
14062         case FLASH_5720VENDOR_M_ST_M25PE10:
14063         case FLASH_5720VENDOR_M_ST_M45PE10:
14064         case FLASH_5720VENDOR_A_ST_M25PE10:
14065         case FLASH_5720VENDOR_A_ST_M45PE10:
14066         case FLASH_5720VENDOR_M_ST_M25PE20:
14067         case FLASH_5720VENDOR_M_ST_M45PE20:
14068         case FLASH_5720VENDOR_A_ST_M25PE20:
14069         case FLASH_5720VENDOR_A_ST_M45PE20:
14070         case FLASH_5720VENDOR_M_ST_M25PE40:
14071         case FLASH_5720VENDOR_M_ST_M45PE40:
14072         case FLASH_5720VENDOR_A_ST_M25PE40:
14073         case FLASH_5720VENDOR_A_ST_M45PE40:
14074         case FLASH_5720VENDOR_M_ST_M25PE80:
14075         case FLASH_5720VENDOR_M_ST_M45PE80:
14076         case FLASH_5720VENDOR_A_ST_M25PE80:
14077         case FLASH_5720VENDOR_A_ST_M45PE80:
14078         case FLASH_5720VENDOR_ST_25USPT:
14079         case FLASH_5720VENDOR_ST_45USPT:
14080                 tp->nvram_jedecnum = JEDEC_ST;
14081                 tg3_flag_set(tp, NVRAM_BUFFERED);
14082                 tg3_flag_set(tp, FLASH);
14083
14084                 switch (nvmpinstrp) {
14085                 case FLASH_5720VENDOR_M_ST_M25PE20:
14086                 case FLASH_5720VENDOR_M_ST_M45PE20:
14087                 case FLASH_5720VENDOR_A_ST_M25PE20:
14088                 case FLASH_5720VENDOR_A_ST_M45PE20:
14089                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14090                         break;
14091                 case FLASH_5720VENDOR_M_ST_M25PE40:
14092                 case FLASH_5720VENDOR_M_ST_M45PE40:
14093                 case FLASH_5720VENDOR_A_ST_M25PE40:
14094                 case FLASH_5720VENDOR_A_ST_M45PE40:
14095                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14096                         break;
14097                 case FLASH_5720VENDOR_M_ST_M25PE80:
14098                 case FLASH_5720VENDOR_M_ST_M45PE80:
14099                 case FLASH_5720VENDOR_A_ST_M25PE80:
14100                 case FLASH_5720VENDOR_A_ST_M45PE80:
14101                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14102                         break;
14103                 default:
14104                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14105                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14106                         break;
14107                 }
14108                 break;
14109         default:
14110                 tg3_flag_set(tp, NO_NVRAM);
14111                 return;
14112         }
14113
14114         tg3_nvram_get_pagesize(tp, nvcfg1);
14115         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14116                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14117
14118         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14119                 u32 val;
14120
14121                 if (tg3_nvram_read(tp, 0, &val))
14122                         return;
14123
14124                 if (val != TG3_EEPROM_MAGIC &&
14125                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14126                         tg3_flag_set(tp, NO_NVRAM);
14127         }
14128 }
14129
14130 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14131 static void tg3_nvram_init(struct tg3 *tp)
14132 {
14133         if (tg3_flag(tp, IS_SSB_CORE)) {
14134                 /* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
14135                 tg3_flag_clear(tp, NVRAM);
14136                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14137                 tg3_flag_set(tp, NO_NVRAM);
14138                 return;
14139         }
14140
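              /* Reset the EEPROM state machine and program the default
               * clock period before probing for NVRAM.
               */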
14141         tw32_f(GRC_EEPROM_ADDR,
14142              (EEPROM_ADDR_FSM_RESET |
14143               (EEPROM_DEFAULT_CLOCK_PERIOD <<
14144                EEPROM_ADDR_CLKPERD_SHIFT)));
14145
14146         msleep(1);
14147
14148         /* Enable seeprom accesses. */
14149         tw32_f(GRC_LOCAL_CTRL,
14150              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14151         udelay(100);
14152
14153         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14154             tg3_asic_rev(tp) != ASIC_REV_5701) {
14155                 tg3_flag_set(tp, NVRAM);
14156
14157                 if (tg3_nvram_lock(tp)) {
14158                         netdev_warn(tp->dev,
14159                                     "Cannot get nvram lock, %s failed\n",
14160                                     __func__);
14161                         return;
14162                 }
14163                 tg3_enable_nvram_access(tp);
14164
14165                 tp->nvram_size = 0;
14166
14167                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14168                         tg3_get_5752_nvram_info(tp);
14169                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14170                         tg3_get_5755_nvram_info(tp);
14171                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14172                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
14173                          tg3_asic_rev(tp) == ASIC_REV_5785)
14174                         tg3_get_5787_nvram_info(tp);
14175                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14176                         tg3_get_5761_nvram_info(tp);
14177                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14178                         tg3_get_5906_nvram_info(tp);
14179                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14180                          tg3_flag(tp, 57765_CLASS))
14181                         tg3_get_57780_nvram_info(tp);
14182                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14183                          tg3_asic_rev(tp) == ASIC_REV_5719)
14184                         tg3_get_5717_nvram_info(tp);
14185                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14186                          tg3_asic_rev(tp) == ASIC_REV_5762)
14187                         tg3_get_5720_nvram_info(tp);
14188                 else
14189                         tg3_get_nvram_info(tp);
14190
14191                 if (tp->nvram_size == 0)
14192                         tg3_get_nvram_size(tp);
14193
14194                 tg3_disable_nvram_access(tp);
14195                 tg3_nvram_unlock(tp);
14196
14197         } else {
14198                 tg3_flag_clear(tp, NVRAM);
14199                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14200
14201                 tg3_get_eeprom_size(tp);
14202         }
14203 }
14204
14205 struct subsys_tbl_ent {
14206         u16 subsys_vendor, subsys_devid;
14207         u32 phy_id;
14208 };
14209
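      /* Maps (subsystem vendor, subsystem device) pairs to PHY IDs for
       * boards whose EEPROM carries no valid signature.  A phy_id of 0
       * denotes a fiber board; tg3_phy_probe() treats a zero ID as a
       * SerDes PHY.
       */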
14210 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14211         /* Broadcom boards. */
14212         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14213           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14214         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14215           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14216         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14217           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14218         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14219           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14220         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14221           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14222         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14223           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14224         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14225           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14226         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14227           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14228         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14229           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14230         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14231           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14232         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14233           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14234
14235         /* 3com boards. */
14236         { TG3PCI_SUBVENDOR_ID_3COM,
14237           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14238         { TG3PCI_SUBVENDOR_ID_3COM,
14239           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14240         { TG3PCI_SUBVENDOR_ID_3COM,
14241           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14242         { TG3PCI_SUBVENDOR_ID_3COM,
14243           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14244         { TG3PCI_SUBVENDOR_ID_3COM,
14245           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14246
14247         /* DELL boards. */
14248         { TG3PCI_SUBVENDOR_ID_DELL,
14249           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14250         { TG3PCI_SUBVENDOR_ID_DELL,
14251           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14252         { TG3PCI_SUBVENDOR_ID_DELL,
14253           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14254         { TG3PCI_SUBVENDOR_ID_DELL,
14255           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14256
14257         /* Compaq boards. */
14258         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14259           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14260         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14261           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14262         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14263           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14264         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14265           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14266         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14267           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14268
14269         /* IBM boards. */
14270         { TG3PCI_SUBVENDOR_ID_IBM,
14271           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14272 };
14273
14274 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14275 {
14276         int i;
14277
14278         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14279                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14280                      tp->pdev->subsystem_vendor) &&
14281                     (subsys_id_to_phy_id[i].subsys_devid ==
14282                      tp->pdev->subsystem_device))
14283                         return &subsys_id_to_phy_id[i];
14284         }
14285         return NULL;
14286 }
14287
14288 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14289 {
14290         u32 val;
14291
14292         tp->phy_id = TG3_PHY_ID_INVALID;
14293         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14294
14295         /* Assume an onboard, WOL-capable device by default. */
14296         tg3_flag_set(tp, EEPROM_WRITE_PROT);
14297         tg3_flag_set(tp, WOL_CAP);
14298
14299         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14300                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14301                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14302                         tg3_flag_set(tp, IS_NIC);
14303                 }
14304                 val = tr32(VCPU_CFGSHDW);
14305                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14306                         tg3_flag_set(tp, ASPM_WORKAROUND);
14307                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14308                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14309                         tg3_flag_set(tp, WOL_ENABLE);
14310                         device_set_wakeup_enable(&tp->pdev->dev, true);
14311                 }
14312                 goto done;
14313         }
14314
14315         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14316         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14317                 u32 nic_cfg, led_cfg;
14318                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14319                 int eeprom_phy_serdes = 0;
14320
14321                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14322                 tp->nic_sram_data_cfg = nic_cfg;
14323
14324                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14325                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14326                 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14327                     tg3_asic_rev(tp) != ASIC_REV_5701 &&
14328                     tg3_asic_rev(tp) != ASIC_REV_5703 &&
14329                     (ver > 0) && (ver < 0x100))
14330                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14331
14332                 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14333                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14334
14335                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14336                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14337                         eeprom_phy_serdes = 1;
14338
14339                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14340                 if (nic_phy_id != 0) {
14341                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14342                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14343
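                              /* Pack the two SRAM ID words into the same
                               * internal PHY ID format that tg3_phy_probe()
                               * builds from MII_PHYSID1/MII_PHYSID2.
                               */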
14344                         eeprom_phy_id  = (id1 >> 16) << 10;
14345                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
14346                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
14347                 } else
14348                         eeprom_phy_id = 0;
14349
14350                 tp->phy_id = eeprom_phy_id;
14351                 if (eeprom_phy_serdes) {
14352                         if (!tg3_flag(tp, 5705_PLUS))
14353                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14354                         else
14355                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14356                 }
14357
14358                 if (tg3_flag(tp, 5750_PLUS))
14359                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14360                                     SHASTA_EXT_LED_MODE_MASK);
14361                 else
14362                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14363
14364                 switch (led_cfg) {
14365                 default:
14366                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14367                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14368                         break;
14369
14370                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14371                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14372                         break;
14373
14374                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14375                         tp->led_ctrl = LED_CTRL_MODE_MAC;
14376
14377                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14378                          * read, as with some older 5700/5701 bootcode.
14379                          */
14380                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14381                             tg3_asic_rev(tp) == ASIC_REV_5701)
14382                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14383
14384                         break;
14385
14386                 case SHASTA_EXT_LED_SHARED:
14387                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
14388                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14389                             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14390                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14391                                                  LED_CTRL_MODE_PHY_2);
14392                         break;
14393
14394                 case SHASTA_EXT_LED_MAC:
14395                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14396                         break;
14397
14398                 case SHASTA_EXT_LED_COMBO:
14399                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
14400                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14401                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14402                                                  LED_CTRL_MODE_PHY_2);
14403                         break;
14404
14405                 }
14406
14407                 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14408                      tg3_asic_rev(tp) == ASIC_REV_5701) &&
14409                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14410                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14411
14412                 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14413                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14414
14415                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14416                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
14417                         if ((tp->pdev->subsystem_vendor ==
14418                              PCI_VENDOR_ID_ARIMA) &&
14419                             (tp->pdev->subsystem_device == 0x205a ||
14420                              tp->pdev->subsystem_device == 0x2063))
14421                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14422                 } else {
14423                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14424                         tg3_flag_set(tp, IS_NIC);
14425                 }
14426
14427                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14428                         tg3_flag_set(tp, ENABLE_ASF);
14429                         if (tg3_flag(tp, 5750_PLUS))
14430                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14431                 }
14432
14433                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14434                     tg3_flag(tp, 5750_PLUS))
14435                         tg3_flag_set(tp, ENABLE_APE);
14436
14437                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14438                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14439                         tg3_flag_clear(tp, WOL_CAP);
14440
14441                 if (tg3_flag(tp, WOL_CAP) &&
14442                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14443                         tg3_flag_set(tp, WOL_ENABLE);
14444                         device_set_wakeup_enable(&tp->pdev->dev, true);
14445                 }
14446
14447                 if (cfg2 & (1 << 17))
14448                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14449
14450                 /* SerDes signal pre-emphasis in register 0x590 is
14451                  * set by the bootcode if bit 18 is set. */
14452                 if (cfg2 & (1 << 18))
14453                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14454
14455                 if ((tg3_flag(tp, 57765_PLUS) ||
14456                      (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14457                       tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14458                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14459                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14460
14461                 if (tg3_flag(tp, PCI_EXPRESS) &&
14462                     tg3_asic_rev(tp) != ASIC_REV_5785 &&
14463                     !tg3_flag(tp, 57765_PLUS)) {
14464                         u32 cfg3;
14465
14466                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14467                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
14468                                 tg3_flag_set(tp, ASPM_WORKAROUND);
14469                 }
14470
14471                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14472                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14473                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14474                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14475                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14476                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14477         }
14478 done:
14479         if (tg3_flag(tp, WOL_CAP))
14480                 device_set_wakeup_enable(&tp->pdev->dev,
14481                                          tg3_flag(tp, WOL_ENABLE));
14482         else
14483                 device_set_wakeup_capable(&tp->pdev->dev, false);
14484 }
14485
14486 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14487 {
14488         int i, err;
14489         u32 val2, off = offset * 8;
14490
14491         err = tg3_nvram_lock(tp);
14492         if (err)
14493                 return err;
14494
14495         tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14496         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14497                         APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14498         tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14499         udelay(10);
14500
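              /* Poll for up to 1 ms (100 iterations x 10 us) for the
               * OTP read to complete.
               */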
14501         for (i = 0; i < 100; i++) {
14502                 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14503                 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14504                         *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14505                         break;
14506                 }
14507                 udelay(10);
14508         }
14509
14510         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14511
14512         tg3_nvram_unlock(tp);
14513         if (val2 & APE_OTP_STATUS_CMD_DONE)
14514                 return 0;
14515
14516         return -EBUSY;
14517 }
14518
14519 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14520 {
14521         int i;
14522         u32 val;
14523
14524         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14525         tw32(OTP_CTRL, cmd);
14526
14527         /* Wait for up to 1 ms for command to execute. */
14528         for (i = 0; i < 100; i++) {
14529                 val = tr32(OTP_STATUS);
14530                 if (val & OTP_STATUS_CMD_DONE)
14531                         break;
14532                 udelay(10);
14533         }
14534
14535         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14536 }
14537
14538 /* Read the gphy configuration from the OTP region of the chip.  The gphy
14539  * configuration is a 32-bit value that straddles the alignment boundary.
14540  * We do two 32-bit reads and then shift and merge the results.
14541  */
14542 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14543 {
14544         u32 bhalf_otp, thalf_otp;
14545
14546         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14547
14548         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14549                 return 0;
14550
14551         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14552
14553         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14554                 return 0;
14555
14556         thalf_otp = tr32(OTP_READ_DATA);
14557
14558         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14559
14560         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14561                 return 0;
14562
14563         bhalf_otp = tr32(OTP_READ_DATA);
14564
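              /* Keep the low half of the first word and the high half of
               * the second.  Illustrative values only: thalf_otp =
               * 0x1234abcd and bhalf_otp = 0x5678ef01 merge to 0xabcd5678.
               */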
14565         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14566 }
14567
14568 static void tg3_phy_init_link_config(struct tg3 *tp)
14569 {
14570         u32 adv = ADVERTISED_Autoneg;
14571
14572         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14573                 adv |= ADVERTISED_1000baseT_Half |
14574                        ADVERTISED_1000baseT_Full;
14575
14576         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14577                 adv |= ADVERTISED_100baseT_Half |
14578                        ADVERTISED_100baseT_Full |
14579                        ADVERTISED_10baseT_Half |
14580                        ADVERTISED_10baseT_Full |
14581                        ADVERTISED_TP;
14582         else
14583                 adv |= ADVERTISED_FIBRE;
14584
14585         tp->link_config.advertising = adv;
14586         tp->link_config.speed = SPEED_UNKNOWN;
14587         tp->link_config.duplex = DUPLEX_UNKNOWN;
14588         tp->link_config.autoneg = AUTONEG_ENABLE;
14589         tp->link_config.active_speed = SPEED_UNKNOWN;
14590         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14591
14592         tp->old_link = -1;
14593 }
14594
14595 static int tg3_phy_probe(struct tg3 *tp)
14596 {
14597         u32 hw_phy_id_1, hw_phy_id_2;
14598         u32 hw_phy_id, hw_phy_id_masked;
14599         int err;
14600
14601         /* Flow control autonegotiation is the default behavior. */
14602         tg3_flag_set(tp, PAUSE_AUTONEG);
14603         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14604
14605         if (tg3_flag(tp, ENABLE_APE)) {
14606                 switch (tp->pci_fn) {
14607                 case 0:
14608                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14609                         break;
14610                 case 1:
14611                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14612                         break;
14613                 case 2:
14614                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14615                         break;
14616                 case 3:
14617                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14618                         break;
14619                 }
14620         }
14621
14622         if (tg3_flag(tp, USE_PHYLIB))
14623                 return tg3_phy_init(tp);
14624
14625         /* Reading the PHY ID register can conflict with ASF
14626          * firmware access to the PHY hardware.
14627          */
14628         err = 0;
14629         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14630                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14631         } else {
14632                 /* Now read the physical PHY_ID from the chip and verify
14633                  * that it is sane.  If it doesn't look good, we fall back
14634                  * to the PHY_ID found in the EEPROM area and, failing
14635                  * that, the hard-coded subsystem-ID table.
14636                  */
14637                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14638                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14639
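                      /* Fold the MII ID registers into the driver's
                       * internal PHY ID format so the result can be
                       * compared against the TG3_PHY_ID_* constants.
                       */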
14640                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
14641                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14642                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
14643
14644                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14645         }
14646
14647         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14648                 tp->phy_id = hw_phy_id;
14649                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14650                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14651                 else
14652                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14653         } else {
14654                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14655                         /* Do nothing; the PHY ID was already set up
14656                          * in tg3_get_eeprom_hw_cfg().
14657                          */
14658                 } else {
14659                         struct subsys_tbl_ent *p;
14660
14661                         /* No EEPROM signature?  Try the hard-coded
14662                          * subsystem device table.
14663                          */
14664                         p = tg3_lookup_by_subsys(tp);
14665                         if (p) {
14666                                 tp->phy_id = p->phy_id;
14667                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
14668                                 /* So far we have seen the IDs 0xbc050cd0,
14669                                  * 0xbc050f80 and 0xbc050c30 on devices
14670                                  * connected to a BCM4785, and there are
14671                                  * probably more.  For now, just assume
14672                                  * that the PHY is supported when it is
14673                                  * connected to an SSB core.
14674                                  */
14675                                 return -ENODEV;
14676                         }
14677
14678                         if (!tp->phy_id ||
14679                             tp->phy_id == TG3_PHY_ID_BCM8002)
14680                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14681                 }
14682         }
14683
14684         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14685             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
14686              tg3_asic_rev(tp) == ASIC_REV_5720 ||
14687              tg3_asic_rev(tp) == ASIC_REV_57766 ||
14688              tg3_asic_rev(tp) == ASIC_REV_5762 ||
14689              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
14690               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
14691              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
14692               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
14693                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14694
14695         tg3_phy_init_link_config(tp);
14696
14697         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14698             !tg3_flag(tp, ENABLE_APE) &&
14699             !tg3_flag(tp, ENABLE_ASF)) {
14700                 u32 bmsr, dummy;
14701
14702                 tg3_readphy(tp, MII_BMSR, &bmsr);
14703                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14704                     (bmsr & BMSR_LSTATUS))
14705                         goto skip_phy_reset;
14706
14707                 err = tg3_phy_reset(tp);
14708                 if (err)
14709                         return err;
14710
14711                 tg3_phy_set_wirespeed(tp);
14712
14713                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14714                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14715                                             tp->link_config.flowctrl);
14716
14717                         tg3_writephy(tp, MII_BMCR,
14718                                      BMCR_ANENABLE | BMCR_ANRESTART);
14719                 }
14720         }
14721
14722 skip_phy_reset:
14723         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
14724                 err = tg3_init_5401phy_dsp(tp);
14725                 if (err)
14726                         return err;
14727
14728                 err = tg3_init_5401phy_dsp(tp);
14729         }
14730
14731         return err;
14732 }
14733
14734 static void tg3_read_vpd(struct tg3 *tp)
14735 {
14736         u8 *vpd_data;
14737         unsigned int block_end, rosize, len;
14738         u32 vpdlen;
14739         int j, i = 0;
14740
14741         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14742         if (!vpd_data)
14743                 goto out_no_vpd;
14744
14745         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14746         if (i < 0)
14747                 goto out_not_found;
14748
14749         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14750         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14751         i += PCI_VPD_LRDT_TAG_SIZE;
14752
14753         if (block_end > vpdlen)
14754                 goto out_not_found;
14755
14756         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14757                                       PCI_VPD_RO_KEYWORD_MFR_ID);
14758         if (j > 0) {
14759                 len = pci_vpd_info_field_size(&vpd_data[j]);
14760
14761                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
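                      /* "1028" is Dell's PCI vendor ID in ASCII; the
                       * VENDOR0 keyword below carries a firmware version
                       * only on such boards.
                       */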
14762                 if (j + len > block_end || len != 4 ||
14763                     memcmp(&vpd_data[j], "1028", 4))
14764                         goto partno;
14765
14766                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14767                                               PCI_VPD_RO_KEYWORD_VENDOR0);
14768                 if (j < 0)
14769                         goto partno;
14770
14771                 len = pci_vpd_info_field_size(&vpd_data[j]);
14772
14773                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14774                 if (j + len > block_end)
14775                         goto partno;
14776
14777                 if (len >= sizeof(tp->fw_ver))
14778                         len = sizeof(tp->fw_ver) - 1;
14779                 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
14780                 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
14781                          &vpd_data[j]);
14782         }
14783
14784 partno:
14785         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14786                                       PCI_VPD_RO_KEYWORD_PARTNO);
14787         if (i < 0)
14788                 goto out_not_found;
14789
14790         len = pci_vpd_info_field_size(&vpd_data[i]);
14791
14792         i += PCI_VPD_INFO_FLD_HDR_SIZE;
14793         if (len > TG3_BPN_SIZE ||
14794             (len + i) > vpdlen)
14795                 goto out_not_found;
14796
14797         memcpy(tp->board_part_number, &vpd_data[i], len);
14798
14799 out_not_found:
14800         kfree(vpd_data);
14801         if (tp->board_part_number[0])
14802                 return;
14803
14804 out_no_vpd:
14805         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
14806                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14807                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14808                         strcpy(tp->board_part_number, "BCM5717");
14809                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14810                         strcpy(tp->board_part_number, "BCM5718");
14811                 else
14812                         goto nomatch;
14813         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
14814                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14815                         strcpy(tp->board_part_number, "BCM57780");
14816                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14817                         strcpy(tp->board_part_number, "BCM57760");
14818                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14819                         strcpy(tp->board_part_number, "BCM57790");
14820                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14821                         strcpy(tp->board_part_number, "BCM57788");
14822                 else
14823                         goto nomatch;
14824         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
14825                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14826                         strcpy(tp->board_part_number, "BCM57761");
14827                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14828                         strcpy(tp->board_part_number, "BCM57765");
14829                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14830                         strcpy(tp->board_part_number, "BCM57781");
14831                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14832                         strcpy(tp->board_part_number, "BCM57785");
14833                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14834                         strcpy(tp->board_part_number, "BCM57791");
14835                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14836                         strcpy(tp->board_part_number, "BCM57795");
14837                 else
14838                         goto nomatch;
14839         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
14840                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14841                         strcpy(tp->board_part_number, "BCM57762");
14842                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14843                         strcpy(tp->board_part_number, "BCM57766");
14844                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14845                         strcpy(tp->board_part_number, "BCM57782");
14846                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14847                         strcpy(tp->board_part_number, "BCM57786");
14848                 else
14849                         goto nomatch;
14850         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14851                 strcpy(tp->board_part_number, "BCM95906");
14852         } else {
14853 nomatch:
14854                 strcpy(tp->board_part_number, "none");
14855         }
14856 }
14857
14858 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14859 {
14860         u32 val;
14861
14862         if (tg3_nvram_read(tp, offset, &val) ||
14863             (val & 0xfc000000) != 0x0c000000 ||
14864             tg3_nvram_read(tp, offset + 4, &val) ||
14865             val != 0)
14866                 return 0;
14867
14868         return 1;
14869 }
14870
14871 static void tg3_read_bc_ver(struct tg3 *tp)
14872 {
14873         u32 val, offset, start, ver_offset;
14874         int i, dst_off;
14875         bool newver = false;
14876
14877         if (tg3_nvram_read(tp, 0xc, &offset) ||
14878             tg3_nvram_read(tp, 0x4, &start))
14879                 return;
14880
14881         offset = tg3_nvram_logical_addr(tp, offset);
14882
14883         if (tg3_nvram_read(tp, offset, &val))
14884                 return;
14885
14886         if ((val & 0xfc000000) == 0x0c000000) {
14887                 if (tg3_nvram_read(tp, offset + 4, &val))
14888                         return;
14889
14890                 if (val == 0)
14891                         newver = true;
14892         }
14893
14894         dst_off = strlen(tp->fw_ver);
14895
14896         if (newver) {
14897                 if (TG3_VER_SIZE - dst_off < 16 ||
14898                     tg3_nvram_read(tp, offset + 8, &ver_offset))
14899                         return;
14900
14901                 offset = offset + ver_offset - start;
14902                 for (i = 0; i < 16; i += 4) {
14903                         __be32 v;
14904                         if (tg3_nvram_read_be32(tp, offset + i, &v))
14905                                 return;
14906
14907                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14908                 }
14909         } else {
14910                 u32 major, minor;
14911
14912                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14913                         return;
14914
14915                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14916                         TG3_NVM_BCVER_MAJSFT;
14917                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14918                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14919                          "v%d.%02d", major, minor);
14920         }
14921 }
14922
14923 static void tg3_read_hwsb_ver(struct tg3 *tp)
14924 {
14925         u32 val, major, minor;
14926
14927         /* Use native endian representation */
14928         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14929                 return;
14930
14931         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14932                 TG3_NVM_HWSB_CFG1_MAJSFT;
14933         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14934                 TG3_NVM_HWSB_CFG1_MINSFT;
14935
14936         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
14937 }
14938
14939 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
14940 {
14941         u32 offset, major, minor, build;
14942
14943         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14944
14945         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14946                 return;
14947
14948         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14949         case TG3_EEPROM_SB_REVISION_0:
14950                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14951                 break;
14952         case TG3_EEPROM_SB_REVISION_2:
14953                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14954                 break;
14955         case TG3_EEPROM_SB_REVISION_3:
14956                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14957                 break;
14958         case TG3_EEPROM_SB_REVISION_4:
14959                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14960                 break;
14961         case TG3_EEPROM_SB_REVISION_5:
14962                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14963                 break;
14964         case TG3_EEPROM_SB_REVISION_6:
14965                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14966                 break;
14967         default:
14968                 return;
14969         }
14970
14971         if (tg3_nvram_read(tp, offset, &val))
14972                 return;
14973
14974         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14975                 TG3_EEPROM_SB_EDH_BLD_SHFT;
14976         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14977                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14978         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
14979
14980         if (minor > 99 || build > 26)
14981                 return;
14982
14983         offset = strlen(tp->fw_ver);
14984         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14985                  " v%d.%02d", major, minor);
14986
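              /* Builds 1-26 are encoded as a letter suffix, 'a' for
               * build 1 through 'z' for build 26; larger build numbers
               * were rejected above.
               */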
14987         if (build > 0) {
14988                 offset = strlen(tp->fw_ver);
14989                 if (offset < TG3_VER_SIZE - 1)
14990                         tp->fw_ver[offset] = 'a' + build - 1;
14991         }
14992 }
14993
14994 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
14995 {
14996         u32 val, offset, start;
14997         int i, vlen;
14998
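              /* Walk the NVRAM directory looking for the ASF firmware
               * image entry.
               */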
14999         for (offset = TG3_NVM_DIR_START;
15000              offset < TG3_NVM_DIR_END;
15001              offset += TG3_NVM_DIRENT_SIZE) {
15002                 if (tg3_nvram_read(tp, offset, &val))
15003                         return;
15004
15005                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15006                         break;
15007         }
15008
15009         if (offset == TG3_NVM_DIR_END)
15010                 return;
15011
15012         if (!tg3_flag(tp, 5705_PLUS))
15013                 start = 0x08000000;
15014         else if (tg3_nvram_read(tp, offset - 4, &start))
15015                 return;
15016
15017         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15018             !tg3_fw_img_is_valid(tp, offset) ||
15019             tg3_nvram_read(tp, offset + 8, &val))
15020                 return;
15021
15022         offset += val - start;
15023
15024         vlen = strlen(tp->fw_ver);
15025
15026         tp->fw_ver[vlen++] = ',';
15027         tp->fw_ver[vlen++] = ' ';
15028
15029         for (i = 0; i < 4; i++) {
15030                 __be32 v;
15031                 if (tg3_nvram_read_be32(tp, offset, &v))
15032                         return;
15033
15034                 offset += sizeof(v);
15035
15036                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15037                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15038                         break;
15039                 }
15040
15041                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15042                 vlen += sizeof(v);
15043         }
15044 }
15045
15046 static void tg3_probe_ncsi(struct tg3 *tp)
15047 {
15048         u32 apedata;
15049
15050         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15051         if (apedata != APE_SEG_SIG_MAGIC)
15052                 return;
15053
15054         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15055         if (!(apedata & APE_FW_STATUS_READY))
15056                 return;
15057
15058         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15059                 tg3_flag_set(tp, APE_HAS_NCSI);
15060 }
15061
15062 static void tg3_read_dash_ver(struct tg3 *tp)
15063 {
15064         int vlen;
15065         u32 apedata;
15066         char *fwtype;
15067
15068         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15069
15070         if (tg3_flag(tp, APE_HAS_NCSI))
15071                 fwtype = "NCSI";
15072         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15073                 fwtype = "SMASH";
15074         else
15075                 fwtype = "DASH";
15076
15077         vlen = strlen(tp->fw_ver);
15078
15079         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15080                  fwtype,
15081                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15082                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15083                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15084                  (apedata & APE_FW_VERSION_BLDMSK));
15085 }
15086
15087 static void tg3_read_otp_ver(struct tg3 *tp)
15088 {
15089         u32 val, val2;
15090
15091         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15092                 return;
15093
15094         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15095             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15096             TG3_OTP_MAGIC0_VALID(val)) {
15097                 u64 val64 = (u64) val << 32 | val2;
15098                 u32 ver = 0;
15099                 int i, vlen;
15100
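                      /* Walk the version bytes low-to-high, stopping at
                       * the first zero byte; "ver" retains the last
                       * non-zero byte seen.
                       */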
15101                 for (i = 0; i < 7; i++) {
15102                         if ((val64 & 0xff) == 0)
15103                                 break;
15104                         ver = val64 & 0xff;
15105                         val64 >>= 8;
15106                 }
15107                 vlen = strlen(tp->fw_ver);
15108                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15109         }
15110 }
15111
15112 static void tg3_read_fw_ver(struct tg3 *tp)
15113 {
15114         u32 val;
15115         bool vpd_vers = false;
15116
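              /* A non-empty fw_ver here means tg3_read_vpd() already
               * supplied a version string; in that case skip reading the
               * management firmware version below.
               */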
15117         if (tp->fw_ver[0] != 0)
15118                 vpd_vers = true;
15119
15120         if (tg3_flag(tp, NO_NVRAM)) {
15121                 strcat(tp->fw_ver, "sb");
15122                 tg3_read_otp_ver(tp);
15123                 return;
15124         }
15125
15126         if (tg3_nvram_read(tp, 0, &val))
15127                 return;
15128
15129         if (val == TG3_EEPROM_MAGIC)
15130                 tg3_read_bc_ver(tp);
15131         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15132                 tg3_read_sb_ver(tp, val);
15133         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15134                 tg3_read_hwsb_ver(tp);
15135
15136         if (tg3_flag(tp, ENABLE_ASF)) {
15137                 if (tg3_flag(tp, ENABLE_APE)) {
15138                         tg3_probe_ncsi(tp);
15139                         if (!vpd_vers)
15140                                 tg3_read_dash_ver(tp);
15141                 } else if (!vpd_vers) {
15142                         tg3_read_mgmtfw_ver(tp);
15143                 }
15144         }
15145
15146         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15147 }
15148
15149 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15150 {
15151         if (tg3_flag(tp, LRG_PROD_RING_CAP))
15152                 return TG3_RX_RET_MAX_SIZE_5717;
15153         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15154                 return TG3_RX_RET_MAX_SIZE_5700;
15155         else
15156                 return TG3_RX_RET_MAX_SIZE_5705;
15157 }
15158
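      /* Host bridge chipsets known to reorder posted PCI writes; the
       * driver matches against this table elsewhere to decide whether
       * a write-ordering workaround is required.
       */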
15159 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15160         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15161         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15162         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15163         { },
15164 };
15165
15166 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15167 {
15168         struct pci_dev *peer;
15169         unsigned int func, devnr = tp->pdev->devfn & ~7;
15170
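              /* devfn & ~7 masks off the three function bits; scan all
               * eight functions in this slot for the companion device.
               */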
15171         for (func = 0; func < 8; func++) {
15172                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15173                 if (peer && peer != tp->pdev)
15174                         break;
15175                 pci_dev_put(peer);
15176         }
15177         /* The 5704 can be configured in single-port mode; in that
15178          * case, set peer to tp->pdev.
15179          */
15180         if (!peer) {
15181                 peer = tp->pdev;
15182                 return peer;
15183         }
15184
15185         /*
15186          * We don't need to keep the refcount elevated; there's no way
15187          * to remove one half of this device without removing the other.
15188          */
15189         pci_dev_put(peer);
15190
15191         return peer;
15192 }
15193
15194 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15195 {
15196         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15197         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15198                 u32 reg;
15199
15200                 /* All devices that use the alternate
15201                  * ASIC REV location have a CPMU.
15202                  */
15203                 tg3_flag_set(tp, CPMU_PRESENT);
15204
15205                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15206                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15207                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15208                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15209                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15210                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15211                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15212                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15213                         reg = TG3PCI_GEN2_PRODID_ASICREV;
15214                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15215                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15216                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15217                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15218                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15219                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15220                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15221                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15222                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15223                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15224                         reg = TG3PCI_GEN15_PRODID_ASICREV;
15225                 else
15226                         reg = TG3PCI_PRODID_ASICREV;
15227
15228                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15229         }
15230
15231         /* Wrong chip ID in 5752 A0. This code can be removed later
15232          * as A0 is not in production.
15233          */
15234         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15235                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15236
15237         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15238                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15239
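              /* The generation flags set below are cumulative: 5717_PLUS
               * implies 57765_PLUS, which implies 5755_PLUS, which in
               * turn implies 5750_PLUS and 5705_PLUS.
               */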
15240         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15241             tg3_asic_rev(tp) == ASIC_REV_5719 ||
15242             tg3_asic_rev(tp) == ASIC_REV_5720)
15243                 tg3_flag_set(tp, 5717_PLUS);
15244
15245         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15246             tg3_asic_rev(tp) == ASIC_REV_57766)
15247                 tg3_flag_set(tp, 57765_CLASS);
15248
15249         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15250              tg3_asic_rev(tp) == ASIC_REV_5762)
15251                 tg3_flag_set(tp, 57765_PLUS);
15252
15253         /* Intentionally exclude ASIC_REV_5906 */
15254         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15255             tg3_asic_rev(tp) == ASIC_REV_5787 ||
15256             tg3_asic_rev(tp) == ASIC_REV_5784 ||
15257             tg3_asic_rev(tp) == ASIC_REV_5761 ||
15258             tg3_asic_rev(tp) == ASIC_REV_5785 ||
15259             tg3_asic_rev(tp) == ASIC_REV_57780 ||
15260             tg3_flag(tp, 57765_PLUS))
15261                 tg3_flag_set(tp, 5755_PLUS);
15262
15263         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15264             tg3_asic_rev(tp) == ASIC_REV_5714)
15265                 tg3_flag_set(tp, 5780_CLASS);
15266
15267         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15268             tg3_asic_rev(tp) == ASIC_REV_5752 ||
15269             tg3_asic_rev(tp) == ASIC_REV_5906 ||
15270             tg3_flag(tp, 5755_PLUS) ||
15271             tg3_flag(tp, 5780_CLASS))
15272                 tg3_flag_set(tp, 5750_PLUS);
15273
15274         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15275             tg3_flag(tp, 5750_PLUS))
15276                 tg3_flag_set(tp, 5705_PLUS);
15277 }
15278
15279 static bool tg3_10_100_only_device(struct tg3 *tp,
15280                                    const struct pci_device_id *ent)
15281 {
15282         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15283
15284         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15285              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15286             (tp->phy_flags & TG3_PHYFLG_IS_FET))
15287                 return true;
15288
15289         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15290                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15291                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15292                                 return true;
15293                 } else {
15294                         return true;
15295                 }
15296         }
15297
15298         return false;
15299 }
15300
15301 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15302 {
15303         u32 misc_ctrl_reg;
15304         u32 pci_state_reg, grc_misc_cfg;
15305         u32 val;
15306         u16 pci_cmd;
15307         int err;
15308
15309         /* Force memory write invalidate off.  If we leave it on,
15310          * then on 5700_BX chips we have to enable a workaround.
15311          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15312          * to match the cacheline size.  The Broadcom driver has this
15313          * workaround but turns MWI off all the time, so it never uses
15314          * it.  This seems to suggest that the workaround is insufficient.
15315          */
15316         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15317         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15318         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15319
15320         /* Important! -- Make sure register accesses are byteswapped
15321          * correctly.  Also, for those chips that require it, make
15322          * sure that indirect register accesses are enabled before
15323          * the first operation.
15324          */
15325         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15326                               &misc_ctrl_reg);
15327         tp->misc_host_ctrl |= (misc_ctrl_reg &
15328                                MISC_HOST_CTRL_CHIPREV);
15329         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15330                                tp->misc_host_ctrl);
15331
15332         tg3_detect_asic_rev(tp, misc_ctrl_reg);
15333
15334         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15335          * we need to disable memory and use config. cycles
15336          * only to access all registers. The 5702/03 chips
15337          * can mistakenly decode the special cycles from the
15338          * ICH chipsets as memory write cycles, causing corruption
15339          * of register and memory space. Only certain ICH bridges
15340          * will drive special cycles with non-zero data during the
15341          * address phase which can fall within the 5703's address
15342          * range. This is not an ICH bug as the PCI spec allows
15343          * non-zero address during special cycles. However, only
15344          * these ICH bridges are known to drive non-zero addresses
15345          * during special cycles.
15346          *
15347          * Since special cycles do not cross PCI bridges, we only
15348          * enable this workaround if the 5703 is on the secondary
15349          * bus of these ICH bridges.
15350          */
15351         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15352             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15353                 static struct tg3_dev_id {
15354                         u32     vendor;
15355                         u32     device;
15356                         u32     rev;
15357                 } ich_chipsets[] = {
15358                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15359                           PCI_ANY_ID },
15360                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15361                           PCI_ANY_ID },
15362                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15363                           0xa },
15364                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15365                           PCI_ANY_ID },
15366                         { },
15367                 };
15368                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15369                 struct pci_dev *bridge = NULL;
15370
15371                 while (pci_id->vendor != 0) {
15372                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
15373                                                 bridge);
15374                         if (!bridge) {
15375                                 pci_id++;
15376                                 continue;
15377                         }
15378                         if (pci_id->rev != PCI_ANY_ID) {
15379                                 if (bridge->revision > pci_id->rev)
15380                                         continue;
15381                         }
15382                         if (bridge->subordinate &&
15383                             (bridge->subordinate->number ==
15384                              tp->pdev->bus->number)) {
15385                                 tg3_flag_set(tp, ICH_WORKAROUND);
15386                                 pci_dev_put(bridge);
15387                                 break;
15388                         }
15389                 }
15390         }
15391
15392         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15393                 static struct tg3_dev_id {
15394                         u32     vendor;
15395                         u32     device;
15396                 } bridge_chipsets[] = {
15397                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15398                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15399                         { },
15400                 };
15401                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15402                 struct pci_dev *bridge = NULL;
15403
15404                 while (pci_id->vendor != 0) {
15405                         bridge = pci_get_device(pci_id->vendor,
15406                                                 pci_id->device,
15407                                                 bridge);
15408                         if (!bridge) {
15409                                 pci_id++;
15410                                 continue;
15411                         }
15412                         if (bridge->subordinate &&
15413                             (bridge->subordinate->number <=
15414                              tp->pdev->bus->number) &&
15415                             (bridge->subordinate->busn_res.end >=
15416                              tp->pdev->bus->number)) {
15417                                 tg3_flag_set(tp, 5701_DMA_BUG);
15418                                 pci_dev_put(bridge);
15419                                 break;
15420                         }
15421                 }
15422         }
15423
15424         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15425          * DMA addresses wider than 40 bits.  This bridge may have
15426          * additional 57xx devices behind it in some 4-port NIC designs.
15427          * Any tg3 device found behind the bridge will also need the 40-bit
15428          * DMA workaround.
15429          */
15430         if (tg3_flag(tp, 5780_CLASS)) {
15431                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15432                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15433         } else {
15434                 struct pci_dev *bridge = NULL;
15435
15436                 do {
15437                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15438                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
15439                                                 bridge);
15440                         if (bridge && bridge->subordinate &&
15441                             (bridge->subordinate->number <=
15442                              tp->pdev->bus->number) &&
15443                             (bridge->subordinate->busn_res.end >=
15444                              tp->pdev->bus->number)) {
15445                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15446                                 pci_dev_put(bridge);
15447                                 break;
15448                         }
15449                 } while (bridge);
15450         }
15451
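              /* 5704 and 5714 are dual-port devices; record the peer PCI
               * function so quirks that depend on both ports (such as the
               * MSI check below) can examine it.
               */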
15452         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15453             tg3_asic_rev(tp) == ASIC_REV_5714)
15454                 tp->pdev_peer = tg3_find_peer(tp);
15455
15456         /* Determine TSO capabilities */
15457         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15458                 ; /* Do nothing. HW bug. */
15459         else if (tg3_flag(tp, 57765_PLUS))
15460                 tg3_flag_set(tp, HW_TSO_3);
15461         else if (tg3_flag(tp, 5755_PLUS) ||
15462                  tg3_asic_rev(tp) == ASIC_REV_5906)
15463                 tg3_flag_set(tp, HW_TSO_2);
15464         else if (tg3_flag(tp, 5750_PLUS)) {
15465                 tg3_flag_set(tp, HW_TSO_1);
15466                 tg3_flag_set(tp, TSO_BUG);
15467                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15468                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15469                         tg3_flag_clear(tp, TSO_BUG);
15470         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15471                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15472                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15473                 tg3_flag_set(tp, FW_TSO);
15474                 tg3_flag_set(tp, TSO_BUG);
15475                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15476                         tp->fw_needed = FIRMWARE_TG3TSO5;
15477                 else
15478                         tp->fw_needed = FIRMWARE_TG3TSO;
15479         }
15480
15481         /* Selectively allow TSO based on operating conditions */
15482         if (tg3_flag(tp, HW_TSO_1) ||
15483             tg3_flag(tp, HW_TSO_2) ||
15484             tg3_flag(tp, HW_TSO_3) ||
15485             tg3_flag(tp, FW_TSO)) {
15486                 /* For firmware TSO, assume ASF is disabled.
15487                  * We'll disable TSO later if we discover ASF
15488                  * is enabled in tg3_get_eeprom_hw_cfg().
15489                  */
15490                 tg3_flag_set(tp, TSO_CAPABLE);
15491         } else {
15492                 tg3_flag_clear(tp, TSO_CAPABLE);
15493                 tg3_flag_clear(tp, TSO_BUG);
15494                 tp->fw_needed = NULL;
15495         }
15496
15497         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15498                 tp->fw_needed = FIRMWARE_TG3;
15499
15500         if (tg3_asic_rev(tp) == ASIC_REV_57766)
15501                 tp->fw_needed = FIRMWARE_TG357766;
15502
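              /* Default to a single interrupt vector; the MSI/MSI-X checks
               * below may raise this to TG3_IRQ_MAX_VECS.
               */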
15503         tp->irq_max = 1;
15504
15505         if (tg3_flag(tp, 5750_PLUS)) {
15506                 tg3_flag_set(tp, SUPPORT_MSI);
15507                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15508                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15509                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15510                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15511                      tp->pdev_peer == tp->pdev))
15512                         tg3_flag_clear(tp, SUPPORT_MSI);
15513
15514                 if (tg3_flag(tp, 5755_PLUS) ||
15515                     tg3_asic_rev(tp) == ASIC_REV_5906) {
15516                         tg3_flag_set(tp, 1SHOT_MSI);
15517                 }
15518
15519                 if (tg3_flag(tp, 57765_PLUS)) {
15520                         tg3_flag_set(tp, SUPPORT_MSIX);
15521                         tp->irq_max = TG3_IRQ_MAX_VECS;
15522                 }
15523         }
15524
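              /* Size the queue limits from the vector budget: with MSI-X,
               * RSS can spread across up to TG3_RSS_MAX_NUM_QS RX queues,
               * and 5719/5720 also support multiple TX queues (one fewer
               * than the vector count).
               */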
15525         tp->txq_max = 1;
15526         tp->rxq_max = 1;
15527         if (tp->irq_max > 1) {
15528                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15529                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15530
15531                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15532                     tg3_asic_rev(tp) == ASIC_REV_5720)
15533                         tp->txq_max = tp->irq_max - 1;
15534         }
15535
15536         if (tg3_flag(tp, 5755_PLUS) ||
15537             tg3_asic_rev(tp) == ASIC_REV_5906)
15538                 tg3_flag_set(tp, SHORT_DMA_BUG);
15539
15540         if (tg3_asic_rev(tp) == ASIC_REV_5719)
15541                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15542
15543         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15544             tg3_asic_rev(tp) == ASIC_REV_5719 ||
15545             tg3_asic_rev(tp) == ASIC_REV_5720 ||
15546             tg3_asic_rev(tp) == ASIC_REV_5762)
15547                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15548
15549         if (tg3_flag(tp, 57765_PLUS) &&
15550             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15551                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15552
15553         if (!tg3_flag(tp, 5705_PLUS) ||
15554             tg3_flag(tp, 5780_CLASS) ||
15555             tg3_flag(tp, USE_JUMBO_BDFLAG))
15556                 tg3_flag_set(tp, JUMBO_CAPABLE);
15557
15558         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15559                               &pci_state_reg);
15560
15561         if (pci_is_pcie(tp->pdev)) {
15562                 u16 lnkctl;
15563
15564                 tg3_flag_set(tp, PCI_EXPRESS);
15565
15566                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15567                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15568                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15569                                 tg3_flag_clear(tp, HW_TSO_2);
15570                                 tg3_flag_clear(tp, TSO_CAPABLE);
15571                         }
15572                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15573                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
15574                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15575                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15576                                 tg3_flag_set(tp, CLKREQ_BUG);
15577                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15578                         tg3_flag_set(tp, L1PLLPD_EN);
15579                 }
15580         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15581                 /* BCM5785 devices are effectively PCIe devices, and should
15582                  * follow PCIe codepaths, but do not have a PCIe capabilities
15583                  * section.
15584                  */
15585                 tg3_flag_set(tp, PCI_EXPRESS);
15586         } else if (!tg3_flag(tp, 5705_PLUS) ||
15587                    tg3_flag(tp, 5780_CLASS)) {
15588                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15589                 if (!tp->pcix_cap) {
15590                         dev_err(&tp->pdev->dev,
15591                                 "Cannot find PCI-X capability, aborting\n");
15592                         return -EIO;
15593                 }
15594
15595                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15596                         tg3_flag_set(tp, PCIX_MODE);
15597         }
15598
15599         /* If we have an AMD 762 or VIA K8T800 chipset, write
15600          * reordering to the mailbox registers done by the host
15601          * controller can cause major trouble.  We read back after
15602          * every mailbox register write to force the writes to be
15603          * posted to the chip in order.
15604          */
15605         if (pci_dev_present(tg3_write_reorder_chipsets) &&
15606             !tg3_flag(tp, PCI_EXPRESS))
15607                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15608
15609         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15610                              &tp->pci_cacheline_sz);
15611         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15612                              &tp->pci_lat_timer);
15613         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15614             tp->pci_lat_timer < 64) {
15615                 tp->pci_lat_timer = 64;
15616                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15617                                       tp->pci_lat_timer);
15618         }
15619
15620         /* Important! -- It is critical that the PCI-X hw workaround
15621          * situation be decided before the first MMIO register access.
15622          */
15623         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15624                 /* 5700 BX chips need to have their TX producer index
15625                  * mailboxes written twice to work around a bug.
15626                  */
15627                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15628
15629                 /* If we are in PCI-X mode, enable register write workaround.
15630                  *
15631                  * The workaround is to use indirect register accesses
15632                  * for all chip writes except those to mailbox registers.
15633                  */
15634                 if (tg3_flag(tp, PCIX_MODE)) {
15635                         u32 pm_reg;
15636
15637                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15638
15639                         /* The chip can have its power management PCI config
15640                          * space registers clobbered due to this bug.
15641                          * So explicitly force the chip into D0 here.
15642                          */
15643                         pci_read_config_dword(tp->pdev,
15644                                               tp->pm_cap + PCI_PM_CTRL,
15645                                               &pm_reg);
15646                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15647                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15648                         pci_write_config_dword(tp->pdev,
15649                                                tp->pm_cap + PCI_PM_CTRL,
15650                                                pm_reg);
15651
15652                         /* Also, force SERR#/PERR# in PCI command. */
15653                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15654                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15655                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15656                 }
15657         }
15658
15659         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15660                 tg3_flag_set(tp, PCI_HIGH_SPEED);
15661         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15662                 tg3_flag_set(tp, PCI_32BIT);
15663
15664         /* Chip-specific fixup from Broadcom driver */
15665         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15666             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15667                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15668                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15669         }
15670
15671         /* Default fast path register access methods */
15672         tp->read32 = tg3_read32;
15673         tp->write32 = tg3_write32;
15674         tp->read32_mbox = tg3_read32;
15675         tp->write32_mbox = tg3_write32;
15676         tp->write32_tx_mbox = tg3_write32;
15677         tp->write32_rx_mbox = tg3_write32;
15678
15679         /* Various workaround register access methods */
15680         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15681                 tp->write32 = tg3_write_indirect_reg32;
15682         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15683                  (tg3_flag(tp, PCI_EXPRESS) &&
15684                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
15685                 /*
15686                  * Back-to-back register writes can cause problems on these
15687                  * chips; the workaround is to read back all register writes
15688                  * except those to mailbox registers.
15689                  *
15690                  * See tg3_write_indirect_reg32().
15691                  */
15692                 tp->write32 = tg3_write_flush_reg32;
15693         }
15694
15695         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15696                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15697                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15698                         tp->write32_rx_mbox = tg3_write_flush_reg32;
15699         }
15700
15701         if (tg3_flag(tp, ICH_WORKAROUND)) {
15702                 tp->read32 = tg3_read_indirect_reg32;
15703                 tp->write32 = tg3_write_indirect_reg32;
15704                 tp->read32_mbox = tg3_read_indirect_mbox;
15705                 tp->write32_mbox = tg3_write_indirect_mbox;
15706                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15707                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15708
15709                 iounmap(tp->regs);
15710                 tp->regs = NULL;
15711
15712                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15713                 pci_cmd &= ~PCI_COMMAND_MEMORY;
15714                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15715         }
15716         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15717                 tp->read32_mbox = tg3_read32_mbox_5906;
15718                 tp->write32_mbox = tg3_write32_mbox_5906;
15719                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15720                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15721         }
15722
15723         if (tp->write32 == tg3_write_indirect_reg32 ||
15724             (tg3_flag(tp, PCIX_MODE) &&
15725              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15726               tg3_asic_rev(tp) == ASIC_REV_5701)))
15727                 tg3_flag_set(tp, SRAM_USE_CONFIG);
15728
15729         /* The memory arbiter has to be enabled in order for SRAM accesses
15730          * to succeed.  Normally on powerup the tg3 chip firmware will make
15731          * sure it is enabled, but other entities such as system netboot
15732          * code might disable it.
15733          */
15734         val = tr32(MEMARB_MODE);
15735         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15736
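              /* Work out which PCI function of the package this is.  Start
               * from the devfn; 5704 and 5780-class parts in PCI-X mode
               * report it in the PCI-X status register instead, and
               * 5717/5719/5720 report it via CPMU status (read from SRAM
               * when the signature there is valid, else from the register).
               */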
15737         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15738         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15739             tg3_flag(tp, 5780_CLASS)) {
15740                 if (tg3_flag(tp, PCIX_MODE)) {
15741                         pci_read_config_dword(tp->pdev,
15742                                               tp->pcix_cap + PCI_X_STATUS,
15743                                               &val);
15744                         tp->pci_fn = val & 0x7;
15745                 }
15746         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15747                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15748                    tg3_asic_rev(tp) == ASIC_REV_5720) {
15749                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15750                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
15751                         val = tr32(TG3_CPMU_STATUS);
15752
15753                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
15754                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
15755                 else
15756                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15757                                      TG3_CPMU_STATUS_FSHFT_5719;
15758         }
15759
15760         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
15761                 tp->write32_tx_mbox = tg3_write_flush_reg32;
15762                 tp->write32_rx_mbox = tg3_write_flush_reg32;
15763         }
15764
15765         /* Get eeprom hw config before calling tg3_set_power_state().
15766          * In particular, the TG3_FLAG_IS_NIC flag must be
15767          * determined before calling tg3_set_power_state() so that
15768          * we know whether or not to switch out of Vaux power.
15769          * When the flag is clear, it means that GPIO1 is used for eeprom
15770          * write protect and also implies that it is a LOM where GPIOs
15771          * are not used to switch power.
15772          */
15773         tg3_get_eeprom_hw_cfg(tp);
15774
15775         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
15776                 tg3_flag_clear(tp, TSO_CAPABLE);
15777                 tg3_flag_clear(tp, TSO_BUG);
15778                 tp->fw_needed = NULL;
15779         }
15780
15781         if (tg3_flag(tp, ENABLE_APE)) {
15782                 /* Allow reads and writes to the
15783                  * APE register and memory space.
15784                  */
15785                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15786                                  PCISTATE_ALLOW_APE_SHMEM_WR |
15787                                  PCISTATE_ALLOW_APE_PSPACE_WR;
15788                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15789                                        pci_state_reg);
15790
15791                 tg3_ape_lock_init(tp);
15792         }
15793
15794         /* Set up tp->grc_local_ctrl before calling
15795          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
15796          * will bring 5700's external PHY out of reset.
15797          * It is also used as eeprom write protect on LOMs.
15798          */
15799         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15800         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15801             tg3_flag(tp, EEPROM_WRITE_PROT))
15802                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15803                                        GRC_LCLCTRL_GPIO_OUTPUT1);
15804         /* Unused GPIO3 must be driven as output on 5752 because there
15805          * are no pull-up resistors on unused GPIO pins.
15806          */
15807         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
15808                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15809
15810         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15811             tg3_asic_rev(tp) == ASIC_REV_57780 ||
15812             tg3_flag(tp, 57765_CLASS))
15813                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15814
15815         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15816             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15817                 /* Turn off the debug UART. */
15818                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15819                 if (tg3_flag(tp, IS_NIC))
15820                         /* Keep VMain power. */
15821                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15822                                               GRC_LCLCTRL_GPIO_OUTPUT0;
15823         }
15824
15825         if (tg3_asic_rev(tp) == ASIC_REV_5762)
15826                 tp->grc_local_ctrl |=
15827                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
15828
15829         /* Switch out of Vaux if it is a NIC */
15830         tg3_pwrsrc_switch_to_vmain(tp);
15831
15832         /* Derive initial jumbo mode from MTU assigned in
15833          * ether_setup() via the alloc_etherdev() call
15834          */
15835         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15836                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15837
15838         /* Determine WakeOnLan speed to use. */
15839         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15840             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15841             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15842             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
15843                 tg3_flag_clear(tp, WOL_SPEED_100MB);
15844         } else {
15845                 tg3_flag_set(tp, WOL_SPEED_100MB);
15846         }
15847
15848         if (tg3_asic_rev(tp) == ASIC_REV_5906)
15849                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15850
15851         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
15852         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15853             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15854              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
15855              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
15856             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15857             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15858                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15859
15860         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
15861             tg3_chip_rev(tp) == CHIPREV_5704_AX)
15862                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15863         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
15864                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15865
15866         if (tg3_flag(tp, 5705_PLUS) &&
15867             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15868             tg3_asic_rev(tp) != ASIC_REV_5785 &&
15869             tg3_asic_rev(tp) != ASIC_REV_57780 &&
15870             !tg3_flag(tp, 57765_PLUS)) {
15871                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15872                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
15873                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
15874                     tg3_asic_rev(tp) == ASIC_REV_5761) {
15875                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15876                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15877                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15878                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15879                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15880                 } else
15881                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15882         }
15883
15884         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15885             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
15886                 tp->phy_otp = tg3_read_otp_phycfg(tp);
15887                 if (tp->phy_otp == 0)
15888                         tp->phy_otp = TG3_OTP_DEFAULT;
15889         }
15890
15891         if (tg3_flag(tp, CPMU_PRESENT))
15892                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15893         else
15894                 tp->mi_mode = MAC_MI_MODE_BASE;
15895
15896         tp->coalesce_mode = 0;
15897         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
15898             tg3_chip_rev(tp) != CHIPREV_5700_BX)
15899                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15900
15901         /* Set these bits to enable statistics workaround. */
15902         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15903             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
15904             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
15905                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15906                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15907         }
15908
15909         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
15910             tg3_asic_rev(tp) == ASIC_REV_57780)
15911                 tg3_flag_set(tp, USE_PHYLIB);
15912
15913         err = tg3_mdio_init(tp);
15914         if (err)
15915                 return err;
15916
15917         /* Initialize data/descriptor byte/word swapping. */
15918         val = tr32(GRC_MODE);
15919         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15920             tg3_asic_rev(tp) == ASIC_REV_5762)
15921                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15922                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
15923                         GRC_MODE_B2HRX_ENABLE |
15924                         GRC_MODE_HTX2B_ENABLE |
15925                         GRC_MODE_HOST_STACKUP);
15926         else
15927                 val &= GRC_MODE_HOST_STACKUP;
15928
15929         tw32(GRC_MODE, val | tp->grc_mode);
15930
15931         tg3_switch_clocks(tp);
15932
15933         /* Clear this out for sanity. */
15934         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15935
15936         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15937                               &pci_state_reg);
15938         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15939             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15940                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15941                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15942                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
15943                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
15944                         void __iomem *sram_base;
15945
15946                         /* Write some dummy words into the SRAM status block
15947                          * area, see if it reads back correctly.  If the return
15948                          * value is bad, force enable the PCIX workaround.
15949                          */
15950                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15951
15952                         writel(0x00000000, sram_base);
15953                         writel(0x00000000, sram_base + 4);
15954                         writel(0xffffffff, sram_base + 4);
15955                         if (readl(sram_base) != 0x00000000)
15956                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15957                 }
15958         }
15959
15960         udelay(50);
15961         tg3_nvram_init(tp);
15962
15963         /* If the device has an NVRAM, no need to load patch firmware */
15964         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
15965             !tg3_flag(tp, NO_NVRAM))
15966                 tp->fw_needed = NULL;
15967
15968         grc_misc_cfg = tr32(GRC_MISC_CFG);
15969         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15970
15971         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15972             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15973              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15974                 tg3_flag_set(tp, IS_5788);
15975
15976         if (!tg3_flag(tp, IS_5788) &&
15977             tg3_asic_rev(tp) != ASIC_REV_5700)
15978                 tg3_flag_set(tp, TAGGED_STATUS);
15979         if (tg3_flag(tp, TAGGED_STATUS)) {
15980                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15981                                       HOSTCC_MODE_CLRTICK_TXBD);
15982
15983                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15984                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15985                                        tp->misc_host_ctrl);
15986         }
15987
15988         /* Preserve the APE MAC_MODE bits */
15989         if (tg3_flag(tp, ENABLE_APE))
15990                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15991         else
15992                 tp->mac_mode = 0;
15993
15994         if (tg3_10_100_only_device(tp, ent))
15995                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15996
15997         err = tg3_phy_probe(tp);
15998         if (err) {
15999                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16000                 /* ... but do not return immediately ... */
16001                 tg3_mdio_fini(tp);
16002         }
16003
16004         tg3_read_vpd(tp);
16005         tg3_read_fw_ver(tp);
16006
16007         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16008                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16009         } else {
16010                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16011                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16012                 else
16013                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16014         }
16015
16016         /* 5700 {AX,BX} chips have a broken status block link
16017          * change bit implementation, so we must use the
16018          * status register in those cases.
16019          */
16020         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16021                 tg3_flag_set(tp, USE_LINKCHG_REG);
16022         else
16023                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16024
16025         /* The led_ctrl is set during tg3_phy_probe; here we might
16026          * have to force the link status polling mechanism based
16027          * upon subsystem IDs.
16028          */
16029         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16030             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16031             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16032                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16033                 tg3_flag_set(tp, USE_LINKCHG_REG);
16034         }
16035
16036         /* For all SERDES we poll the MAC status register. */
16037         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16038                 tg3_flag_set(tp, POLL_SERDES);
16039         else
16040                 tg3_flag_clear(tp, POLL_SERDES);
16041
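              /* Align RX packet data so the IP header lands on a 4-byte
               * boundary.  The 5701 in PCI-X mode cannot DMA to the 2-byte
               * offset, so skip the pad there and, on architectures without
               * efficient unaligned access, copy every received packet.
               */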
16042         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16043         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16044         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16045             tg3_flag(tp, PCIX_MODE)) {
16046                 tp->rx_offset = NET_SKB_PAD;
16047 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16048                 tp->rx_copy_thresh = ~(u16)0;
16049 #endif
16050         }
16051
16052         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16053         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16054         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16055
16056         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16057
16058         /* Increment the rx prod index on the rx std ring by at most
16059          * 8 for these chips to work around hw errata.
16060          */
16061         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16062             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16063             tg3_asic_rev(tp) == ASIC_REV_5755)
16064                 tp->rx_std_max_post = 8;
16065
16066         if (tg3_flag(tp, ASPM_WORKAROUND))
16067                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16068                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16069
16070         return err;
16071 }
16072
16073 #ifdef CONFIG_SPARC
16074 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16075 {
16076         struct net_device *dev = tp->dev;
16077         struct pci_dev *pdev = tp->pdev;
16078         struct device_node *dp = pci_device_to_OF_node(pdev);
16079         const unsigned char *addr;
16080         int len;
16081
16082         addr = of_get_property(dp, "local-mac-address", &len);
16083         if (addr && len == 6) {
16084                 memcpy(dev->dev_addr, addr, 6);
16085                 return 0;
16086         }
16087         return -ENODEV;
16088 }
16089
16090 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16091 {
16092         struct net_device *dev = tp->dev;
16093
16094         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16095         return 0;
16096 }
16097 #endif
16098
16099 static int tg3_get_device_address(struct tg3 *tp)
16100 {
16101         struct net_device *dev = tp->dev;
16102         u32 hi, lo, mac_offset;
16103         int addr_ok = 0;
16104         int err;
16105
16106 #ifdef CONFIG_SPARC
16107         if (!tg3_get_macaddr_sparc(tp))
16108                 return 0;
16109 #endif
16110
16111         if (tg3_flag(tp, IS_SSB_CORE)) {
16112                 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16113                 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16114                         return 0;
16115         }
16116
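              /* NVRAM offset of the MAC address.  0x7c is the default; the
               * second MAC of dual-MAC devices and higher functions of
               * 5717-class parts live at other offsets, and 5906 uses 0x10.
               */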
16117         mac_offset = 0x7c;
16118         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16119             tg3_flag(tp, 5780_CLASS)) {
16120                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16121                         mac_offset = 0xcc;
16122                 if (tg3_nvram_lock(tp))
16123                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16124                 else
16125                         tg3_nvram_unlock(tp);
16126         } else if (tg3_flag(tp, 5717_PLUS)) {
16127                 if (tp->pci_fn & 1)
16128                         mac_offset = 0xcc;
16129                 if (tp->pci_fn > 1)
16130                         mac_offset += 0x18c;
16131         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16132                 mac_offset = 0x10;
16133
16134         /* First try to get it from MAC address mailbox. */
16135         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
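              /* 0x484b is ASCII "HK", the bootcode signature marking a
               * valid MAC address in the SRAM mailbox.
               */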
16136         if ((hi >> 16) == 0x484b) {
16137                 dev->dev_addr[0] = (hi >>  8) & 0xff;
16138                 dev->dev_addr[1] = (hi >>  0) & 0xff;
16139
16140                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16141                 dev->dev_addr[2] = (lo >> 24) & 0xff;
16142                 dev->dev_addr[3] = (lo >> 16) & 0xff;
16143                 dev->dev_addr[4] = (lo >>  8) & 0xff;
16144                 dev->dev_addr[5] = (lo >>  0) & 0xff;
16145
16146                 /* Some old bootcode may report a 0 MAC address in SRAM */
16147                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16148         }
16149         if (!addr_ok) {
16150                 /* Next, try NVRAM. */
16151                 if (!tg3_flag(tp, NO_NVRAM) &&
16152                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16153                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16154                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16155                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16156                 }
16157                 /* Finally just fetch it out of the MAC control regs. */
16158                 else {
16159                         hi = tr32(MAC_ADDR_0_HIGH);
16160                         lo = tr32(MAC_ADDR_0_LOW);
16161
16162                         dev->dev_addr[5] = lo & 0xff;
16163                         dev->dev_addr[4] = (lo >> 8) & 0xff;
16164                         dev->dev_addr[3] = (lo >> 16) & 0xff;
16165                         dev->dev_addr[2] = (lo >> 24) & 0xff;
16166                         dev->dev_addr[1] = hi & 0xff;
16167                         dev->dev_addr[0] = (hi >> 8) & 0xff;
16168                 }
16169         }
16170
16171         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16172 #ifdef CONFIG_SPARC
16173                 if (!tg3_get_default_macaddr_sparc(tp))
16174                         return 0;
16175 #endif
16176                 return -EINVAL;
16177         }
16178         return 0;
16179 }
16180
16181 #define BOUNDARY_SINGLE_CACHELINE       1
16182 #define BOUNDARY_MULTI_CACHELINE        2
16183
16184 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16185 {
16186         int cacheline_size;
16187         u8 byte;
16188         int goal;
16189
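              /* PCI_CACHE_LINE_SIZE is programmed in 32-bit words, hence
               * the multiply by 4.  Zero means it was never set, so assume
               * the 1024-byte worst case.
               */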
16190         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16191         if (byte == 0)
16192                 cacheline_size = 1024;
16193         else
16194                 cacheline_size = (int) byte * 4;
16195
16196         /* On 5703 and later chips, the boundary bits have no
16197          * effect.
16198          */
16199         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16200             tg3_asic_rev(tp) != ASIC_REV_5701 &&
16201             !tg3_flag(tp, PCI_EXPRESS))
16202                 goto out;
16203
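              /* Pick a boundary goal by host architecture; platforms whose
               * PCI host bridges disconnect on cache-line-crossing bursts
               * want single- or multi-cacheline boundaries.  A goal of 0
               * leaves the boundary bits untouched.
               */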
16204 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16205         goal = BOUNDARY_MULTI_CACHELINE;
16206 #else
16207 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16208         goal = BOUNDARY_SINGLE_CACHELINE;
16209 #else
16210         goal = 0;
16211 #endif
16212 #endif
16213
16214         if (tg3_flag(tp, 57765_PLUS)) {
16215                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16216                 goto out;
16217         }
16218
16219         if (!goal)
16220                 goto out;
16221
16222         /* PCI controllers on most RISC systems tend to disconnect
16223          * when a device tries to burst across a cache-line boundary.
16224          * Therefore, letting tg3 do so just wastes PCI bandwidth.
16225          *
16226          * Unfortunately, for PCI-E there are only limited
16227          * write-side controls for this, and thus for reads
16228          * we will still get the disconnects.  We'll also waste
16229          * these PCI cycles for both read and write for chips
16230          * other than 5700 and 5701 which do not implement the
16231          * boundary bits.
16232          */
16233         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16234                 switch (cacheline_size) {
16235                 case 16:
16236                 case 32:
16237                 case 64:
16238                 case 128:
16239                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16240                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16241                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16242                         } else {
16243                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16244                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16245                         }
16246                         break;
16247
16248                 case 256:
16249                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16250                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16251                         break;
16252
16253                 default:
16254                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16255                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16256                         break;
16257                 }
16258         } else if (tg3_flag(tp, PCI_EXPRESS)) {
16259                 switch (cacheline_size) {
16260                 case 16:
16261                 case 32:
16262                 case 64:
16263                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16264                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16265                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16266                                 break;
16267                         }
16268                         /* fallthrough */
16269                 case 128:
16270                 default:
16271                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16272                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16273                         break;
16274                 }
16275         } else {
16276                 switch (cacheline_size) {
16277                 case 16:
16278                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16279                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16280                                         DMA_RWCTRL_WRITE_BNDRY_16);
16281                                 break;
16282                         }
16283                         /* fallthrough */
16284                 case 32:
16285                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16286                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16287                                         DMA_RWCTRL_WRITE_BNDRY_32);
16288                                 break;
16289                         }
16290                         /* fallthrough */
16291                 case 64:
16292                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16293                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16294                                         DMA_RWCTRL_WRITE_BNDRY_64);
16295                                 break;
16296                         }
16297                         /* fallthrough */
16298                 case 128:
16299                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16300                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16301                                         DMA_RWCTRL_WRITE_BNDRY_128);
16302                                 break;
16303                         }
16304                         /* fallthrough */
16305                 case 256:
16306                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
16307                                 DMA_RWCTRL_WRITE_BNDRY_256);
16308                         break;
16309                 case 512:
16310                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
16311                                 DMA_RWCTRL_WRITE_BNDRY_512);
16312                         break;
16313                 case 1024:
16314                 default:
16315                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16316                                 DMA_RWCTRL_WRITE_BNDRY_1024);
16317                         break;
16318                 }
16319         }
16320
16321 out:
16322         return val;
16323 }
16324
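      /* Push one transaction through the chip's internal DMA engines: build
       * a test buffer descriptor in SRAM through the config-space memory
       * window, kick the read (to_device) or write DMA engine, then poll
       * the completion FIFO for up to ~4 ms for the descriptor to come back.
       */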
16325 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16326                            int size, int to_device)
16327 {
16328         struct tg3_internal_buffer_desc test_desc;
16329         u32 sram_dma_descs;
16330         int i, ret;
16331
16332         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16333
16334         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16335         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16336         tw32(RDMAC_STATUS, 0);
16337         tw32(WDMAC_STATUS, 0);
16338
16339         tw32(BUFMGR_MODE, 0);
16340         tw32(FTQ_RESET, 0);
16341
16342         test_desc.addr_hi = ((u64) buf_dma) >> 32;
16343         test_desc.addr_lo = buf_dma & 0xffffffff;
16344         test_desc.nic_mbuf = 0x00002100;
16345         test_desc.len = size;
16346
16347         /*
16348          * HP ZX1 was seeing test failures for 5701 cards running at 33 MHz
16349          * the *second* time the tg3 driver was getting loaded after an
16350          * initial scan.
16351          *
16352          * Broadcom tells me:
16353          *   ...the DMA engine is connected to the GRC block and a DMA
16354          *   reset may affect the GRC block in some unpredictable way...
16355          *   The behavior of resets to individual blocks has not been tested.
16356          *
16357          * Broadcom noted the GRC reset will also reset all sub-components.
16358          */
16359         if (to_device) {
16360                 test_desc.cqid_sqid = (13 << 8) | 2;
16361
16362                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16363                 udelay(40);
16364         } else {
16365                 test_desc.cqid_sqid = (16 << 8) | 7;
16366
16367                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16368                 udelay(40);
16369         }
16370         test_desc.flags = 0x00000005;
16371
16372         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16373                 u32 val;
16374
16375                 val = *(((u32 *)&test_desc) + i);
16376                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16377                                        sram_dma_descs + (i * sizeof(u32)));
16378                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16379         }
16380         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16381
16382         if (to_device)
16383                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16384         else
16385                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16386
16387         ret = -ENODEV;
16388         for (i = 0; i < 40; i++) {
16389                 u32 val;
16390
16391                 if (to_device)
16392                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16393                 else
16394                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16395                 if ((val & 0xffff) == sram_dma_descs) {
16396                         ret = 0;
16397                         break;
16398                 }
16399
16400                 udelay(100);
16401         }
16402
16403         return ret;
16404 }
16405
16406 #define TEST_BUFFER_SIZE        0x2000
16407
16408 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16409         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16410         { },
16411 };
16412
16413 static int tg3_test_dma(struct tg3 *tp)
16414 {
16415         dma_addr_t buf_dma;
16416         u32 *buf, saved_dma_rwctrl;
16417         int ret = 0;
16418
16419         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16420                                  &buf_dma, GFP_KERNEL);
16421         if (!buf) {
16422                 ret = -ENOMEM;
16423                 goto out_nofree;
16424         }
16425
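              /* Seed DMA_RWCTRL with the PCI bus commands used for DMA
               * cycles: Memory Write (0x7) and Memory Read (0x6).
               */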
16426         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16427                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16428
16429         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16430
16431         if (tg3_flag(tp, 57765_PLUS))
16432                 goto out;
16433
16434         if (tg3_flag(tp, PCI_EXPRESS)) {
16435                 /* DMA read watermark not used on PCIE */
16436                 tp->dma_rwctrl |= 0x00180000;
16437         } else if (!tg3_flag(tp, PCIX_MODE)) {
16438                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16439                     tg3_asic_rev(tp) == ASIC_REV_5750)
16440                         tp->dma_rwctrl |= 0x003f0000;
16441                 else
16442                         tp->dma_rwctrl |= 0x003f000f;
16443         } else {
16444                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16445                     tg3_asic_rev(tp) == ASIC_REV_5704) {
16446                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16447                         u32 read_water = 0x7;
16448
16449                         /* If the 5704 is behind the EPB bridge, we can
16450                          * do the less restrictive ONE_DMA workaround for
16451                          * better performance.
16452                          */
16453                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16454                             tg3_asic_rev(tp) == ASIC_REV_5704)
16455                                 tp->dma_rwctrl |= 0x8000;
16456                         else if (ccval == 0x6 || ccval == 0x7)
16457                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16458
16459                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
16460                                 read_water = 4;
16461                         /* Set bit 23 to enable PCIX hw bug fix */
16462                         tp->dma_rwctrl |=
16463                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16464                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16465                                 (1 << 23);
16466                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16467                         /* 5780 always in PCIX mode */
16468                         tp->dma_rwctrl |= 0x00144000;
16469                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16470                         /* 5714 always in PCIX mode */
16471                         tp->dma_rwctrl |= 0x00148000;
16472                 } else {
16473                         tp->dma_rwctrl |= 0x001b000f;
16474                 }
16475         }
16476         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16477                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16478
16479         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16480             tg3_asic_rev(tp) == ASIC_REV_5704)
16481                 tp->dma_rwctrl &= 0xfffffff0;
16482
16483         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16484             tg3_asic_rev(tp) == ASIC_REV_5701) {
16485                 /* Remove this if it causes problems for some boards. */
16486                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16487
16488                 /* On 5700/5701 chips, we need to set this bit.
16489                  * Otherwise the chip will issue cacheline transactions
16490                  * to streamable DMA memory without all of the byte
16491                  * enables turned on.  This is an error on several
16492                  * RISC PCI controllers, in particular sparc64.
16493                  *
16494                  * On 5703/5704 chips, this bit has been reassigned
16495                  * a different meaning.  In particular, it is used
16496                  * on those chips to enable a PCI-X workaround.
16497                  */
16498                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16499         }
16500
16501         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16502
16503 #if 0
16504         /* Unneeded, already done by tg3_get_invariants.  */
16505         tg3_switch_clocks(tp);
16506 #endif
16507
16508         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16509             tg3_asic_rev(tp) != ASIC_REV_5701)
16510                 goto out;
16511
16512         /* It is best to perform the DMA test with the maximum write burst
16513          * size to expose the 5700/5701 write DMA bug.
16514          */
16515         saved_dma_rwctrl = tp->dma_rwctrl;
16516         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16517         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16518
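              /* Pattern test: fill the buffer, DMA it to the chip, DMA it
               * back, and verify.  On corruption, tighten the write
               * boundary to 16 bytes and retry; if it still fails at 16
               * bytes, report -ENODEV.
               */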
16519         while (1) {
16520                 u32 *p = buf, i;
16521
16522                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16523                         p[i] = i;
16524
16525                 /* Send the buffer to the chip. */
16526                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16527                 if (ret) {
16528                         dev_err(&tp->pdev->dev,
16529                                 "%s: Buffer write failed. err = %d\n",
16530                                 __func__, ret);
16531                         break;
16532                 }
16533
16534 #if 0
16535                 /* validate data reached card RAM correctly. */
16536                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16537                         u32 val;
16538                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
16539                         if (le32_to_cpu(val) != p[i]) {
16540                                 dev_err(&tp->pdev->dev,
16541                                         "%s: Buffer corrupted on device! "
16542                                         "(%d != %d)\n", __func__, val, i);
16543                                 /* ret = -ENODEV here? */
16544                         }
16545                         p[i] = 0;
16546                 }
16547 #endif
16548                 /* Now read it back. */
16549                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16550                 if (ret) {
16551                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16552                                 "err = %d\n", __func__, ret);
16553                         break;
16554                 }
16555
16556                 /* Verify it. */
16557                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16558                         if (p[i] == i)
16559                                 continue;
16560
16561                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16562                             DMA_RWCTRL_WRITE_BNDRY_16) {
16563                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16564                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16565                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16566                                 break;
16567                         } else {
16568                                 dev_err(&tp->pdev->dev,
16569                                         "%s: Buffer corrupted on read back! "
16570                                         "(%d != %d)\n", __func__, p[i], i);
16571                                 ret = -ENODEV;
16572                                 goto out;
16573                         }
16574                 }
16575
16576                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16577                         /* Success. */
16578                         ret = 0;
16579                         break;
16580                 }
16581         }
16582         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16583             DMA_RWCTRL_WRITE_BNDRY_16) {
16584                 /* DMA test passed without adjusting the DMA boundary;
16585                  * now look for chipsets that are known to expose the
16586                  * DMA bug without failing the test.
16587                  */
16588                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16589                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16590                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16591                 } else {
16592                         /* Safe to use the calculated DMA boundary. */
16593                         tp->dma_rwctrl = saved_dma_rwctrl;
16594                 }
16595
16596                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16597         }
16598
16599 out:
16600         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16601 out_nofree:
16602         return ret;
16603 }
16604
16605 static void tg3_init_bufmgr_config(struct tg3 *tp)
16606 {
16607         if (tg3_flag(tp, 57765_PLUS)) {
16608                 tp->bufmgr_config.mbuf_read_dma_low_water =
16609                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16610                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16611                         DEFAULT_MB_MACRX_LOW_WATER_57765;
16612                 tp->bufmgr_config.mbuf_high_water =
16613                         DEFAULT_MB_HIGH_WATER_57765;
16614
16615                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16616                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16617                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16618                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16619                 tp->bufmgr_config.mbuf_high_water_jumbo =
16620                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16621         } else if (tg3_flag(tp, 5705_PLUS)) {
16622                 tp->bufmgr_config.mbuf_read_dma_low_water =
16623                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16624                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16625                         DEFAULT_MB_MACRX_LOW_WATER_5705;
16626                 tp->bufmgr_config.mbuf_high_water =
16627                         DEFAULT_MB_HIGH_WATER_5705;
16628                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16629                         tp->bufmgr_config.mbuf_mac_rx_low_water =
16630                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
16631                         tp->bufmgr_config.mbuf_high_water =
16632                                 DEFAULT_MB_HIGH_WATER_5906;
16633                 }
16634
16635                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16636                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16637                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16638                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16639                 tp->bufmgr_config.mbuf_high_water_jumbo =
16640                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16641         } else {
16642                 tp->bufmgr_config.mbuf_read_dma_low_water =
16643                         DEFAULT_MB_RDMA_LOW_WATER;
16644                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16645                         DEFAULT_MB_MACRX_LOW_WATER;
16646                 tp->bufmgr_config.mbuf_high_water =
16647                         DEFAULT_MB_HIGH_WATER;
16648
16649                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16650                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16651                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16652                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16653                 tp->bufmgr_config.mbuf_high_water_jumbo =
16654                         DEFAULT_MB_HIGH_WATER_JUMBO;
16655         }
16656
16657         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16658         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16659 }
16660
16661 static char *tg3_phy_string(struct tg3 *tp)
16662 {
16663         switch (tp->phy_id & TG3_PHY_ID_MASK) {
16664         case TG3_PHY_ID_BCM5400:        return "5400";
16665         case TG3_PHY_ID_BCM5401:        return "5401";
16666         case TG3_PHY_ID_BCM5411:        return "5411";
16667         case TG3_PHY_ID_BCM5701:        return "5701";
16668         case TG3_PHY_ID_BCM5703:        return "5703";
16669         case TG3_PHY_ID_BCM5704:        return "5704";
16670         case TG3_PHY_ID_BCM5705:        return "5705";
16671         case TG3_PHY_ID_BCM5750:        return "5750";
16672         case TG3_PHY_ID_BCM5752:        return "5752";
16673         case TG3_PHY_ID_BCM5714:        return "5714";
16674         case TG3_PHY_ID_BCM5780:        return "5780";
16675         case TG3_PHY_ID_BCM5755:        return "5755";
16676         case TG3_PHY_ID_BCM5787:        return "5787";
16677         case TG3_PHY_ID_BCM5784:        return "5784";
16678         case TG3_PHY_ID_BCM5756:        return "5722/5756";
16679         case TG3_PHY_ID_BCM5906:        return "5906";
16680         case TG3_PHY_ID_BCM5761:        return "5761";
16681         case TG3_PHY_ID_BCM5718C:       return "5718C";
16682         case TG3_PHY_ID_BCM5718S:       return "5718S";
16683         case TG3_PHY_ID_BCM57765:       return "57765";
16684         case TG3_PHY_ID_BCM5719C:       return "5719C";
16685         case TG3_PHY_ID_BCM5720C:       return "5720C";
16686         case TG3_PHY_ID_BCM5762:        return "5762C";
16687         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
16688         case 0:                 return "serdes";
16689         default:                return "unknown";
16690         }
16691 }
16692
16693 static char *tg3_bus_string(struct tg3 *tp, char *str)
16694 {
16695         if (tg3_flag(tp, PCI_EXPRESS)) {
16696                 strcpy(str, "PCI Express");
16697                 return str;
16698         } else if (tg3_flag(tp, PCIX_MODE)) {
16699                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16700
16701                 strcpy(str, "PCIX:");
16702
16703                 if ((clock_ctrl == 7) ||
16704                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16705                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16706                         strcat(str, "133MHz");
16707                 else if (clock_ctrl == 0)
16708                         strcat(str, "33MHz");
16709                 else if (clock_ctrl == 2)
16710                         strcat(str, "50MHz");
16711                 else if (clock_ctrl == 4)
16712                         strcat(str, "66MHz");
16713                 else if (clock_ctrl == 6)
16714                         strcat(str, "100MHz");
16715         } else {
16716                 strcpy(str, "PCI:");
16717                 if (tg3_flag(tp, PCI_HIGH_SPEED))
16718                         strcat(str, "66MHz");
16719                 else
16720                         strcat(str, "33MHz");
16721         }
16722         if (tg3_flag(tp, PCI_32BIT))
16723                 strcat(str, ":32-bit");
16724         else
16725                 strcat(str, ":64-bit");
16726         return str;
16727 }
16728
16729 static void tg3_init_coal(struct tg3 *tp)
16730 {
16731         struct ethtool_coalesce *ec = &tp->coal;
16732
16733         memset(ec, 0, sizeof(*ec));
16734         ec->cmd = ETHTOOL_GCOALESCE;
16735         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16736         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16737         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16738         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16739         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16740         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16741         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16742         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16743         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16744
16745         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16746                                  HOSTCC_MODE_CLRTICK_TXBD)) {
16747                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16748                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16749                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16750                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16751         }
16752
16753         if (tg3_flag(tp, 5705_PLUS)) {
16754                 ec->rx_coalesce_usecs_irq = 0;
16755                 ec->tx_coalesce_usecs_irq = 0;
16756                 ec->stats_block_coalesce_usecs = 0;
16757         }
16758 }
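/* A minimal sketch (kept out of the build, following this file's #if 0
 * convention): a hypothetical helper to dump the coalescing defaults
 * chosen by tg3_init_coal(), e.g. during bring-up.  The name
 * tg3_dump_coal is illustrative and not part of the driver.
 */
#if 0
static void tg3_dump_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	netdev_info(tp->dev,
		    "coal: rx %u us/%u frames, tx %u us/%u frames, stats %u us\n",
		    ec->rx_coalesce_usecs, ec->rx_max_coalesced_frames,
		    ec->tx_coalesce_usecs, ec->tx_max_coalesced_frames,
		    ec->stats_block_coalesce_usecs);
}
#endif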
16759
16760 static int tg3_init_one(struct pci_dev *pdev,
16761                                   const struct pci_device_id *ent)
16762 {
16763         struct net_device *dev;
16764         struct tg3 *tp;
16765         int i, err, pm_cap;
16766         u32 sndmbx, rcvmbx, intmbx;
16767         char str[40];
16768         u64 dma_mask, persist_dma_mask;
16769         netdev_features_t features = 0;
16770
16771         printk_once(KERN_INFO "%s\n", version);
16772
16773         err = pci_enable_device(pdev);
16774         if (err) {
16775                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16776                 return err;
16777         }
16778
16779         err = pci_request_regions(pdev, DRV_MODULE_NAME);
16780         if (err) {
16781                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16782                 goto err_out_disable_pdev;
16783         }
16784
16785         pci_set_master(pdev);
16786
16787         /* Find power-management capability. */
16788         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16789         if (pm_cap == 0) {
16790                 dev_err(&pdev->dev,
16791                         "Cannot find Power Management capability, aborting\n");
16792                 err = -EIO;
16793                 goto err_out_free_res;
16794         }
16795
16796         err = pci_set_power_state(pdev, PCI_D0);
16797         if (err) {
16798                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16799                 goto err_out_free_res;
16800         }
16801
16802         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16803         if (!dev) {
16804                 err = -ENOMEM;
16805                 goto err_out_power_down;
16806         }
16807
16808         SET_NETDEV_DEV(dev, &pdev->dev);
16809
16810         tp = netdev_priv(dev);
16811         tp->pdev = pdev;
16812         tp->dev = dev;
16813         tp->pm_cap = pm_cap;
16814         tp->rx_mode = TG3_DEF_RX_MODE;
16815         tp->tx_mode = TG3_DEF_TX_MODE;
16816         tp->irq_sync = 1;
16817
16818         if (tg3_debug > 0)
16819                 tp->msg_enable = tg3_debug;
16820         else
16821                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16822
16823         if (pdev_is_ssb_gige_core(pdev)) {
16824                 tg3_flag_set(tp, IS_SSB_CORE);
16825                 if (ssb_gige_must_flush_posted_writes(pdev))
16826                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
16827                 if (ssb_gige_one_dma_at_once(pdev))
16828                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
16829                 if (ssb_gige_have_roboswitch(pdev))
16830                         tg3_flag_set(tp, ROBOSWITCH);
16831                 if (ssb_gige_is_rgmii(pdev))
16832                         tg3_flag_set(tp, RGMII_MODE);
16833         }
16834
16835         /* The word/byte swap controls set here govern register access byte
16836          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
16837          * setting below.
16838          */
16839         tp->misc_host_ctrl =
16840                 MISC_HOST_CTRL_MASK_PCI_INT |
16841                 MISC_HOST_CTRL_WORD_SWAP |
16842                 MISC_HOST_CTRL_INDIR_ACCESS |
16843                 MISC_HOST_CTRL_PCISTATE_RW;
16844
16845         /* The NONFRM (non-frame) byte/word swap controls take effect
16846          * on descriptor entries, i.e. anything that isn't packet data.
16847          *
16848          * The StrongARM chips on the board (one for tx, one for rx)
16849          * are running in big-endian mode.
16850          */
16851         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16852                         GRC_MODE_WSWAP_NONFRM_DATA);
16853 #ifdef __BIG_ENDIAN
16854         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16855 #endif
16856         spin_lock_init(&tp->lock);
16857         spin_lock_init(&tp->indirect_lock);
16858         INIT_WORK(&tp->reset_task, tg3_reset_task);
16859
16860         tp->regs = pci_ioremap_bar(pdev, BAR_0);
16861         if (!tp->regs) {
16862                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16863                 err = -ENOMEM;
16864                 goto err_out_free_dev;
16865         }
16866
16867         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16868             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16869             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16870             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16871             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16872             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16873             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16874             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16875             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16876             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16877             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16878             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
16879                 tg3_flag_set(tp, ENABLE_APE);
16880                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16881                 if (!tp->aperegs) {
16882                         dev_err(&pdev->dev,
16883                                 "Cannot map APE registers, aborting\n");
16884                         err = -ENOMEM;
16885                         goto err_out_iounmap;
16886                 }
16887         }
16888
16889         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16890         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16891
16892         dev->ethtool_ops = &tg3_ethtool_ops;
16893         dev->watchdog_timeo = TG3_TX_TIMEOUT;
16894         dev->netdev_ops = &tg3_netdev_ops;
16895         dev->irq = pdev->irq;
16896
16897         err = tg3_get_invariants(tp, ent);
16898         if (err) {
16899                 dev_err(&pdev->dev,
16900                         "Problem fetching invariants of chip, aborting\n");
16901                 goto err_out_apeunmap;
16902         }
16903
16904         /* The EPB bridge inside 5714, 5715, and 5780 and any
16905          * device behind the EPB cannot support DMA addresses > 40-bit.
16906          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16907          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16908          * do DMA address check in tg3_start_xmit().
16909          */
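        /* Note: dma_mask bounds streaming DMA mappings (e.g. packet
         * buffers), while persist_dma_mask bounds coherent allocations
         * (rings and status blocks) that must remain reachable for the
         * lifetime of the device.
         */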
16910         if (tg3_flag(tp, IS_5788))
16911                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16912         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16913                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16914 #ifdef CONFIG_HIGHMEM
16915                 dma_mask = DMA_BIT_MASK(64);
16916 #endif
16917         } else
16918                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16919
16920         /* Configure DMA attributes. */
16921         if (dma_mask > DMA_BIT_MASK(32)) {
16922                 err = pci_set_dma_mask(pdev, dma_mask);
16923                 if (!err) {
16924                         features |= NETIF_F_HIGHDMA;
16925                         err = pci_set_consistent_dma_mask(pdev,
16926                                                           persist_dma_mask);
16927                         if (err < 0) {
16928                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16929                                         "DMA for consistent allocations\n");
16930                                 goto err_out_apeunmap;
16931                         }
16932                 }
16933         }
16934         if (err || dma_mask == DMA_BIT_MASK(32)) {
16935                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16936                 if (err) {
16937                         dev_err(&pdev->dev,
16938                                 "No usable DMA configuration, aborting\n");
16939                         goto err_out_apeunmap;
16940                 }
16941         }
16942
16943         tg3_init_bufmgr_config(tp);
16944
16945         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16946
16947         /* 5700 B0 chips do not support checksumming correctly due
16948          * to hardware bugs.
16949          */
16950         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
16951                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16952
16953                 if (tg3_flag(tp, 5755_PLUS))
16954                         features |= NETIF_F_IPV6_CSUM;
16955         }
16956
16957         /* TSO is on by default on chips that support hardware TSO.
16958          * Firmware TSO on older chips gives lower performance, so it
16959          * is off by default, but can be enabled using ethtool.
16960          */
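        /* For instance, firmware TSO can later be turned on from userspace
         * with ethtool (interface name illustrative):
         *
         *      ethtool -K eth0 tso on
         */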
16961         if ((tg3_flag(tp, HW_TSO_1) ||
16962              tg3_flag(tp, HW_TSO_2) ||
16963              tg3_flag(tp, HW_TSO_3)) &&
16964             (features & NETIF_F_IP_CSUM))
16965                 features |= NETIF_F_TSO;
16966         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16967                 if (features & NETIF_F_IPV6_CSUM)
16968                         features |= NETIF_F_TSO6;
16969                 if (tg3_flag(tp, HW_TSO_3) ||
16970                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
16971                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16972                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
16973                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
16974                     tg3_asic_rev(tp) == ASIC_REV_57780)
16975                         features |= NETIF_F_TSO_ECN;
16976         }
16977
16978         dev->features |= features;
16979         dev->vlan_features |= features;
16980
16981         /*
16982          * Add loopback capability only for a subset of devices that support
16983          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
16984          * loopback for the remaining devices.
16985          */
16986         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
16987             !tg3_flag(tp, CPMU_PRESENT))
16988                 /* Add the loopback capability */
16989                 features |= NETIF_F_LOOPBACK;
16990
16991         dev->hw_features |= features;
16992
16993         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
16994             !tg3_flag(tp, TSO_CAPABLE) &&
16995             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16996                 tg3_flag_set(tp, MAX_RXPEND_64);
16997                 tp->rx_pending = 63;
16998         }
16999
17000         err = tg3_get_device_address(tp);
17001         if (err) {
17002                 dev_err(&pdev->dev,
17003                         "Could not obtain valid ethernet address, aborting\n");
17004                 goto err_out_apeunmap;
17005         }
17006
17007         /*
17008          * Reset the chip in case the UNDI or EFI driver did not shut it
17009          * down cleanly; the DMA self test below would enable WDMAC and
17010          * we'd see (spurious) pending DMA on the PCI bus at that point.
17011          */
17012         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17013             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17014                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17015                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17016         }
17017
17018         err = tg3_test_dma(tp);
17019         if (err) {
17020                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17021                 goto err_out_apeunmap;
17022         }
17023
17024         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17025         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17026         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17027         for (i = 0; i < tp->irq_max; i++) {
17028                 struct tg3_napi *tnapi = &tp->napi[i];
17029
17030                 tnapi->tp = tp;
17031                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17032
17033                 tnapi->int_mbox = intmbx;
17034                 if (i <= 4)
17035                         intmbx += 0x8;
17036                 else
17037                         intmbx += 0x4;
17038
17039                 tnapi->consmbox = rcvmbx;
17040                 tnapi->prodmbox = sndmbx;
17041
17042                 if (i)
17043                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17044                 else
17045                         tnapi->coal_now = HOSTCC_MODE_NOW;
17046
17047                 if (!tg3_flag(tp, SUPPORT_MSIX))
17048                         break;
17049
17050                 /*
17051                  * If we support MSIX, we'll be using RSS.  If we're using
17052                  * RSS, the first vector only handles link interrupts and the
17053                  * remaining vectors handle rx and tx interrupts.  Reuse the
17054                  * mailbox values for the next iteration.  The values we set up
17055                  * above are still useful for single-vector mode.
17056                  */
17057                 if (!i)
17058                         continue;
17059
17060                 rcvmbx += 0x8;
17061
17062                 if (sndmbx & 0x4)
17063                         sndmbx -= 0x4;
17064                 else
17065                         sndmbx += 0xc;
17066         }
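        /* A sketch of the striding above (derived from the arithmetic, for
         * illustration): vectors 0 and 1 share the initial rcvmbx/sndmbx
         * values (under RSS, vector 0 only handles link events); intmbx
         * advances by 0x8 after each of the first five vectors and by 0x4
         * thereafter; rcvmbx advances by 0x8 per vector; and the
         * alternating -0x4/+0xc adjustment of sndmbx nets +0x8 per pair
         * of vectors.
         */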
17067
17068         tg3_init_coal(tp);
17069
17070         pci_set_drvdata(pdev, dev);
17071
17072         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17073             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17074             tg3_asic_rev(tp) == ASIC_REV_5762)
17075                 tg3_flag_set(tp, PTP_CAPABLE);
17076
17077         if (tg3_flag(tp, 5717_PLUS)) {
17078                 /* Return to a low-power mode */
17079                 tg3_frob_aux_power(tp, false);
17080         }
17081
17082         tg3_timer_init(tp);
17083
17084         tg3_carrier_off(tp);
17085
17086         err = register_netdev(dev);
17087         if (err) {
17088                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17089                 goto err_out_apeunmap;
17090         }
17091
17092         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17093                     tp->board_part_number,
17094                     tg3_chip_rev_id(tp),
17095                     tg3_bus_string(tp, str),
17096                     dev->dev_addr);
17097
17098         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17099                 struct phy_device *phydev;
17100                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17101                 netdev_info(dev,
17102                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17103                             phydev->drv->name, dev_name(&phydev->dev));
17104         } else {
17105                 char *ethtype;
17106
17107                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17108                         ethtype = "10/100Base-TX";
17109                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17110                         ethtype = "1000Base-SX";
17111                 else
17112                         ethtype = "10/100/1000Base-T";
17113
17114                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17115                             "(WireSpeed[%d], EEE[%d])\n",
17116                             tg3_phy_string(tp), ethtype,
17117                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17118                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17119         }
17120
17121         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17122                     (dev->features & NETIF_F_RXCSUM) != 0,
17123                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
17124                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17125                     tg3_flag(tp, ENABLE_ASF) != 0,
17126                     tg3_flag(tp, TSO_CAPABLE) != 0);
17127         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17128                     tp->dma_rwctrl,
17129                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17130                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17131
17132         pci_save_state(pdev);
17133
17134         return 0;
17135
17136 err_out_apeunmap:
17137         if (tp->aperegs) {
17138                 iounmap(tp->aperegs);
17139                 tp->aperegs = NULL;
17140         }
17141
17142 err_out_iounmap:
17143         if (tp->regs) {
17144                 iounmap(tp->regs);
17145                 tp->regs = NULL;
17146         }
17147
17148 err_out_free_dev:
17149         free_netdev(dev);
17150
17151 err_out_power_down:
17152         pci_set_power_state(pdev, PCI_D3hot);
17153
17154 err_out_free_res:
17155         pci_release_regions(pdev);
17156
17157 err_out_disable_pdev:
17158         pci_disable_device(pdev);
17159         pci_set_drvdata(pdev, NULL);
17160         return err;
17161 }
17162
17163 static void tg3_remove_one(struct pci_dev *pdev)
17164 {
17165         struct net_device *dev = pci_get_drvdata(pdev);
17166
17167         if (dev) {
17168                 struct tg3 *tp = netdev_priv(dev);
17169
17170                 release_firmware(tp->fw);
17171
17172                 tg3_reset_task_cancel(tp);
17173
17174                 if (tg3_flag(tp, USE_PHYLIB)) {
17175                         tg3_phy_fini(tp);
17176                         tg3_mdio_fini(tp);
17177                 }
17178
17179                 unregister_netdev(dev);
17180                 if (tp->aperegs) {
17181                         iounmap(tp->aperegs);
17182                         tp->aperegs = NULL;
17183                 }
17184                 if (tp->regs) {
17185                         iounmap(tp->regs);
17186                         tp->regs = NULL;
17187                 }
17188                 free_netdev(dev);
17189                 pci_release_regions(pdev);
17190                 pci_disable_device(pdev);
17191                 pci_set_drvdata(pdev, NULL);
17192         }
17193 }
17194
17195 #ifdef CONFIG_PM_SLEEP
17196 static int tg3_suspend(struct device *device)
17197 {
17198         struct pci_dev *pdev = to_pci_dev(device);
17199         struct net_device *dev = pci_get_drvdata(pdev);
17200         struct tg3 *tp = netdev_priv(dev);
17201         int err;
17202
17203         if (!netif_running(dev))
17204                 return 0;
17205
17206         tg3_reset_task_cancel(tp);
17207         tg3_phy_stop(tp);
17208         tg3_netif_stop(tp);
17209
17210         tg3_timer_stop(tp);
17211
17212         tg3_full_lock(tp, 1);
17213         tg3_disable_ints(tp);
17214         tg3_full_unlock(tp);
17215
17216         netif_device_detach(dev);
17217
17218         tg3_full_lock(tp, 0);
17219         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17220         tg3_flag_clear(tp, INIT_COMPLETE);
17221         tg3_full_unlock(tp);
17222
17223         err = tg3_power_down_prepare(tp);
17224         if (err) {
17225                 int err2;
17226
17227                 tg3_full_lock(tp, 0);
17228
17229                 tg3_flag_set(tp, INIT_COMPLETE);
17230                 err2 = tg3_restart_hw(tp, 1);
17231                 if (err2)
17232                         goto out;
17233
17234                 tg3_timer_start(tp);
17235
17236                 netif_device_attach(dev);
17237                 tg3_netif_start(tp);
17238
17239 out:
17240                 tg3_full_unlock(tp);
17241
17242                 if (!err2)
17243                         tg3_phy_start(tp);
17244         }
17245
17246         return err;
17247 }
17248
17249 static int tg3_resume(struct device *device)
17250 {
17251         struct pci_dev *pdev = to_pci_dev(device);
17252         struct net_device *dev = pci_get_drvdata(pdev);
17253         struct tg3 *tp = netdev_priv(dev);
17254         int err;
17255
17256         if (!netif_running(dev))
17257                 return 0;
17258
17259         netif_device_attach(dev);
17260
17261         tg3_full_lock(tp, 0);
17262
17263         tg3_flag_set(tp, INIT_COMPLETE);
17264         err = tg3_restart_hw(tp, 1);
17265         if (err)
17266                 goto out;
17267
17268         tg3_timer_start(tp);
17269
17270         tg3_netif_start(tp);
17271
17272 out:
17273         tg3_full_unlock(tp);
17274
17275         if (!err)
17276                 tg3_phy_start(tp);
17277
17278         return err;
17279 }
17280
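/* SIMPLE_DEV_PM_OPS fills in only the system-sleep callbacks of
 * dev_pm_ops (suspend/resume and their hibernation counterparts);
 * runtime-PM callbacks are left unset.
 */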
17281 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17282 #define TG3_PM_OPS (&tg3_pm_ops)
17283
17284 #else
17285
17286 #define TG3_PM_OPS NULL
17287
17288 #endif /* CONFIG_PM_SLEEP */
17289
17290 /**
17291  * tg3_io_error_detected - called when PCI error is detected
17292  * @pdev: Pointer to PCI device
17293  * @state: The current pci connection state
17294  *
17295  * This function is called after a PCI bus error affecting
17296  * this device has been detected.
17297  */
17298 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17299                                               pci_channel_state_t state)
17300 {
17301         struct net_device *netdev = pci_get_drvdata(pdev);
17302         struct tg3 *tp = netdev_priv(netdev);
17303         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17304
17305         netdev_info(netdev, "PCI I/O error detected\n");
17306
17307         rtnl_lock();
17308
17309         if (!netif_running(netdev))
17310                 goto done;
17311
17312         tg3_phy_stop(tp);
17313
17314         tg3_netif_stop(tp);
17315
17316         tg3_timer_stop(tp);
17317
17318         /* Want to make sure that the reset task doesn't run */
17319         tg3_reset_task_cancel(tp);
17320
17321         netif_device_detach(netdev);
17322
17323         /* Clean up software state, even if MMIO is blocked */
17324         tg3_full_lock(tp, 0);
17325         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17326         tg3_full_unlock(tp);
17327
17328 done:
17329         if (state == pci_channel_io_perm_failure)
17330                 err = PCI_ERS_RESULT_DISCONNECT;
17331         else
17332                 pci_disable_device(pdev);
17333
17334         rtnl_unlock();
17335
17336         return err;
17337 }
17338
17339 /**
17340  * tg3_io_slot_reset - called after the PCI bus has been reset.
17341  * @pdev: Pointer to PCI device
17342  *
17343  * Restart the card from scratch, as if from a cold-boot.
17344  * At this point, the card has experienced a hard reset,
17345  * followed by fixups by BIOS, and has its config space
17346  * set up identically to what it was at cold boot.
17347  */
17348 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17349 {
17350         struct net_device *netdev = pci_get_drvdata(pdev);
17351         struct tg3 *tp = netdev_priv(netdev);
17352         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17353         int err;
17354
17355         rtnl_lock();
17356
17357         if (pci_enable_device(pdev)) {
17358                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17359                 goto done;
17360         }
17361
17362         pci_set_master(pdev);
17363         pci_restore_state(pdev);
17364         pci_save_state(pdev);
17365
17366         if (!netif_running(netdev)) {
17367                 rc = PCI_ERS_RESULT_RECOVERED;
17368                 goto done;
17369         }
17370
17371         err = tg3_power_up(tp);
17372         if (err)
17373                 goto done;
17374
17375         rc = PCI_ERS_RESULT_RECOVERED;
17376
17377 done:
17378         rtnl_unlock();
17379
17380         return rc;
17381 }
17382
17383 /**
17384  * tg3_io_resume - called when traffic can start flowing again.
17385  * @pdev: Pointer to PCI device
17386  *
17387  * This callback is called when the error recovery driver tells
17388  * us that it's OK to resume normal operation.
17389  */
17390 static void tg3_io_resume(struct pci_dev *pdev)
17391 {
17392         struct net_device *netdev = pci_get_drvdata(pdev);
17393         struct tg3 *tp = netdev_priv(netdev);
17394         int err;
17395
17396         rtnl_lock();
17397
17398         if (!netif_running(netdev))
17399                 goto done;
17400
17401         tg3_full_lock(tp, 0);
17402         tg3_flag_set(tp, INIT_COMPLETE);
17403         err = tg3_restart_hw(tp, 1);
17404         if (err) {
17405                 tg3_full_unlock(tp);
17406                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17407                 goto done;
17408         }
17409
17410         netif_device_attach(netdev);
17411
17412         tg3_timer_start(tp);
17413
17414         tg3_netif_start(tp);
17415
17416         tg3_full_unlock(tp);
17417
17418         tg3_phy_start(tp);
17419
17420 done:
17421         rtnl_unlock();
17422 }
17423
17424 static const struct pci_error_handlers tg3_err_handler = {
17425         .error_detected = tg3_io_error_detected,
17426         .slot_reset     = tg3_io_slot_reset,
17427         .resume         = tg3_io_resume
17428 };
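/* For an uncorrectable PCI error, the PCI error-recovery core typically
 * invokes these callbacks in order: .error_detected (quiesce the device,
 * report NEED_RESET or DISCONNECT), .slot_reset after the bus has been
 * reset, then .resume once traffic may flow again.
 */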
17429
17430 static struct pci_driver tg3_driver = {
17431         .name           = DRV_MODULE_NAME,
17432         .id_table       = tg3_pci_tbl,
17433         .probe          = tg3_init_one,
17434         .remove         = tg3_remove_one,
17435         .err_handler    = &tg3_err_handler,
17436         .driver.pm      = TG3_PM_OPS,
17437 };
17438
17439 static int __init tg3_init(void)
17440 {
17441         return pci_register_driver(&tg3_driver);
17442 }
17443
17444 static void __exit tg3_cleanup(void)
17445 {
17446         pci_unregister_driver(&tg3_driver);
17447 }
17448
17449 module_init(tg3_init);
17450 module_exit(tg3_cleanup);