/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

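/*
 * Illustrative sketch, not part of the upstream driver: the wrappers
 * above are thin shims over the kernel's atomic bitops, so flags in
 * tp->tg3_flags can be tested and flipped without extra locking.  A
 * hypothetical helper using them might look like:
 */
static inline void tg3_example_set_tagged_status(struct tg3 *tp, bool on)
{
        /* TAGGED_STATUS is just an example; any TG3_FLAG_* name works. */
        if (on)
                tg3_flag_set(tp, TAGGED_STATUS);
        else
                tg3_flag_clear(tp, TAGGED_STATUS);
}
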
#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     123
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "March 21, 2012"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

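/*
 * Illustrative note, not in the upstream source: because
 * TG3_TX_RING_SIZE is a power of two, the mask in NEXT_TX() above is
 * equivalent to (N + 1) % TG3_TX_RING_SIZE but compiles to a single
 * AND; e.g. NEXT_TX(511) == 0 and NEXT_TX(5) == 6.  A compile-time
 * guard for that assumption could be placed in any init function:
 *
 *	BUILD_BUG_ON(TG3_TX_RING_SIZE & (TG3_TX_RING_SIZE - 1));
 */
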
#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test        (online) " },
        { "link test         (online) " },
        { "register test     (offline)" },
        { "memory test       (offline)" },
        { "mac loopback test (offline)" },
        { "phy loopback test (offline)" },
        { "ext loopback test (offline)" },
        { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

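/*
 * Illustrative sketch, not in the upstream file: a read-modify-write
 * through the indirect register window costs two config-space round
 * trips.  Note that indirect_lock is dropped between the read and the
 * write, so the sequence as a whole is not atomic:
 */
static void tg3_example_indirect_set_bits(struct tg3 *tp, u32 off, u32 bits)
{
        u32 val = tg3_read_indirect_reg32(tp, off);

        tg3_write_indirect_reg32(tp, off, val | bits);
}
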
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

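/*
 * Illustrative sketch, not upstream code: tw32_wait_f() is the variant
 * to use when a write must not be posted past the hardware and the
 * chip needs settle time afterwards, e.g. a hypothetical GPIO power
 * toggle (GRC_LCLCTRL_GPIO_OUTPUT1 is only an example bit here):
 */
static inline void tg3_example_pwrsw_toggle(struct tg3 *tp)
{
        tw32_wait_f(GRC_LOCAL_CTRL,
                    tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
                    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
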
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't have any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }

}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
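                /* else: fall through to the shared-lock handling */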
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
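                /* else: fall through to the shared-lock handling */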
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

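/*
 * Illustrative sketch, not part of the upstream driver: resources
 * shared with the APE management firmware are bracketed by the lock
 * helpers above, as tg3_ape_send_event() below does.  A hypothetical
 * locked register read would follow the same pattern:
 */
static int tg3_example_ape_locked_read(struct tg3 *tp, u32 off, u32 *val)
{
        int err = tg3_ape_lock(tp, TG3_APE_LOCK_MEM);

        if (err)
                return err;
        *val = tg3_ape_read32(tp, off);
        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        return 0;
}
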
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        /* NCSI does not support APE events */
        if (tg3_flag(tp, APE_HAS_NCSI))
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

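/*
 * Illustrative sketch, not in the upstream file: the two DSP helpers
 * above combine naturally into a hypothetical read-modify-write of a
 * shadowed DSP register:
 */
static int tg3_example_phydsp_rmw(struct tg3 *tp, u32 reg, u32 clr, u32 set)
{
        u32 val;
        int err = tg3_phydsp_read(tp, reg, &val);

        if (!err)
                err = tg3_phydsp_write(tp, reg, (val & ~clr) | set);
        return err;
}
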
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB);

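/*
 * Illustrative sketch, not upstream code: DSP accesses are normally
 * bracketed by the SMDSP enable/disable pair defined above (note the
 * DISABLE macro already carries its own trailing semicolon):
 */
static int tg3_example_smdsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);

        if (!err) {
                err = tg3_phydsp_write(tp, reg, val);
                TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
        }
        return err;
}
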
static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
1354         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1355                 tg3_bmcr_reset(tp);
1356
1357         i = mdiobus_register(tp->mdio_bus);
1358         if (i) {
1359                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1360                 mdiobus_free(tp->mdio_bus);
1361                 return i;
1362         }
1363
1364         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1365
1366         if (!phydev || !phydev->drv) {
1367                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1368                 mdiobus_unregister(tp->mdio_bus);
1369                 mdiobus_free(tp->mdio_bus);
1370                 return -ENODEV;
1371         }
1372
1373         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1374         case PHY_ID_BCM57780:
1375                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1376                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1377                 break;
1378         case PHY_ID_BCM50610:
1379         case PHY_ID_BCM50610M:
1380                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1381                                      PHY_BRCM_RX_REFCLK_UNUSED |
1382                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1383                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1384                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1385                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1386                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1387                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1388                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1389                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1390                 /* fallthru */
1391         case PHY_ID_RTL8211C:
1392                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1393                 break;
1394         case PHY_ID_RTL8201E:
1395         case PHY_ID_BCMAC131:
1396                 phydev->interface = PHY_INTERFACE_MODE_MII;
1397                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1398                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1399                 break;
1400         }
1401
1402         tg3_flag_set(tp, MDIOBUS_INITED);
1403
1404         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1405                 tg3_mdio_config_5785(tp);
1406
1407         return 0;
1408 }
1409
1410 static void tg3_mdio_fini(struct tg3 *tp)
1411 {
1412         if (tg3_flag(tp, MDIOBUS_INITED)) {
1413                 tg3_flag_clear(tp, MDIOBUS_INITED);
1414                 mdiobus_unregister(tp->mdio_bus);
1415                 mdiobus_free(tp->mdio_bus);
1416         }
1417 }
1418
1419 /* tp->lock is held. */
1420 static inline void tg3_generate_fw_event(struct tg3 *tp)
1421 {
1422         u32 val;
1423
1424         val = tr32(GRC_RX_CPU_EVENT);
1425         val |= GRC_RX_CPU_DRIVER_EVENT;
1426         tw32_f(GRC_RX_CPU_EVENT, val);
1427
1428         tp->last_event_jiffies = jiffies;
1429 }
1430
1431 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1432
1433 /* tp->lock is held. */
1434 static void tg3_wait_for_event_ack(struct tg3 *tp)
1435 {
1436         int i;
1437         unsigned int delay_cnt;
1438         long time_remain;
1439
1440         /* If enough time has passed, no wait is necessary. */
1441         time_remain = (long)(tp->last_event_jiffies + 1 +
1442                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1443                       (long)jiffies;
1444         if (time_remain < 0)
1445                 return;
1446
1447         /* Check if we can shorten the wait time. */
1448         delay_cnt = jiffies_to_usecs(time_remain);
1449         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1450                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1451         delay_cnt = (delay_cnt >> 3) + 1;
1452
1453         for (i = 0; i < delay_cnt; i++) {
1454                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1455                         break;
1456                 udelay(8);
1457         }
1458 }
1459
1460 /* tp->lock is held. */
1461 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1462 {
1463         u32 reg, val;
1464
1465         val = 0;
1466         if (!tg3_readphy(tp, MII_BMCR, &reg))
1467                 val = reg << 16;
1468         if (!tg3_readphy(tp, MII_BMSR, &reg))
1469                 val |= (reg & 0xffff);
1470         *data++ = val;
1471
1472         val = 0;
1473         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1474                 val = reg << 16;
1475         if (!tg3_readphy(tp, MII_LPA, &reg))
1476                 val |= (reg & 0xffff);
1477         *data++ = val;
1478
1479         val = 0;
1480         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1481                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1482                         val = reg << 16;
1483                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1484                         val |= (reg & 0xffff);
1485         }
1486         *data++ = val;
1487
1488         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1489                 val = reg << 16;
1490         else
1491                 val = 0;
1492         *data++ = val;
1493 }
1494
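/* Report the current PHY/link state to the management firmware.  Only
 * meaningful on 5780-class devices with ASF enabled; a no-op otherwise.
 */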
1495 /* tp->lock is held. */
1496 static void tg3_ump_link_report(struct tg3 *tp)
1497 {
1498         u32 data[4];
1499
1500         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1501                 return;
1502
1503         tg3_phy_gather_ump_data(tp, data);
1504
1505         tg3_wait_for_event_ack(tp);
1506
1507         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1508         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1509         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1510         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1511         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1512         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1513
1514         tg3_generate_fw_event(tp);
1515 }
1516
1517 /* tp->lock is held. */
1518 static void tg3_stop_fw(struct tg3 *tp)
1519 {
1520         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1521                 /* Wait for RX cpu to ACK the previous event. */
1522                 tg3_wait_for_event_ack(tp);
1523
1524                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1525
1526                 tg3_generate_fw_event(tp);
1527
1528                 /* Wait for RX cpu to ACK this event. */
1529                 tg3_wait_for_event_ack(tp);
1530         }
1531 }
1532
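/* Signal to the ASF/APE firmware which kind of reset the driver is
 * about to perform, so it can track driver state across the reset.
 */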
1533 /* tp->lock is held. */
1534 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1535 {
1536         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1537                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1538
1539         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1540                 switch (kind) {
1541                 case RESET_KIND_INIT:
1542                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1543                                       DRV_STATE_START);
1544                         break;
1545
1546                 case RESET_KIND_SHUTDOWN:
1547                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1548                                       DRV_STATE_UNLOAD);
1549                         break;
1550
1551                 case RESET_KIND_SUSPEND:
1552                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1553                                       DRV_STATE_SUSPEND);
1554                         break;
1555
1556                 default:
1557                         break;
1558                 }
1559         }
1560
1561         if (kind == RESET_KIND_INIT ||
1562             kind == RESET_KIND_SUSPEND)
1563                 tg3_ape_driver_state_change(tp, kind);
1564 }
1565
1566 /* tp->lock is held. */
1567 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1568 {
1569         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1570                 switch (kind) {
1571                 case RESET_KIND_INIT:
1572                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1573                                       DRV_STATE_START_DONE);
1574                         break;
1575
1576                 case RESET_KIND_SHUTDOWN:
1577                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1578                                       DRV_STATE_UNLOAD_DONE);
1579                         break;
1580
1581                 default:
1582                         break;
1583                 }
1584         }
1585
1586         if (kind == RESET_KIND_SHUTDOWN)
1587                 tg3_ape_driver_state_change(tp, kind);
1588 }
1589
1590 /* tp->lock is held. */
1591 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1592 {
1593         if (tg3_flag(tp, ENABLE_ASF)) {
1594                 switch (kind) {
1595                 case RESET_KIND_INIT:
1596                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1597                                       DRV_STATE_START);
1598                         break;
1599
1600                 case RESET_KIND_SHUTDOWN:
1601                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1602                                       DRV_STATE_UNLOAD);
1603                         break;
1604
1605                 case RESET_KIND_SUSPEND:
1606                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1607                                       DRV_STATE_SUSPEND);
1608                         break;
1609
1610                 default:
1611                         break;
1612                 }
1613         }
1614 }
1615
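/* Poll for firmware initialization to complete: the 5906 sets
 * VCPU_STATUS_INIT_DONE, all other chips write ~MAGIC1 back into the
 * firmware mailbox.  A missing firmware is tolerated (see below).
 */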
1616 static int tg3_poll_fw(struct tg3 *tp)
1617 {
1618         int i;
1619         u32 val;
1620
1621         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1622                 /* Wait up to 20ms for init done. */
1623                 for (i = 0; i < 200; i++) {
1624                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1625                                 return 0;
1626                         udelay(100);
1627                 }
1628                 return -ENODEV;
1629         }
1630
1631         /* Wait for firmware initialization to complete. */
1632         for (i = 0; i < 100000; i++) {
1633                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1634                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1635                         break;
1636                 udelay(10);
1637         }
1638
1639         /* Chip might not be fitted with firmware.  Some Sun onboard
1640          * parts are configured like that.  So don't signal the timeout
1641          * of the above loop as an error, but do report the lack of
1642          * running firmware once.
1643          */
1644         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1645                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1646
1647                 netdev_info(tp->dev, "No firmware running\n");
1648         }
1649
1650         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1651                 /* The 57765 A0 needs a little more
1652                  * time to do some important work.
1653                  */
1654                 mdelay(10);
1655         }
1656
1657         return 0;
1658 }
1659
1660 static void tg3_link_report(struct tg3 *tp)
1661 {
1662         if (!netif_carrier_ok(tp->dev)) {
1663                 netif_info(tp, link, tp->dev, "Link is down\n");
1664                 tg3_ump_link_report(tp);
1665         } else if (netif_msg_link(tp)) {
1666                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1667                             (tp->link_config.active_speed == SPEED_1000 ?
1668                              1000 :
1669                              (tp->link_config.active_speed == SPEED_100 ?
1670                               100 : 10)),
1671                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1672                              "full" : "half"));
1673
1674                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1675                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1676                             "on" : "off",
1677                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1678                             "on" : "off");
1679
1680                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1681                         netdev_info(tp->dev, "EEE is %s\n",
1682                                     tp->setlpicnt ? "enabled" : "disabled");
1683
1684                 tg3_ump_link_report(tp);
1685         }
1686 }
1687
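/* 1000BASE-X counterpart of mii_advertise_flowctrl(): map the
 * FLOW_CTRL_TX/RX request onto the symmetric/asymmetric pause
 * advertisement bits.
 */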
1688 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1689 {
1690         u16 miireg;
1691
1692         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1693                 miireg = ADVERTISE_1000XPAUSE;
1694         else if (flow_ctrl & FLOW_CTRL_TX)
1695                 miireg = ADVERTISE_1000XPSE_ASYM;
1696         else if (flow_ctrl & FLOW_CTRL_RX)
1697                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1698         else
1699                 miireg = 0;
1700
1701         return miireg;
1702 }
1703
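/* Resolve the negotiated flow control configuration from the local and
 * link-partner 1000BASE-X pause advertisements, following the IEEE
 * 802.3 pause resolution rules.
 */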
1704 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1705 {
1706         u8 cap = 0;
1707
1708         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1709                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1710         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1711                 if (lcladv & ADVERTISE_1000XPAUSE)
1712                         cap = FLOW_CTRL_RX;
1713                 if (rmtadv & ADVERTISE_1000XPAUSE)
1714                         cap = FLOW_CTRL_TX;
1715         }
1716
1717         return cap;
1718 }
1719
1720 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1721 {
1722         u8 autoneg;
1723         u8 flowctrl = 0;
1724         u32 old_rx_mode = tp->rx_mode;
1725         u32 old_tx_mode = tp->tx_mode;
1726
1727         if (tg3_flag(tp, USE_PHYLIB))
1728                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1729         else
1730                 autoneg = tp->link_config.autoneg;
1731
1732         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1733                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1734                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1735                 else
1736                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1737         } else
1738                 flowctrl = tp->link_config.flowctrl;
1739
1740         tp->link_config.active_flowctrl = flowctrl;
1741
1742         if (flowctrl & FLOW_CTRL_RX)
1743                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1744         else
1745                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1746
1747         if (old_rx_mode != tp->rx_mode)
1748                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1749
1750         if (flowctrl & FLOW_CTRL_TX)
1751                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1752         else
1753                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1754
1755         if (old_tx_mode != tp->tx_mode)
1756                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1757 }
1758
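/* phylib link-change callback: mirror the PHY's speed, duplex and pause
 * state into the MAC mode and TX length registers, and emit a link
 * report when anything changed.
 */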
1759 static void tg3_adjust_link(struct net_device *dev)
1760 {
1761         u8 oldflowctrl, linkmesg = 0;
1762         u32 mac_mode, lcl_adv, rmt_adv;
1763         struct tg3 *tp = netdev_priv(dev);
1764         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1765
1766         spin_lock_bh(&tp->lock);
1767
1768         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1769                                     MAC_MODE_HALF_DUPLEX);
1770
1771         oldflowctrl = tp->link_config.active_flowctrl;
1772
1773         if (phydev->link) {
1774                 lcl_adv = 0;
1775                 rmt_adv = 0;
1776
1777                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1778                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1779                 else if (phydev->speed == SPEED_1000 ||
1780                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1781                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1782                 else
1783                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1784
1785                 if (phydev->duplex == DUPLEX_HALF)
1786                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1787                 else {
1788                         lcl_adv = mii_advertise_flowctrl(
1789                                   tp->link_config.flowctrl);
1790
1791                         if (phydev->pause)
1792                                 rmt_adv = LPA_PAUSE_CAP;
1793                         if (phydev->asym_pause)
1794                                 rmt_adv |= LPA_PAUSE_ASYM;
1795                 }
1796
1797                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1798         } else
1799                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1800
1801         if (mac_mode != tp->mac_mode) {
1802                 tp->mac_mode = mac_mode;
1803                 tw32_f(MAC_MODE, tp->mac_mode);
1804                 udelay(40);
1805         }
1806
1807         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1808                 if (phydev->speed == SPEED_10)
1809                         tw32(MAC_MI_STAT,
1810                              MAC_MI_STAT_10MBPS_MODE |
1811                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1812                 else
1813                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1814         }
1815
1816         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1817                 tw32(MAC_TX_LENGTHS,
1818                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1819                       (6 << TX_LENGTHS_IPG_SHIFT) |
1820                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1821         else
1822                 tw32(MAC_TX_LENGTHS,
1823                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1824                       (6 << TX_LENGTHS_IPG_SHIFT) |
1825                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1826
1827         if (phydev->link != tp->old_link ||
1828             phydev->speed != tp->link_config.active_speed ||
1829             phydev->duplex != tp->link_config.active_duplex ||
1830             oldflowctrl != tp->link_config.active_flowctrl)
1831                 linkmesg = 1;
1832
1833         tp->old_link = phydev->link;
1834         tp->link_config.active_speed = phydev->speed;
1835         tp->link_config.active_duplex = phydev->duplex;
1836
1837         spin_unlock_bh(&tp->lock);
1838
1839         if (linkmesg)
1840                 tg3_link_report(tp);
1841 }
1842
1843 static int tg3_phy_init(struct tg3 *tp)
1844 {
1845         struct phy_device *phydev;
1846
1847         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1848                 return 0;
1849
1850         /* Bring the PHY back to a known state. */
1851         tg3_bmcr_reset(tp);
1852
1853         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1854
1855         /* Attach the MAC to the PHY. */
1856         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1857                              phydev->dev_flags, phydev->interface);
1858         if (IS_ERR(phydev)) {
1859                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1860                 return PTR_ERR(phydev);
1861         }
1862
1863         /* Mask with MAC supported features. */
1864         switch (phydev->interface) {
1865         case PHY_INTERFACE_MODE_GMII:
1866         case PHY_INTERFACE_MODE_RGMII:
1867                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1868                         phydev->supported &= (PHY_GBIT_FEATURES |
1869                                               SUPPORTED_Pause |
1870                                               SUPPORTED_Asym_Pause);
1871                         break;
1872                 }
1873                 /* fallthru */
1874         case PHY_INTERFACE_MODE_MII:
1875                 phydev->supported &= (PHY_BASIC_FEATURES |
1876                                       SUPPORTED_Pause |
1877                                       SUPPORTED_Asym_Pause);
1878                 break;
1879         default:
1880                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1881                 return -EINVAL;
1882         }
1883
1884         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1885
1886         phydev->advertising = phydev->supported;
1887
1888         return 0;
1889 }
1890
1891 static void tg3_phy_start(struct tg3 *tp)
1892 {
1893         struct phy_device *phydev;
1894
1895         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1896                 return;
1897
1898         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1899
1900         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1901                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1902                 phydev->speed = tp->link_config.speed;
1903                 phydev->duplex = tp->link_config.duplex;
1904                 phydev->autoneg = tp->link_config.autoneg;
1905                 phydev->advertising = tp->link_config.advertising;
1906         }
1907
1908         phy_start(phydev);
1909
1910         phy_start_aneg(phydev);
1911 }
1912
1913 static void tg3_phy_stop(struct tg3 *tp)
1914 {
1915         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1916                 return;
1917
1918         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1919 }
1920
1921 static void tg3_phy_fini(struct tg3 *tp)
1922 {
1923         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1924                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1925                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1926         }
1927 }
1928
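/* Enable external loopback via the PHY's auxiliary control shadow
 * register.  The 5401 cannot be read-modify-written, so a fixed value
 * (0x4c20) is written instead.
 */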
1929 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1930 {
1931         int err;
1932         u32 val;
1933
1934         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1935                 return 0;
1936
1937         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1938                 /* Cannot do read-modify-write on 5401 */
1939                 err = tg3_phy_auxctl_write(tp,
1940                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1941                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1942                                            0x4c20);
1943                 goto done;
1944         }
1945
1946         err = tg3_phy_auxctl_read(tp,
1947                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1948         if (err)
1949                 return err;
1950
1951         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1952         err = tg3_phy_auxctl_write(tp,
1953                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1954
1955 done:
1956         return err;
1957 }
1958
1959 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1960 {
1961         u32 phytest;
1962
1963         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1964                 u32 phy;
1965
1966                 tg3_writephy(tp, MII_TG3_FET_TEST,
1967                              phytest | MII_TG3_FET_SHADOW_EN);
1968                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1969                         if (enable)
1970                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1971                         else
1972                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1973                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1974                 }
1975                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1976         }
1977 }
1978
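/* Enable or disable PHY auto power-down (APD) through the shadow
 * register set.  FET-style PHYs use a different shadow map and are
 * handled by tg3_phy_fet_toggle_apd() above.
 */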
1979 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1980 {
1981         u32 reg;
1982
1983         if (!tg3_flag(tp, 5705_PLUS) ||
1984             (tg3_flag(tp, 5717_PLUS) &&
1985              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1986                 return;
1987
1988         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1989                 tg3_phy_fet_toggle_apd(tp, enable);
1990                 return;
1991         }
1992
1993         reg = MII_TG3_MISC_SHDW_WREN |
1994               MII_TG3_MISC_SHDW_SCR5_SEL |
1995               MII_TG3_MISC_SHDW_SCR5_LPED |
1996               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1997               MII_TG3_MISC_SHDW_SCR5_SDTL |
1998               MII_TG3_MISC_SHDW_SCR5_C125OE;
1999         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2000                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2001
2002         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2003
2005         reg = MII_TG3_MISC_SHDW_WREN |
2006               MII_TG3_MISC_SHDW_APD_SEL |
2007               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2008         if (enable)
2009                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2010
2011         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2012 }
2013
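/* Force or release automatic MDI crossover (auto-MDIX), again with
 * separate shadow-register paths for FET and conventional PHYs.
 */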
2014 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2015 {
2016         u32 phy;
2017
2018         if (!tg3_flag(tp, 5705_PLUS) ||
2019             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2020                 return;
2021
2022         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2023                 u32 ephy;
2024
2025                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2026                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2027
2028                         tg3_writephy(tp, MII_TG3_FET_TEST,
2029                                      ephy | MII_TG3_FET_SHADOW_EN);
2030                         if (!tg3_readphy(tp, reg, &phy)) {
2031                                 if (enable)
2032                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2033                                 else
2034                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2035                                 tg3_writephy(tp, reg, phy);
2036                         }
2037                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2038                 }
2039         } else {
2040                 int ret;
2041
2042                 ret = tg3_phy_auxctl_read(tp,
2043                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2044                 if (!ret) {
2045                         if (enable)
2046                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2047                         else
2048                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2049                         tg3_phy_auxctl_write(tp,
2050                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2051                 }
2052         }
2053 }
2054
2055 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2056 {
2057         int ret;
2058         u32 val;
2059
2060         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2061                 return;
2062
2063         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2064         if (!ret)
2065                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2066                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2067 }
2068
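/* Apply the chip's one-time-programmable (OTP) calibration values,
 * cached in tp->phy_otp, to the PHY DSP coefficients.
 */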
2069 static void tg3_phy_apply_otp(struct tg3 *tp)
2070 {
2071         u32 otp, phy;
2072
2073         if (!tp->phy_otp)
2074                 return;
2075
2076         otp = tp->phy_otp;
2077
2078         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2079                 return;
2080
2081         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2082         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2083         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2084
2085         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2086               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2087         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2088
2089         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2090         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2091         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2092
2093         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2094         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2095
2096         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2097         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2098
2099         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2100               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2101         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2102
2103         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2104 }
2105
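/* Decide whether low-power idle (LPI) may be used on the current link.
 * A non-zero tp->setlpicnt defers the actual LPI enable to a later
 * point elsewhere in the driver; otherwise LPI is switched off here
 * immediately.
 */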
2106 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2107 {
2108         u32 val;
2109
2110         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2111                 return;
2112
2113         tp->setlpicnt = 0;
2114
2115         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2116             current_link_up == 1 &&
2117             tp->link_config.active_duplex == DUPLEX_FULL &&
2118             (tp->link_config.active_speed == SPEED_100 ||
2119              tp->link_config.active_speed == SPEED_1000)) {
2120                 u32 eeectl;
2121
2122                 if (tp->link_config.active_speed == SPEED_1000)
2123                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2124                 else
2125                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2126
2127                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2128
2129                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2130                                   TG3_CL45_D7_EEERES_STAT, &val);
2131
2132                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2133                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2134                         tp->setlpicnt = 2;
2135         }
2136
2137         if (!tp->setlpicnt) {
2138                 if (current_link_up == 1 &&
2139                     !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2140                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2141                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2142                 }
2143
2144                 val = tr32(TG3_CPMU_EEE_MODE);
2145                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2146         }
2147 }
2148
2149 static void tg3_phy_eee_enable(struct tg3 *tp)
2150 {
2151         u32 val;
2152
2153         if (tp->link_config.active_speed == SPEED_1000 &&
2154             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2155              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2156              tg3_flag(tp, 57765_CLASS)) &&
2157             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2158                 val = MII_TG3_DSP_TAP26_ALNOKO |
2159                       MII_TG3_DSP_TAP26_RMRXSTO;
2160                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2161                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2162         }
2163
2164         val = tr32(TG3_CPMU_EEE_MODE);
2165         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2166 }
2167
2168 static int tg3_wait_macro_done(struct tg3 *tp)
2169 {
2170         int limit = 100;
2171
2172         while (limit--) {
2173                 u32 tmp32;
2174
2175                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2176                         if ((tmp32 & 0x1000) == 0)
2177                                 break;
2178                 }
2179         }
2180         if (limit < 0)
2181                 return -EBUSY;
2182
2183         return 0;
2184 }
2185
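/* Write a known test pattern into each of the four DSP channels and
 * read it back.  A macro timeout sets *resetp so the caller retries
 * after a fresh PHY reset; a compare failure returns -EBUSY after the
 * (magic) recovery writes below.
 */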
2186 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2187 {
2188         static const u32 test_pat[4][6] = {
2189         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2190         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2191         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2192         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2193         };
2194         int chan;
2195
2196         for (chan = 0; chan < 4; chan++) {
2197                 int i;
2198
2199                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2200                              (chan * 0x2000) | 0x0200);
2201                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2202
2203                 for (i = 0; i < 6; i++)
2204                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2205                                      test_pat[chan][i]);
2206
2207                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2208                 if (tg3_wait_macro_done(tp)) {
2209                         *resetp = 1;
2210                         return -EBUSY;
2211                 }
2212
2213                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2214                              (chan * 0x2000) | 0x0200);
2215                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2216                 if (tg3_wait_macro_done(tp)) {
2217                         *resetp = 1;
2218                         return -EBUSY;
2219                 }
2220
2221                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2222                 if (tg3_wait_macro_done(tp)) {
2223                         *resetp = 1;
2224                         return -EBUSY;
2225                 }
2226
2227                 for (i = 0; i < 6; i += 2) {
2228                         u32 low, high;
2229
2230                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2231                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2232                             tg3_wait_macro_done(tp)) {
2233                                 *resetp = 1;
2234                                 return -EBUSY;
2235                         }
2236                         low &= 0x7fff;
2237                         high &= 0x000f;
2238                         if (low != test_pat[chan][i] ||
2239                             high != test_pat[chan][i+1]) {
2240                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2241                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2242                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2243
2244                                 return -EBUSY;
2245                         }
2246                 }
2247         }
2248
2249         return 0;
2250 }
2251
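/* Clear the test pattern out of all four DSP channels. */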
2252 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2253 {
2254         int chan;
2255
2256         for (chan = 0; chan < 4; chan++) {
2257                 int i;
2258
2259                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2260                              (chan * 0x2000) | 0x0200);
2261                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2262                 for (i = 0; i < 6; i++)
2263                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2264                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2265                 if (tg3_wait_macro_done(tp))
2266                         return -EBUSY;
2267         }
2268
2269         return 0;
2270 }
2271
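/* PHY reset workaround for the 5703/5704/5705: force 1000 Mbps
 * full-duplex master mode, validate the DSP channels with a test
 * pattern (retrying with fresh PHY resets as needed), then restore the
 * original PHY configuration.
 */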
2272 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2273 {
2274         u32 reg32, phy9_orig;
2275         int retries, do_phy_reset, err;
2276
2277         retries = 10;
2278         do_phy_reset = 1;
2279         do {
2280                 if (do_phy_reset) {
2281                         err = tg3_bmcr_reset(tp);
2282                         if (err)
2283                                 return err;
2284                         do_phy_reset = 0;
2285                 }
2286
2287                 /* Disable transmitter and interrupt.  */
2288                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2289                         continue;
2290
2291                 reg32 |= 0x3000;
2292                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2293
2294                 /* Set full duplex, 1000 Mbps.  */
2295                 tg3_writephy(tp, MII_BMCR,
2296                              BMCR_FULLDPLX | BMCR_SPEED1000);
2297
2298                 /* Set to master mode.  */
2299                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2300                         continue;
2301
2302                 tg3_writephy(tp, MII_CTRL1000,
2303                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2304
2305                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2306                 if (err)
2307                         return err;
2308
2309                 /* Block the PHY control access.  */
2310                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2311
2312                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2313                 if (!err)
2314                         break;
2315         } while (--retries);
2316
2317         err = tg3_phy_reset_chanpat(tp);
2318         if (err)
2319                 return err;
2320
2321         tg3_phydsp_write(tp, 0x8005, 0x0000);
2322
2323         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2324         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2325
2326         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2327
2328         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2329
2330         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2331                 reg32 &= ~0x3000;
2332                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2333         } else if (!err)
2334                 err = -EBUSY;
2335
2336         return err;
2337 }
2338
2339 /* Reset the tigon3 PHY unconditionally and reapply all chip- and
2340  * PHY-specific workarounds before returning.
2341  */
2342 static int tg3_phy_reset(struct tg3 *tp)
2343 {
2344         u32 val, cpmuctrl;
2345         int err;
2346
2347         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2348                 val = tr32(GRC_MISC_CFG);
2349                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2350                 udelay(40);
2351         }
2352         err  = tg3_readphy(tp, MII_BMSR, &val);
2353         err |= tg3_readphy(tp, MII_BMSR, &val);
2354         if (err != 0)
2355                 return -EBUSY;
2356
2357         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2358                 netif_carrier_off(tp->dev);
2359                 tg3_link_report(tp);
2360         }
2361
2362         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2363             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2364             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2365                 err = tg3_phy_reset_5703_4_5(tp);
2366                 if (err)
2367                         return err;
2368                 goto out;
2369         }
2370
2371         cpmuctrl = 0;
2372         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2373             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2374                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2375                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2376                         tw32(TG3_CPMU_CTRL,
2377                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2378         }
2379
2380         err = tg3_bmcr_reset(tp);
2381         if (err)
2382                 return err;
2383
2384         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2385                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2386                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2387
2388                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2389         }
2390
2391         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2392             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2393                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2394                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2395                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2396                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2397                         udelay(40);
2398                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2399                 }
2400         }
2401
2402         if (tg3_flag(tp, 5717_PLUS) &&
2403             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2404                 return 0;
2405
2406         tg3_phy_apply_otp(tp);
2407
2408         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2409                 tg3_phy_toggle_apd(tp, true);
2410         else
2411                 tg3_phy_toggle_apd(tp, false);
2412
2413 out:
2414         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2415             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2416                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2417                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2418                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2419         }
2420
2421         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2422                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2423                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2424         }
2425
2426         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2427                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2428                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2429                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2430                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2431                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2432                 }
2433         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2434                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2435                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2436                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2437                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2438                                 tg3_writephy(tp, MII_TG3_TEST1,
2439                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2440                         } else
2441                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2442
2443                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2444                 }
2445         }
2446
2447         /* Set the extended packet length bit (bit 14) on all chips
2448          * that support jumbo frames. */
2449         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2450                 /* Cannot do read-modify-write on 5401 */
2451                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2452         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2453                 /* Set bit 14 with read-modify-write to preserve other bits */
2454                 err = tg3_phy_auxctl_read(tp,
2455                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2456                 if (!err)
2457                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2458                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2459         }
2460
2461         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2462          * jumbo frame transmission.
2463          */
2464         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2465                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2466                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2467                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2468         }
2469
2470         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2471                 /* adjust output voltage */
2472                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2473         }
2474
2475         tg3_phy_toggle_automdix(tp, 1);
2476         tg3_phy_set_wirespeed(tp);
2477         return 0;
2478 }
2479
2480 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2481 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2482 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2483                                           TG3_GPIO_MSG_NEED_VAUX)
2484 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2485         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2486          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2487          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2488          (TG3_GPIO_MSG_DRVR_PRES << 12))
2489
2490 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2491         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2492          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2493          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2494          (TG3_GPIO_MSG_NEED_VAUX << 12))
2495
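/* Update this PCI function's 4-bit GPIO status nibble (driver present
 * and/or needs Vaux) and return the combined status of all functions.
 * The 5717/5719 keep the bits in an APE register, other chips in
 * TG3_CPMU_DRV_STATUS.
 */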
2496 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2497 {
2498         u32 status, shift;
2499
2500         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2501             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2502                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2503         else
2504                 status = tr32(TG3_CPMU_DRV_STATUS);
2505
2506         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2507         status &= ~(TG3_GPIO_MSG_MASK << shift);
2508         status |= (newstat << shift);
2509
2510         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2511             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2512                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2513         else
2514                 tw32(TG3_CPMU_DRV_STATUS, status);
2515
2516         return status >> TG3_APE_GPIO_MSG_SHIFT;
2517 }
2518
2519 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2520 {
2521         if (!tg3_flag(tp, IS_NIC))
2522                 return 0;
2523
2524         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2525             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2526             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2527                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2528                         return -EIO;
2529
2530                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2531
2532                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2533                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2534
2535                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2536         } else {
2537                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2538                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2539         }
2540
2541         return 0;
2542 }
2543
2544 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2545 {
2546         u32 grc_local_ctrl;
2547
2548         if (!tg3_flag(tp, IS_NIC) ||
2549             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2550             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2551                 return;
2552
2553         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2554
2555         tw32_wait_f(GRC_LOCAL_CTRL,
2556                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2557                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2558
2559         tw32_wait_f(GRC_LOCAL_CTRL,
2560                     grc_local_ctrl,
2561                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2562
2563         tw32_wait_f(GRC_LOCAL_CTRL,
2564                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2565                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2566 }
2567
2568 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2569 {
2570         if (!tg3_flag(tp, IS_NIC))
2571                 return;
2572
2573         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2574             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2575                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2576                             (GRC_LCLCTRL_GPIO_OE0 |
2577                              GRC_LCLCTRL_GPIO_OE1 |
2578                              GRC_LCLCTRL_GPIO_OE2 |
2579                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2580                              GRC_LCLCTRL_GPIO_OUTPUT1),
2581                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2582         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2583                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2584                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2585                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2586                                      GRC_LCLCTRL_GPIO_OE1 |
2587                                      GRC_LCLCTRL_GPIO_OE2 |
2588                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2589                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2590                                      tp->grc_local_ctrl;
2591                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2592                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2593
2594                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2595                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2596                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2597
2598                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2599                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2600                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2601         } else {
2602                 u32 no_gpio2;
2603                 u32 grc_local_ctrl = 0;
2604
2605                 /* Workaround to avoid drawing excessive current. */
2606                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2607                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2608                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2609                                     grc_local_ctrl,
2610                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2611                 }
2612
2613                 /* On 5753 and variants, GPIO2 cannot be used. */
2614                 no_gpio2 = tp->nic_sram_data_cfg &
2615                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2616
2617                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2618                                   GRC_LCLCTRL_GPIO_OE1 |
2619                                   GRC_LCLCTRL_GPIO_OE2 |
2620                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2621                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2622                 if (no_gpio2) {
2623                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2624                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2625                 }
2626                 tw32_wait_f(GRC_LOCAL_CTRL,
2627                             tp->grc_local_ctrl | grc_local_ctrl,
2628                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2629
2630                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2631
2632                 tw32_wait_f(GRC_LOCAL_CTRL,
2633                             tp->grc_local_ctrl | grc_local_ctrl,
2634                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2635
2636                 if (!no_gpio2) {
2637                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2638                         tw32_wait_f(GRC_LOCAL_CTRL,
2639                                     tp->grc_local_ctrl | grc_local_ctrl,
2640                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2641                 }
2642         }
2643 }
2644
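/* Power-source bookkeeping for 5717-class parts: publish whether this
 * function still needs Vaux (WoL, ASF or APE), and if no other function
 * has a driver present, make the Vaux-vs-Vmain decision on behalf of
 * the whole chip.
 */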
2645 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2646 {
2647         u32 msg = 0;
2648
2649         /* Serialize power state transitions */
2650         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2651                 return;
2652
2653         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2654                 msg = TG3_GPIO_MSG_NEED_VAUX;
2655
2656         msg = tg3_set_function_status(tp, msg);
2657
2658         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2659                 goto done;
2660
2661         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2662                 tg3_pwrsrc_switch_to_vaux(tp);
2663         else
2664                 tg3_pwrsrc_die_with_vmain(tp);
2665
2666 done:
2667         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2668 }
2669
2670 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2671 {
2672         bool need_vaux = false;
2673
2674         /* The GPIOs do something completely different on 57765. */
2675         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2676                 return;
2677
2678         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2679             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2680             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2681                 tg3_frob_aux_power_5717(tp, include_wol ?
2682                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2683                 return;
2684         }
2685
2686         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2687                 struct net_device *dev_peer;
2688
2689                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2690
2691                 /* remove_one() may have been run on the peer. */
2692                 if (dev_peer) {
2693                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2694
2695                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2696                                 return;
2697
2698                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2699                             tg3_flag(tp_peer, ENABLE_ASF))
2700                                 need_vaux = true;
2701                 }
2702         }
2703
2704         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2705             tg3_flag(tp, ENABLE_ASF))
2706                 need_vaux = true;
2707
2708         if (need_vaux)
2709                 tg3_pwrsrc_switch_to_vaux(tp);
2710         else
2711                 tg3_pwrsrc_die_with_vmain(tp);
2712 }
2713
2714 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2715 {
2716         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2717                 return 1;
2718         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2719                 if (speed != SPEED_10)
2720                         return 1;
2721         } else if (speed == SPEED_10)
2722                 return 1;
2723
2724         return 0;
2725 }
2726
2727 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2728 {
2729         u32 val;
2730
2731         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2732                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2733                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2734                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2735
2736                         sg_dig_ctrl |=
2737                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2738                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2739                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2740                 }
2741                 return;
2742         }
2743
2744         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2745                 tg3_bmcr_reset(tp);
2746                 val = tr32(GRC_MISC_CFG);
2747                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2748                 udelay(40);
2749                 return;
2750         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2751                 u32 phytest;
2752                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2753                         u32 phy;
2754
2755                         tg3_writephy(tp, MII_ADVERTISE, 0);
2756                         tg3_writephy(tp, MII_BMCR,
2757                                      BMCR_ANENABLE | BMCR_ANRESTART);
2758
2759                         tg3_writephy(tp, MII_TG3_FET_TEST,
2760                                      phytest | MII_TG3_FET_SHADOW_EN);
2761                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2762                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2763                                 tg3_writephy(tp,
2764                                              MII_TG3_FET_SHDW_AUXMODE4,
2765                                              phy);
2766                         }
2767                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2768                 }
2769                 return;
2770         } else if (do_low_power) {
2771                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2772                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2773
2774                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2775                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2776                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2777                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2778         }
2779
2780         /* The PHY should not be powered down on some chips because
2781          * of bugs.
2782          */
2783         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2784             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2785             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2786              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2787             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2788              !tp->pci_fn))
2789                 return;
2790
2791         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2792             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2793                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2794                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2795                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2796                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2797         }
2798
2799         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2800 }
2801
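/* Grab the NVRAM software arbitration semaphore (SWARB request/grant
 * bit 1), waiting up to ~160 ms for the grant.  The lock counts
 * recursively; tg3_nvram_unlock() drops one reference.
 */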
2802 /* tp->lock is held. */
2803 static int tg3_nvram_lock(struct tg3 *tp)
2804 {
2805         if (tg3_flag(tp, NVRAM)) {
2806                 int i;
2807
2808                 if (tp->nvram_lock_cnt == 0) {
2809                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2810                         for (i = 0; i < 8000; i++) {
2811                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2812                                         break;
2813                                 udelay(20);
2814                         }
2815                         if (i == 8000) {
2816                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2817                                 return -ENODEV;
2818                         }
2819                 }
2820                 tp->nvram_lock_cnt++;
2821         }
2822         return 0;
2823 }
2824
2825 /* tp->lock is held. */
2826 static void tg3_nvram_unlock(struct tg3 *tp)
2827 {
2828         if (tg3_flag(tp, NVRAM)) {
2829                 if (tp->nvram_lock_cnt > 0)
2830                         tp->nvram_lock_cnt--;
2831                 if (tp->nvram_lock_cnt == 0)
2832                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2833         }
2834 }
2835
2836 /* tp->lock is held. */
2837 static void tg3_enable_nvram_access(struct tg3 *tp)
2838 {
2839         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2840                 u32 nvaccess = tr32(NVRAM_ACCESS);
2841
2842                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2843         }
2844 }
2845
2846 /* tp->lock is held. */
2847 static void tg3_disable_nvram_access(struct tg3 *tp)
2848 {
2849         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2850                 u32 nvaccess = tr32(NVRAM_ACCESS);
2851
2852                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2853         }
2854 }
2855
2856 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2857                                         u32 offset, u32 *val)
2858 {
2859         u32 tmp;
2860         int i;
2861
2862         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2863                 return -EINVAL;
2864
2865         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2866                                         EEPROM_ADDR_DEVID_MASK |
2867                                         EEPROM_ADDR_READ);
2868         tw32(GRC_EEPROM_ADDR,
2869              tmp |
2870              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2871              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2872               EEPROM_ADDR_ADDR_MASK) |
2873              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2874
2875         for (i = 0; i < 1000; i++) {
2876                 tmp = tr32(GRC_EEPROM_ADDR);
2877
2878                 if (tmp & EEPROM_ADDR_COMPLETE)
2879                         break;
2880                 msleep(1);
2881         }
2882         if (!(tmp & EEPROM_ADDR_COMPLETE))
2883                 return -EBUSY;
2884
2885         tmp = tr32(GRC_EEPROM_DATA);
2886
2887         /*
2888          * The data will always be opposite the native endian
2889          * format.  Perform a blind byteswap to compensate.
2890          */
2891         *val = swab32(tmp);
2892
2893         return 0;
2894 }
2895
2896 #define NVRAM_CMD_TIMEOUT 10000
2897
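/* Kick off an NVRAM command and poll for NVRAM_CMD_DONE, giving up
 * after NVRAM_CMD_TIMEOUT polls of 10 usec each (about 100 ms).
 */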
2898 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2899 {
2900         int i;
2901
2902         tw32(NVRAM_CMD, nvram_cmd);
2903         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2904                 udelay(10);
2905                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2906                         udelay(10);
2907                         break;
2908                 }
2909         }
2910
2911         if (i == NVRAM_CMD_TIMEOUT)
2912                 return -EBUSY;
2913
2914         return 0;
2915 }
2916
2917 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2918 {
2919         if (tg3_flag(tp, NVRAM) &&
2920             tg3_flag(tp, NVRAM_BUFFERED) &&
2921             tg3_flag(tp, FLASH) &&
2922             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2923             (tp->nvram_jedecnum == JEDEC_ATMEL))
2924
2925                 addr = ((addr / tp->nvram_pagesize) <<
2926                         ATMEL_AT45DB0X1B_PAGE_POS) +
2927                        (addr % tp->nvram_pagesize);
2928
2929         return addr;
2930 }
2931
2932 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2933 {
2934         if (tg3_flag(tp, NVRAM) &&
2935             tg3_flag(tp, NVRAM_BUFFERED) &&
2936             tg3_flag(tp, FLASH) &&
2937             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2938             (tp->nvram_jedecnum == JEDEC_ATMEL))
2939
2940                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2941                         tp->nvram_pagesize) +
2942                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2943
2944         return addr;
2945 }
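     /* The Atmel AT45DB0x1B-style parts handled above use a power-of-two
      * page index but a non-power-of-two page size (264 bytes), with the
      * page number placed at bit ATMEL_AT45DB0X1B_PAGE_POS (9).  E.g.
      * linear offset 600 is page 2, byte 72, so the physical address is
      * (2 << 9) + 72 = 0x448; tg3_nvram_logical_addr() is the inverse map.
      */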
2946
2947 /* NOTE: Data read in from NVRAM is byteswapped according to
2948  * the byteswapping settings for all other register accesses.
2949  * tg3 devices are BE devices, so on a BE machine, the data
2950  * returned will be exactly as it is seen in NVRAM.  On a LE
2951  * machine, the 32-bit value will be byteswapped.
2952  */
2953 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2954 {
2955         int ret;
2956
2957         if (!tg3_flag(tp, NVRAM))
2958                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2959
2960         offset = tg3_nvram_phys_addr(tp, offset);
2961
2962         if (offset > NVRAM_ADDR_MSK)
2963                 return -EINVAL;
2964
2965         ret = tg3_nvram_lock(tp);
2966         if (ret)
2967                 return ret;
2968
2969         tg3_enable_nvram_access(tp);
2970
2971         tw32(NVRAM_ADDR, offset);
2972         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2973                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2974
2975         if (ret == 0)
2976                 *val = tr32(NVRAM_RDDATA);
2977
2978         tg3_disable_nvram_access(tp);
2979
2980         tg3_nvram_unlock(tp);
2981
2982         return ret;
2983 }
2984
2985 /* Ensures NVRAM data is in bytestream format. */
2986 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2987 {
2988         u32 v;
2989         int res = tg3_nvram_read(tp, offset, &v);
2990         if (!res)
2991                 *val = cpu_to_be32(v);
2992         return res;
2993 }
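     /* A typical use of the bytestream variant above is validating a
      * fixed-format signature, e.g. (sketch, TG3_EEPROM_MAGIC being the
      * signature constant from tg3.h):
      *
      *	__be32 magic;
      *	if (tg3_nvram_read_be32(tp, 0, &magic) == 0 &&
      *	    be32_to_cpu(magic) == TG3_EEPROM_MAGIC)
      *		... image looks valid ...
      */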
2994
2995 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
2996                                     u32 offset, u32 len, u8 *buf)
2997 {
2998         int i, j, rc = 0;
2999         u32 val;
3000
3001         for (i = 0; i < len; i += 4) {
3002                 u32 addr;
3003                 __be32 data;
3004
3005                 addr = offset + i;
3006
3007                 memcpy(&data, buf + i, 4);
3008
3009                 /*
3010                  * The SEEPROM interface expects the data to always be opposite
3011                  * the native endian format.  We accomplish this by reversing
3012                  * all the operations that would have been performed on the
3013                  * data from a call to tg3_nvram_read_be32().
3014                  */
3015                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3016
3017                 val = tr32(GRC_EEPROM_ADDR);
3018                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3019
3020                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3021                         EEPROM_ADDR_READ);
3022                 tw32(GRC_EEPROM_ADDR, val |
3023                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3024                         (addr & EEPROM_ADDR_ADDR_MASK) |
3025                         EEPROM_ADDR_START |
3026                         EEPROM_ADDR_WRITE);
3027
3028                 for (j = 0; j < 1000; j++) {
3029                         val = tr32(GRC_EEPROM_ADDR);
3030
3031                         if (val & EEPROM_ADDR_COMPLETE)
3032                                 break;
3033                         msleep(1);
3034                 }
3035                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3036                         rc = -EBUSY;
3037                         break;
3038                 }
3039         }
3040
3041         return rc;
3042 }
3043
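     /* Unbuffered (page-erase) write path: each affected flash page is
      * read back into a scratch buffer, patched with the caller's data,
      * erased, and rewritten in full, since unbuffered parts cannot modify
      * a page in place.
      */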
3044 /* offset and length are dword aligned */
3045 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3046                 u8 *buf)
3047 {
3048         int ret = 0;
3049         u32 pagesize = tp->nvram_pagesize;
3050         u32 pagemask = pagesize - 1;
3051         u32 nvram_cmd;
3052         u8 *tmp;
3053
3054         tmp = kmalloc(pagesize, GFP_KERNEL);
3055         if (tmp == NULL)
3056                 return -ENOMEM;
3057
3058         while (len) {
3059                 int j;
3060                 u32 phy_addr, page_off, size;
3061
3062                 phy_addr = offset & ~pagemask;
3063
3064                 for (j = 0; j < pagesize; j += 4) {
3065                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3066                                                   (__be32 *) (tmp + j));
3067                         if (ret)
3068                                 break;
3069                 }
3070                 if (ret)
3071                         break;
3072
3073                 page_off = offset & pagemask;
3074                 size = pagesize - page_off;
3075                 if (len < size)
3076                         size = len;
3077
3078                 len -= size;
3079
3080                 memcpy(tmp + page_off, buf, size);
3081                 buf += size;
3082                 offset = offset + (pagesize - page_off);
3083
3084                 tg3_enable_nvram_access(tp);
3085
3086                 /*
3087                  * Before we can erase the flash page, we need
3088                  * to issue a special "write enable" command.
3089                  */
3090                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3091
3092                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3093                         break;
3094
3095                 /* Erase the target page */
3096                 tw32(NVRAM_ADDR, phy_addr);
3097
3098                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3099                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3100
3101                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3102                         break;
3103
3104                 /* Issue another write enable to start the write. */
3105                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3106
3107                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3108                         break;
3109
3110                 for (j = 0; j < pagesize; j += 4) {
3111                         __be32 data;
3112
3113                         data = *((__be32 *) (tmp + j));
3114
3115                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3116
3117                         tw32(NVRAM_ADDR, phy_addr + j);
3118
3119                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3120                                 NVRAM_CMD_WR;
3121
3122                         if (j == 0)
3123                                 nvram_cmd |= NVRAM_CMD_FIRST;
3124                         else if (j == (pagesize - 4))
3125                                 nvram_cmd |= NVRAM_CMD_LAST;
3126
3127                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3128                         if (ret)
3129                                 break;
3130                 }
3131                 if (ret)
3132                         break;
3133         }
3134
3135         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3136         tg3_nvram_exec_cmd(tp, nvram_cmd);
3137
3138         kfree(tmp);
3139
3140         return ret;
3141 }
3142
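     /* Buffered write path: the part accepts dword-at-a-time programming,
      * so each 32-bit word is pushed with NVRAM_CMD_WR and the transfer is
      * framed with NVRAM_CMD_FIRST/NVRAM_CMD_LAST at buffer and page
      * boundaries.
      */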
3143 /* offset and length are dword aligned */
3144 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3145                 u8 *buf)
3146 {
3147         int i, ret = 0;
3148
3149         for (i = 0; i < len; i += 4, offset += 4) {
3150                 u32 page_off, phy_addr, nvram_cmd;
3151                 __be32 data;
3152
3153                 memcpy(&data, buf + i, 4);
3154                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3155
3156                 page_off = offset % tp->nvram_pagesize;
3157
3158                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3159
3160                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3161
3162                 if (page_off == 0 || i == 0)
3163                         nvram_cmd |= NVRAM_CMD_FIRST;
3164                 if (page_off == (tp->nvram_pagesize - 4))
3165                         nvram_cmd |= NVRAM_CMD_LAST;
3166
3167                 if (i == (len - 4))
3168                         nvram_cmd |= NVRAM_CMD_LAST;
3169
3170                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3171                     !tg3_flag(tp, FLASH) ||
3172                     !tg3_flag(tp, 57765_PLUS))
3173                         tw32(NVRAM_ADDR, phy_addr);
3174
3175                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3176                     !tg3_flag(tp, 5755_PLUS) &&
3177                     (tp->nvram_jedecnum == JEDEC_ST) &&
3178                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3179                         u32 cmd;
3180
3181                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3182                         ret = tg3_nvram_exec_cmd(tp, cmd);
3183                         if (ret)
3184                                 break;
3185                 }
3186                 if (!tg3_flag(tp, FLASH)) {
3187                         /* We always do complete word writes to eeprom. */
3188                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3189                 }
3190
3191                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3192                 if (ret)
3193                         break;
3194         }
3195         return ret;
3196 }
3197
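     /* Top-level NVRAM write entry point: drops the external write-protect
      * GPIO (where fitted) for the duration of the operation, enables write
      * mode in GRC_MODE, and dispatches to the buffered or unbuffered path
      * based on the flash type flags.
      */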
3198 /* offset and length are dword aligned */
3199 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3200 {
3201         int ret;
3202
3203         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3204                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3205                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3206                 udelay(40);
3207         }
3208
3209         if (!tg3_flag(tp, NVRAM)) {
3210                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3211         } else {
3212                 u32 grc_mode;
3213
3214                 ret = tg3_nvram_lock(tp);
3215                 if (ret)
3216                         return ret;
3217
3218                 tg3_enable_nvram_access(tp);
3219                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3220                         tw32(NVRAM_WRITE1, 0x406);
3221
3222                 grc_mode = tr32(GRC_MODE);
3223                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3224
3225                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3226                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3227                                 buf);
3228                 } else {
3229                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3230                                 buf);
3231                 }
3232
3233                 grc_mode = tr32(GRC_MODE);
3234                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3235
3236                 tg3_disable_nvram_access(tp);
3237                 tg3_nvram_unlock(tp);
3238         }
3239
3240         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3241                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3242                 udelay(40);
3243         }
3244
3245         return ret;
3246 }
3247
3248 #define RX_CPU_SCRATCH_BASE     0x30000
3249 #define RX_CPU_SCRATCH_SIZE     0x04000
3250 #define TX_CPU_SCRATCH_BASE     0x34000
3251 #define TX_CPU_SCRATCH_SIZE     0x04000
3252
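     /* The RX and TX on-chip CPUs each own a 16KB scratch window in NIC
      * SRAM (0x30000 and 0x34000 above).  Firmware text is staged there
      * while a CPU is halted, and the CPU is then released by programming
      * CPU_PC and clearing CPU_MODE.
      */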
3253 /* tp->lock is held. */
3254 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3255 {
3256         int i;
3257
3258         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3259
3260         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3261                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3262
3263                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3264                 return 0;
3265         }
3266         if (offset == RX_CPU_BASE) {
3267                 for (i = 0; i < 10000; i++) {
3268                         tw32(offset + CPU_STATE, 0xffffffff);
3269                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3270                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3271                                 break;
3272                 }
3273
3274                 tw32(offset + CPU_STATE, 0xffffffff);
3275                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3276                 udelay(10);
3277         } else {
3278                 for (i = 0; i < 10000; i++) {
3279                         tw32(offset + CPU_STATE, 0xffffffff);
3280                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3281                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3282                                 break;
3283                 }
3284         }
3285
3286         if (i >= 10000) {
3287                 netdev_err(tp->dev, "%s timed out halting the %s CPU\n",
3288                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3289                 return -ENODEV;
3290         }
3291
3292         /* Clear firmware's nvram arbitration. */
3293         if (tg3_flag(tp, NVRAM))
3294                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3295         return 0;
3296 }
3297
3298 struct fw_info {
3299         unsigned int fw_base;
3300         unsigned int fw_len;
3301         const __be32 *fw_data;
3302 };
3303
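     /* Copy a firmware image into a CPU's scratch window: halt the CPU
      * (taking the NVRAM lock first in case bootcode is still running),
      * zero the scratch area, then write the image word by word at
      * fw_base's offset within the window.  The caller starts the CPU
      * afterwards by programming CPU_PC.
      */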
3304 /* tp->lock is held. */
3305 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3306                                  u32 cpu_scratch_base, int cpu_scratch_size,
3307                                  struct fw_info *info)
3308 {
3309         int err, lock_err, i;
3310         void (*write_op)(struct tg3 *, u32, u32);
3311
3312         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3313                 netdev_err(tp->dev,
3314                            "%s: Trying to load TX cpu firmware on a 5705-class chip, which has no TX cpu\n",
3315                            __func__);
3316                 return -EINVAL;
3317         }
3318
3319         if (tg3_flag(tp, 5705_PLUS))
3320                 write_op = tg3_write_mem;
3321         else
3322                 write_op = tg3_write_indirect_reg32;
3323
3324         /* It is possible that bootcode is still loading at this point.
3325          * Take the nvram lock before halting the cpu.
3326          */
3327         lock_err = tg3_nvram_lock(tp);
3328         err = tg3_halt_cpu(tp, cpu_base);
3329         if (!lock_err)
3330                 tg3_nvram_unlock(tp);
3331         if (err)
3332                 goto out;
3333
3334         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3335                 write_op(tp, cpu_scratch_base + i, 0);
3336         tw32(cpu_base + CPU_STATE, 0xffffffff);
3337         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3338         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3339                 write_op(tp, (cpu_scratch_base +
3340                               (info->fw_base & 0xffff) +
3341                               (i * sizeof(u32))),
3342                               be32_to_cpu(info->fw_data[i]));
3343
3344         err = 0;
3345
3346 out:
3347         return err;
3348 }
3349
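     /* The firmware images obtained via request_firmware() share a common
      * layout: fw_data[0] holds the version, fw_data[1] the load address,
      * fw_data[2] the advertised length, and the payload begins at
      * fw_data[3] -- hence the 12-byte header adjustment below.
      */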
3350 /* tp->lock is held. */
3351 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3352 {
3353         struct fw_info info;
3354         const __be32 *fw_data;
3355         int err, i;
3356
3357         fw_data = (void *)tp->fw->data;
3358
3359         /* Firmware blob starts with version numbers, followed by
3360            start address and length.  The complete length is used here:
3361            length = end_address_of_bss - start_address_of_text.
3362            The remainder is the blob, loaded contiguously from the
3363            start address. */
3364
3365         info.fw_base = be32_to_cpu(fw_data[1]);
3366         info.fw_len = tp->fw->size - 12;
3367         info.fw_data = &fw_data[3];
3368
3369         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3370                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3371                                     &info);
3372         if (err)
3373                 return err;
3374
3375         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3376                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3377                                     &info);
3378         if (err)
3379                 return err;
3380
3381         /* Now start up only the RX cpu. */
3382         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3383         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3384
3385         for (i = 0; i < 5; i++) {
3386                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3387                         break;
3388                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3389                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3390                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3391                 udelay(1000);
3392         }
3393         if (i >= 5) {
3394                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3395                            "should be %08x\n", __func__,
3396                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3397                 return -ENODEV;
3398         }
3399         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3400         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3401
3402         return 0;
3403 }
3404
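     /* Parts with any of the HW_TSO_* flags segment in hardware and need
      * no firmware assist, so the loader below is a no-op for them.  On
      * the 5705 the image runs on the RX CPU out of the mbuf pool; on
      * everything else it runs in the TX CPU's scratch space.
      */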
3405 /* tp->lock is held. */
3406 static int tg3_load_tso_firmware(struct tg3 *tp)
3407 {
3408         struct fw_info info;
3409         const __be32 *fw_data;
3410         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3411         int err, i;
3412
3413         if (tg3_flag(tp, HW_TSO_1) ||
3414             tg3_flag(tp, HW_TSO_2) ||
3415             tg3_flag(tp, HW_TSO_3))
3416                 return 0;
3417
3418         fw_data = (void *)tp->fw->data;
3419
3420         /* Firmware blob starts with version numbers, followed by
3421            start address and length.  The complete length is used here:
3422            length = end_address_of_bss - start_address_of_text.
3423            The remainder is the blob, loaded contiguously from the
3424            start address. */
3425
3426         info.fw_base = be32_to_cpu(fw_data[1]);
3427         cpu_scratch_size = tp->fw_len;
3428         info.fw_len = tp->fw->size - 12;
3429         info.fw_data = &fw_data[3];
3430
3431         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3432                 cpu_base = RX_CPU_BASE;
3433                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3434         } else {
3435                 cpu_base = TX_CPU_BASE;
3436                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3437                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3438         }
3439
3440         err = tg3_load_firmware_cpu(tp, cpu_base,
3441                                     cpu_scratch_base, cpu_scratch_size,
3442                                     &info);
3443         if (err)
3444                 return err;
3445
3446         /* Now start up the cpu. */
3447         tw32(cpu_base + CPU_STATE, 0xffffffff);
3448         tw32_f(cpu_base + CPU_PC, info.fw_base);
3449
3450         for (i = 0; i < 5; i++) {
3451                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3452                         break;
3453                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3454                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3455                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3456                 udelay(1000);
3457         }
3458         if (i >= 5) {
3459                 netdev_err(tp->dev,
3460                            "%s failed to set CPU PC: is %08x, should be %08x\n",
3461                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3462                 return -ENODEV;
3463         }
3464         tw32(cpu_base + CPU_STATE, 0xffffffff);
3465         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3466         return 0;
3467 }
3468
3469
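     /* The MAC provides four perfect-match station address slots (plus
      * twelve extended slots on 5703/5704); all are programmed with the
      * same address below.  skip_mac_1 leaves slot 1 untouched for callers
      * that must preserve it, and the byte sum of the address seeds the
      * transmit backoff algorithm.
      */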
3470 /* tp->lock is held. */
3471 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3472 {
3473         u32 addr_high, addr_low;
3474         int i;
3475
3476         addr_high = ((tp->dev->dev_addr[0] << 8) |
3477                      tp->dev->dev_addr[1]);
3478         addr_low = ((tp->dev->dev_addr[2] << 24) |
3479                     (tp->dev->dev_addr[3] << 16) |
3480                     (tp->dev->dev_addr[4] <<  8) |
3481                     (tp->dev->dev_addr[5] <<  0));
3482         for (i = 0; i < 4; i++) {
3483                 if (i == 1 && skip_mac_1)
3484                         continue;
3485                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3486                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3487         }
3488
3489         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3490             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3491                 for (i = 0; i < 12; i++) {
3492                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3493                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3494                 }
3495         }
3496
3497         addr_high = (tp->dev->dev_addr[0] +
3498                      tp->dev->dev_addr[1] +
3499                      tp->dev->dev_addr[2] +
3500                      tp->dev->dev_addr[3] +
3501                      tp->dev->dev_addr[4] +
3502                      tp->dev->dev_addr[5]) &
3503                 TX_BACKOFF_SEED_MASK;
3504         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3505 }
3506
3507 static void tg3_enable_register_access(struct tg3 *tp)
3508 {
3509         /*
3510          * Make sure register accesses (indirect or otherwise) will function
3511          * correctly.
3512          */
3513         pci_write_config_dword(tp->pdev,
3514                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3515 }
3516
3517 static int tg3_power_up(struct tg3 *tp)
3518 {
3519         int err;
3520
3521         tg3_enable_register_access(tp);
3522
3523         err = pci_set_power_state(tp->pdev, PCI_D0);
3524         if (!err) {
3525                 /* Switch out of Vaux if it is a NIC */
3526                 tg3_pwrsrc_switch_to_vmain(tp);
3527         } else {
3528                 netdev_err(tp->dev, "Transition to D0 failed\n");
3529         }
3530
3531         return err;
3532 }
3533
3534 static int tg3_setup_phy(struct tg3 *, int);
3535
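     /* Quiesce the device ahead of a D3 transition: mask PCI interrupts,
      * drop the PHY to a WOL-capable low-power link where needed, program
      * the WOL mailbox and MAC mode, gate core clocks, and let
      * tg3_frob_aux_power() settle the auxiliary power source.
      */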
3536 static int tg3_power_down_prepare(struct tg3 *tp)
3537 {
3538         u32 misc_host_ctrl;
3539         bool device_should_wake, do_low_power;
3540
3541         tg3_enable_register_access(tp);
3542
3543         /* Restore the CLKREQ setting. */
3544         if (tg3_flag(tp, CLKREQ_BUG)) {
3545                 u16 lnkctl;
3546
3547                 pci_read_config_word(tp->pdev,
3548                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3549                                      &lnkctl);
3550                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3551                 pci_write_config_word(tp->pdev,
3552                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3553                                       lnkctl);
3554         }
3555
3556         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3557         tw32(TG3PCI_MISC_HOST_CTRL,
3558              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3559
3560         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3561                              tg3_flag(tp, WOL_ENABLE);
3562
3563         if (tg3_flag(tp, USE_PHYLIB)) {
3564                 do_low_power = false;
3565                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3566                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3567                         struct phy_device *phydev;
3568                         u32 phyid, advertising;
3569
3570                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3571
3572                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3573
3574                         tp->link_config.speed = phydev->speed;
3575                         tp->link_config.duplex = phydev->duplex;
3576                         tp->link_config.autoneg = phydev->autoneg;
3577                         tp->link_config.advertising = phydev->advertising;
3578
3579                         advertising = ADVERTISED_TP |
3580                                       ADVERTISED_Pause |
3581                                       ADVERTISED_Autoneg |
3582                                       ADVERTISED_10baseT_Half;
3583
3584                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3585                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3586                                         advertising |=
3587                                                 ADVERTISED_100baseT_Half |
3588                                                 ADVERTISED_100baseT_Full |
3589                                                 ADVERTISED_10baseT_Full;
3590                                 else
3591                                         advertising |= ADVERTISED_10baseT_Full;
3592                         }
3593
3594                         phydev->advertising = advertising;
3595
3596                         phy_start_aneg(phydev);
3597
3598                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3599                         if (phyid != PHY_ID_BCMAC131) {
3600                                 phyid &= PHY_BCM_OUI_MASK;
3601                                 if (phyid == PHY_BCM_OUI_1 ||
3602                                     phyid == PHY_BCM_OUI_2 ||
3603                                     phyid == PHY_BCM_OUI_3)
3604                                         do_low_power = true;
3605                         }
3606                 }
3607         } else {
3608                 do_low_power = true;
3609
3610                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3611                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3612
3613                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3614                         tg3_setup_phy(tp, 0);
3615         }
3616
3617         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3618                 u32 val;
3619
3620                 val = tr32(GRC_VCPU_EXT_CTRL);
3621                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3622         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3623                 int i;
3624                 u32 val;
3625
3626                 for (i = 0; i < 200; i++) {
3627                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3628                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3629                                 break;
3630                         msleep(1);
3631                 }
3632         }
3633         if (tg3_flag(tp, WOL_CAP))
3634                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3635                                                      WOL_DRV_STATE_SHUTDOWN |
3636                                                      WOL_DRV_WOL |
3637                                                      WOL_SET_MAGIC_PKT);
3638
3639         if (device_should_wake) {
3640                 u32 mac_mode;
3641
3642                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3643                         if (do_low_power &&
3644                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3645                                 tg3_phy_auxctl_write(tp,
3646                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3647                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3648                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3649                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3650                                 udelay(40);
3651                         }
3652
3653                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3654                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3655                         else
3656                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3657
3658                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3659                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3660                             ASIC_REV_5700) {
3661                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3662                                              SPEED_100 : SPEED_10;
3663                                 if (tg3_5700_link_polarity(tp, speed))
3664                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3665                                 else
3666                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3667                         }
3668                 } else {
3669                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3670                 }
3671
3672                 if (!tg3_flag(tp, 5750_PLUS))
3673                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3674
3675                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3676                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3677                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3678                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3679
3680                 if (tg3_flag(tp, ENABLE_APE))
3681                         mac_mode |= MAC_MODE_APE_TX_EN |
3682                                     MAC_MODE_APE_RX_EN |
3683                                     MAC_MODE_TDE_ENABLE;
3684
3685                 tw32_f(MAC_MODE, mac_mode);
3686                 udelay(100);
3687
3688                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3689                 udelay(10);
3690         }
3691
3692         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3693             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3694              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3695                 u32 base_val;
3696
3697                 base_val = tp->pci_clock_ctrl;
3698                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3699                              CLOCK_CTRL_TXCLK_DISABLE);
3700
3701                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3702                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3703         } else if (tg3_flag(tp, 5780_CLASS) ||
3704                    tg3_flag(tp, CPMU_PRESENT) ||
3705                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3706                 /* do nothing */
3707         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3708                 u32 newbits1, newbits2;
3709
3710                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3711                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3712                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3713                                     CLOCK_CTRL_TXCLK_DISABLE |
3714                                     CLOCK_CTRL_ALTCLK);
3715                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3716                 } else if (tg3_flag(tp, 5705_PLUS)) {
3717                         newbits1 = CLOCK_CTRL_625_CORE;
3718                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3719                 } else {
3720                         newbits1 = CLOCK_CTRL_ALTCLK;
3721                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3722                 }
3723
3724                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3725                             40);
3726
3727                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3728                             40);
3729
3730                 if (!tg3_flag(tp, 5705_PLUS)) {
3731                         u32 newbits3;
3732
3733                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3734                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3735                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3736                                             CLOCK_CTRL_TXCLK_DISABLE |
3737                                             CLOCK_CTRL_44MHZ_CORE);
3738                         } else {
3739                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3740                         }
3741
3742                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3743                                     tp->pci_clock_ctrl | newbits3, 40);
3744                 }
3745         }
3746
3747         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
3748                 tg3_power_down_phy(tp, do_low_power);
3749
3750         tg3_frob_aux_power(tp, true);
3751
3752         /* Workaround for unstable PLL clock */
3753         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3754             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3755                 u32 val = tr32(0x7d00);
3756
3757                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3758                 tw32(0x7d00, val);
3759                 if (!tg3_flag(tp, ENABLE_ASF)) {
3760                         int err;
3761
3762                         err = tg3_nvram_lock(tp);
3763                         tg3_halt_cpu(tp, RX_CPU_BASE);
3764                         if (!err)
3765                                 tg3_nvram_unlock(tp);
3766                 }
3767         }
3768
3769         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3770
3771         return 0;
3772 }
3773
3774 static void tg3_power_down(struct tg3 *tp)
3775 {
3776         tg3_power_down_prepare(tp);
3777
3778         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3779         pci_set_power_state(tp->pdev, PCI_D3hot);
3780 }
3781
3782 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3783 {
3784         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3785         case MII_TG3_AUX_STAT_10HALF:
3786                 *speed = SPEED_10;
3787                 *duplex = DUPLEX_HALF;
3788                 break;
3789
3790         case MII_TG3_AUX_STAT_10FULL:
3791                 *speed = SPEED_10;
3792                 *duplex = DUPLEX_FULL;
3793                 break;
3794
3795         case MII_TG3_AUX_STAT_100HALF:
3796                 *speed = SPEED_100;
3797                 *duplex = DUPLEX_HALF;
3798                 break;
3799
3800         case MII_TG3_AUX_STAT_100FULL:
3801                 *speed = SPEED_100;
3802                 *duplex = DUPLEX_FULL;
3803                 break;
3804
3805         case MII_TG3_AUX_STAT_1000HALF:
3806                 *speed = SPEED_1000;
3807                 *duplex = DUPLEX_HALF;
3808                 break;
3809
3810         case MII_TG3_AUX_STAT_1000FULL:
3811                 *speed = SPEED_1000;
3812                 *duplex = DUPLEX_FULL;
3813                 break;
3814
3815         default:
3816                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3817                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3818                                  SPEED_10;
3819                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3820                                   DUPLEX_HALF;
3821                         break;
3822                 }
3823                 *speed = SPEED_UNKNOWN;
3824                 *duplex = DUPLEX_UNKNOWN;
3825                 break;
3826         }
3827 }
3828
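     /* Program the PHY's autonegotiation advertisements from ethtool-style
      * advertise/flowctrl masks, including the EEE abilities on parts that
      * support them; see tg3_phy_copper_begin() below for a caller.
      */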
3829 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3830 {
3831         int err = 0;
3832         u32 val, new_adv;
3833
3834         new_adv = ADVERTISE_CSMA;
3835         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3836         new_adv |= mii_advertise_flowctrl(flowctrl);
3837
3838         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3839         if (err)
3840                 goto done;
3841
3842         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3843                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3844
3845                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3846                     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3847                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3848
3849                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3850                 if (err)
3851                         goto done;
3852         }
3853
3854         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3855                 goto done;
3856
3857         tw32(TG3_CPMU_EEE_MODE,
3858              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3859
3860         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3861         if (!err) {
3862                 u32 err2;
3863
3864                 val = 0;
3865                 /* Advertise 100-BaseTX EEE ability */
3866                 if (advertise & ADVERTISED_100baseT_Full)
3867                         val |= MDIO_AN_EEE_ADV_100TX;
3868                 /* Advertise 1000-BaseT EEE ability */
3869                 if (advertise & ADVERTISED_1000baseT_Full)
3870                         val |= MDIO_AN_EEE_ADV_1000T;
3871                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3872                 if (err)
3873                         val = 0;
3874
3875                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3876                 case ASIC_REV_5717:
3877                 case ASIC_REV_57765:
3878                 case ASIC_REV_57766:
3879                 case ASIC_REV_5719:
3880                         /* If we advertised any EEE abilities above... */
3881                         if (val)
3882                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3883                                       MII_TG3_DSP_TAP26_RMRXSTO |
3884                                       MII_TG3_DSP_TAP26_OPCSINPT;
3885                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3886                         /* Fall through */
3887                 case ASIC_REV_5720:
3888                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3889                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3890                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3891                 }
3892
3893                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3894                 if (!err)
3895                         err = err2;
3896         }
3897
3898 done:
3899         return err;
3900 }
3901
3902 static void tg3_phy_copper_begin(struct tg3 *tp)
3903 {
3904         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
3905             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3906                 u32 adv, fc;
3907
3908                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3909                         adv = ADVERTISED_10baseT_Half |
3910                               ADVERTISED_10baseT_Full;
3911                         if (tg3_flag(tp, WOL_SPEED_100MB))
3912                                 adv |= ADVERTISED_100baseT_Half |
3913                                        ADVERTISED_100baseT_Full;
3914
3915                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
3916                 } else {
3917                         adv = tp->link_config.advertising;
3918                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3919                                 adv &= ~(ADVERTISED_1000baseT_Half |
3920                                          ADVERTISED_1000baseT_Full);
3921
3922                         fc = tp->link_config.flowctrl;
3923                 }
3924
3925                 tg3_phy_autoneg_cfg(tp, adv, fc);
3926
3927                 tg3_writephy(tp, MII_BMCR,
3928                              BMCR_ANENABLE | BMCR_ANRESTART);
3929         } else {
3930                 int i;
3931                 u32 bmcr, orig_bmcr;
3932
3933                 tp->link_config.active_speed = tp->link_config.speed;
3934                 tp->link_config.active_duplex = tp->link_config.duplex;
3935
3936                 bmcr = 0;
3937                 switch (tp->link_config.speed) {
3938                 default:
3939                 case SPEED_10:
3940                         break;
3941
3942                 case SPEED_100:
3943                         bmcr |= BMCR_SPEED100;
3944                         break;
3945
3946                 case SPEED_1000:
3947                         bmcr |= BMCR_SPEED1000;
3948                         break;
3949                 }
3950
3951                 if (tp->link_config.duplex == DUPLEX_FULL)
3952                         bmcr |= BMCR_FULLDPLX;
3953
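                     /* When forcing speed/duplex, first take the link down
                      * by putting the PHY in loopback, wait for link-down
                      * to latch, then program the real BMCR value.
                      */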
3954                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3955                     (bmcr != orig_bmcr)) {
3956                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3957                         for (i = 0; i < 1500; i++) {
3958                                 u32 tmp;
3959
3960                                 udelay(10);
3961                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3962                                     tg3_readphy(tp, MII_BMSR, &tmp))
3963                                         continue;
3964                                 if (!(tmp & BMSR_LSTATUS)) {
3965                                         udelay(40);
3966                                         break;
3967                                 }
3968                         }
3969                         tg3_writephy(tp, MII_BMCR, bmcr);
3970                         udelay(40);
3971                 }
3972         }
3973 }
3974
3975 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3976 {
3977         int err;
3978
3979         /* Turn off tap power management and set the
3980          * extended packet length bit. */
3981         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3982
3983         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3984         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3985         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3986         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3987         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3988
3989         udelay(40);
3990
3991         return err;
3992 }
3993
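     /* Verify that the advertisement registers still contain what the
      * driver asked for; a mismatch (e.g. after a PHY reset or an external
      * MDIO write) means autonegotiation must be reconfigured and
      * restarted.
      */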
3994 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
3995 {
3996         u32 advmsk, tgtadv, advertising;
3997
3998         advertising = tp->link_config.advertising;
3999         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4000
4001         advmsk = ADVERTISE_ALL;
4002         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4003                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4004                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4005         }
4006
4007         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4008                 return false;
4009
4010         if ((*lcladv & advmsk) != tgtadv)
4011                 return false;
4012
4013         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4014                 u32 tg3_ctrl;
4015
4016                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4017
4018                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4019                         return false;
4020
4021                 if (tgtadv &&
4022                     (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4023                      tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4024                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4025                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4026                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4027                 } else {
4028                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4029                 }
4030
4031                 if (tg3_ctrl != tgtadv)
4032                         return false;
4033         }
4034
4035         return true;
4036 }
4037
4038 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4039 {
4040         u32 lpeth = 0;
4041
4042         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4043                 u32 val;
4044
4045                 if (tg3_readphy(tp, MII_STAT1000, &val))
4046                         return false;
4047
4048                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4049         }
4050
4051         if (tg3_readphy(tp, MII_LPA, rmtadv))
4052                 return false;
4053
4054         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4055         tp->link_config.rmt_adv = lpeth;
4056
4057         return true;
4058 }
4059
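     /* Resolve the copper link: flush stale MAC status, apply PHY errata
      * workarounds, poll BMSR and AUX_STAT for the negotiated speed and
      * duplex, then program MAC_MODE and flow control to match, reporting
      * any carrier change at the end.
      */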
4060 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4061 {
4062         int current_link_up;
4063         u32 bmsr, val;
4064         u32 lcl_adv, rmt_adv;
4065         u16 current_speed;
4066         u8 current_duplex;
4067         int i, err;
4068
4069         tw32(MAC_EVENT, 0);
4070
4071         tw32_f(MAC_STATUS,
4072              (MAC_STATUS_SYNC_CHANGED |
4073               MAC_STATUS_CFG_CHANGED |
4074               MAC_STATUS_MI_COMPLETION |
4075               MAC_STATUS_LNKSTATE_CHANGED));
4076         udelay(40);
4077
4078         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4079                 tw32_f(MAC_MI_MODE,
4080                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4081                 udelay(80);
4082         }
4083
4084         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4085
4086         /* Some third-party PHYs need to be reset on link going
4087          * down.
4088          */
4089         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4090              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4091              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4092             netif_carrier_ok(tp->dev)) {
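                     /* BMSR latches link-down events, so read it twice;
                      * the second read reflects the current link state.
                      */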
4093                 tg3_readphy(tp, MII_BMSR, &bmsr);
4094                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4095                     !(bmsr & BMSR_LSTATUS))
4096                         force_reset = 1;
4097         }
4098         if (force_reset)
4099                 tg3_phy_reset(tp);
4100
4101         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4102                 tg3_readphy(tp, MII_BMSR, &bmsr);
4103                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4104                     !tg3_flag(tp, INIT_COMPLETE))
4105                         bmsr = 0;
4106
4107                 if (!(bmsr & BMSR_LSTATUS)) {
4108                         err = tg3_init_5401phy_dsp(tp);
4109                         if (err)
4110                                 return err;
4111
4112                         tg3_readphy(tp, MII_BMSR, &bmsr);
4113                         for (i = 0; i < 1000; i++) {
4114                                 udelay(10);
4115                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4116                                     (bmsr & BMSR_LSTATUS)) {
4117                                         udelay(40);
4118                                         break;
4119                                 }
4120                         }
4121
4122                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4123                             TG3_PHY_REV_BCM5401_B0 &&
4124                             !(bmsr & BMSR_LSTATUS) &&
4125                             tp->link_config.active_speed == SPEED_1000) {
4126                                 err = tg3_phy_reset(tp);
4127                                 if (!err)
4128                                         err = tg3_init_5401phy_dsp(tp);
4129                                 if (err)
4130                                         return err;
4131                         }
4132                 }
4133         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4134                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4135                 /* 5701 {A0,B0} CRC bug workaround */
4136                 tg3_writephy(tp, 0x15, 0x0a75);
4137                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4138                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4139                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4140         }
4141
4142         /* Clear pending interrupts... */
4143         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4144         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4145
4146         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4147                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4148         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4149                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4150
4151         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4152             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4153                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4154                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4155                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4156                 else
4157                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4158         }
4159
4160         current_link_up = 0;
4161         current_speed = SPEED_UNKNOWN;
4162         current_duplex = DUPLEX_UNKNOWN;
4163         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4164         tp->link_config.rmt_adv = 0;
4165
4166         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4167                 err = tg3_phy_auxctl_read(tp,
4168                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4169                                           &val);
4170                 if (!err && !(val & (1 << 10))) {
4171                         tg3_phy_auxctl_write(tp,
4172                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4173                                              val | (1 << 10));
4174                         goto relink;
4175                 }
4176         }
4177
4178         bmsr = 0;
4179         for (i = 0; i < 100; i++) {
4180                 tg3_readphy(tp, MII_BMSR, &bmsr);
4181                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4182                     (bmsr & BMSR_LSTATUS))
4183                         break;
4184                 udelay(40);
4185         }
4186
4187         if (bmsr & BMSR_LSTATUS) {
4188                 u32 aux_stat, bmcr;
4189
4190                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4191                 for (i = 0; i < 2000; i++) {
4192                         udelay(10);
4193                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4194                             aux_stat)
4195                                 break;
4196                 }
4197
4198                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4199                                              &current_speed,
4200                                              &current_duplex);
4201
4202                 bmcr = 0;
4203                 for (i = 0; i < 200; i++) {
4204                         tg3_readphy(tp, MII_BMCR, &bmcr);
4205                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4206                                 continue;
4207                         if (bmcr && bmcr != 0x7fff)
4208                                 break;
4209                         udelay(10);
4210                 }
4211
4212                 lcl_adv = 0;
4213                 rmt_adv = 0;
4214
4215                 tp->link_config.active_speed = current_speed;
4216                 tp->link_config.active_duplex = current_duplex;
4217
4218                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4219                         if ((bmcr & BMCR_ANENABLE) &&
4220                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4221                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4222                                 current_link_up = 1;
4223                 } else {
4224                         if (!(bmcr & BMCR_ANENABLE) &&
4225                             tp->link_config.speed == current_speed &&
4226                             tp->link_config.duplex == current_duplex &&
4227                             tp->link_config.flowctrl ==
4228                             tp->link_config.active_flowctrl) {
4229                                 current_link_up = 1;
4230                         }
4231                 }
4232
4233                 if (current_link_up == 1 &&
4234                     tp->link_config.active_duplex == DUPLEX_FULL) {
4235                         u32 reg, bit;
4236
4237                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4238                                 reg = MII_TG3_FET_GEN_STAT;
4239                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4240                         } else {
4241                                 reg = MII_TG3_EXT_STAT;
4242                                 bit = MII_TG3_EXT_STAT_MDIX;
4243                         }
4244
4245                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4246                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4247
4248                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4249                 }
4250         }
4251
4252 relink:
4253         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4254                 tg3_phy_copper_begin(tp);
4255
4256                 tg3_readphy(tp, MII_BMSR, &bmsr);
4257                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4258                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4259                         current_link_up = 1;
4260         }
4261
4262         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4263         if (current_link_up == 1) {
4264                 if (tp->link_config.active_speed == SPEED_100 ||
4265                     tp->link_config.active_speed == SPEED_10)
4266                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4267                 else
4268                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4269         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4270                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4271         else
4272                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4273
4274         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4275         if (tp->link_config.active_duplex == DUPLEX_HALF)
4276                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4277
4278         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4279                 if (current_link_up == 1 &&
4280                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4281                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4282                 else
4283                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4284         }
4285
4286         /* Without this setting the Netgear GA302T PHY does not
4287          * send/receive packets; the exact cause is unknown.
4288          */
4289         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4290             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4291                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4292                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4293                 udelay(80);
4294         }
4295
4296         tw32_f(MAC_MODE, tp->mac_mode);
4297         udelay(40);
4298
4299         tg3_phy_eee_adjust(tp, current_link_up);
4300
4301         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4302                 /* Polled via timer. */
4303                 tw32_f(MAC_EVENT, 0);
4304         } else {
4305                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4306         }
4307         udelay(40);
4308
4309         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4310             current_link_up == 1 &&
4311             tp->link_config.active_speed == SPEED_1000 &&
4312             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4313                 udelay(120);
4314                 tw32_f(MAC_STATUS,
4315                      (MAC_STATUS_SYNC_CHANGED |
4316                       MAC_STATUS_CFG_CHANGED));
4317                 udelay(40);
4318                 tg3_write_mem(tp,
4319                               NIC_SRAM_FIRMWARE_MBOX,
4320                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4321         }
4322
4323         /* Prevent send BD corruption. */
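             /* On affected chips, CLKREQ must be disabled while the link
              * runs at 10/100; it may be re-enabled at other speeds. */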
4324         if (tg3_flag(tp, CLKREQ_BUG)) {
4325                 u16 oldlnkctl, newlnkctl;
4326
4327                 pci_read_config_word(tp->pdev,
4328                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4329                                      &oldlnkctl);
4330                 if (tp->link_config.active_speed == SPEED_100 ||
4331                     tp->link_config.active_speed == SPEED_10)
4332                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4333                 else
4334                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4335                 if (newlnkctl != oldlnkctl)
4336                         pci_write_config_word(tp->pdev,
4337                                               pci_pcie_cap(tp->pdev) +
4338                                               PCI_EXP_LNKCTL, newlnkctl);
4339         }
4340
4341         if (current_link_up != netif_carrier_ok(tp->dev)) {
4342                 if (current_link_up)
4343                         netif_carrier_on(tp->dev);
4344                 else
4345                         netif_carrier_off(tp->dev);
4346                 tg3_link_report(tp);
4347         }
4348
4349         return 0;
4350 }
4351
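     /* State for the software 1000BASE-X autonegotiation engine (the
      * state names mirror the IEEE 802.3 clause 37 arbitration state
      * diagram), used when hardware autoneg is not available.
      */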
4352 struct tg3_fiber_aneginfo {
4353         int state;
4354 #define ANEG_STATE_UNKNOWN              0
4355 #define ANEG_STATE_AN_ENABLE            1
4356 #define ANEG_STATE_RESTART_INIT         2
4357 #define ANEG_STATE_RESTART              3
4358 #define ANEG_STATE_DISABLE_LINK_OK      4
4359 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4360 #define ANEG_STATE_ABILITY_DETECT       6
4361 #define ANEG_STATE_ACK_DETECT_INIT      7
4362 #define ANEG_STATE_ACK_DETECT           8
4363 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4364 #define ANEG_STATE_COMPLETE_ACK         10
4365 #define ANEG_STATE_IDLE_DETECT_INIT     11
4366 #define ANEG_STATE_IDLE_DETECT          12
4367 #define ANEG_STATE_LINK_OK              13
4368 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4369 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4370
4371         u32 flags;
4372 #define MR_AN_ENABLE            0x00000001
4373 #define MR_RESTART_AN           0x00000002
4374 #define MR_AN_COMPLETE          0x00000004
4375 #define MR_PAGE_RX              0x00000008
4376 #define MR_NP_LOADED            0x00000010
4377 #define MR_TOGGLE_TX            0x00000020
4378 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4379 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4380 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4381 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4382 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4383 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4384 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4385 #define MR_TOGGLE_RX            0x00002000
4386 #define MR_NP_RX                0x00004000
4387
4388 #define MR_LINK_OK              0x80000000
4389
4390         unsigned long link_time, cur_time;
4391
4392         u32 ability_match_cfg;
4393         int ability_match_count;
4394
4395         char ability_match, idle_match, ack_match;
4396
4397         u32 txconfig, rxconfig;
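     /* Bits of the 1000BASE-X /C/ config word as seen through the MAC's
      * TX/RX autoneg registers.  The layout appears byte-swapped relative
      * to the IEEE D0-D15 numbering (hence PS1 and PS2 in opposite
      * bytes); ANEG_CFG_INVAL then covers the reserved D0-D4/D9-D10
      * positions.
      */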
4398 #define ANEG_CFG_NP             0x00000080
4399 #define ANEG_CFG_ACK            0x00000040
4400 #define ANEG_CFG_RF2            0x00000020
4401 #define ANEG_CFG_RF1            0x00000010
4402 #define ANEG_CFG_PS2            0x00000001
4403 #define ANEG_CFG_PS1            0x00008000
4404 #define ANEG_CFG_HD             0x00004000
4405 #define ANEG_CFG_FD             0x00002000
4406 #define ANEG_CFG_INVAL          0x00001f06
4407
4408 };
4409 #define ANEG_OK         0
4410 #define ANEG_DONE       1
4411 #define ANEG_TIMER_ENAB 2
4412 #define ANEG_FAILED     -1
4413
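     /* Measured in ap->cur_time ticks; with fiber_autoneg()'s udelay(1)
      * polling loop this works out to roughly 10 ms.
      */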
4414 #define ANEG_STATE_SETTLE_TIME  10000
4415
4416 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4417                                    struct tg3_fiber_aneginfo *ap)
4418 {
4419         u16 flowctrl;
4420         unsigned long delta;
4421         u32 rx_cfg_reg;
4422         int ret;
4423
4424         if (ap->state == ANEG_STATE_UNKNOWN) {
4425                 ap->rxconfig = 0;
4426                 ap->link_time = 0;
4427                 ap->cur_time = 0;
4428                 ap->ability_match_cfg = 0;
4429                 ap->ability_match_count = 0;
4430                 ap->ability_match = 0;
4431                 ap->idle_match = 0;
4432                 ap->ack_match = 0;
4433         }
4434         ap->cur_time++;
4435
4436         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4437                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4438
4439                 if (rx_cfg_reg != ap->ability_match_cfg) {
4440                         ap->ability_match_cfg = rx_cfg_reg;
4441                         ap->ability_match = 0;
4442                         ap->ability_match_count = 0;
4443                 } else {
4444                         if (++ap->ability_match_count > 1) {
4445                                 ap->ability_match = 1;
4446                                 ap->ability_match_cfg = rx_cfg_reg;
4447                         }
4448                 }
4449                 if (rx_cfg_reg & ANEG_CFG_ACK)
4450                         ap->ack_match = 1;
4451                 else
4452                         ap->ack_match = 0;
4453
4454                 ap->idle_match = 0;
4455         } else {
4456                 ap->idle_match = 1;
4457                 ap->ability_match_cfg = 0;
4458                 ap->ability_match_count = 0;
4459                 ap->ability_match = 0;
4460                 ap->ack_match = 0;
4461
4462                 rx_cfg_reg = 0;
4463         }
4464
4465         ap->rxconfig = rx_cfg_reg;
4466         ret = ANEG_OK;
4467
4468         switch (ap->state) {
4469         case ANEG_STATE_UNKNOWN:
4470                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4471                         ap->state = ANEG_STATE_AN_ENABLE;
4472
4473                 /* fallthru */
4474         case ANEG_STATE_AN_ENABLE:
4475                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4476                 if (ap->flags & MR_AN_ENABLE) {
4477                         ap->link_time = 0;
4478                         ap->cur_time = 0;
4479                         ap->ability_match_cfg = 0;
4480                         ap->ability_match_count = 0;
4481                         ap->ability_match = 0;
4482                         ap->idle_match = 0;
4483                         ap->ack_match = 0;
4484
4485                         ap->state = ANEG_STATE_RESTART_INIT;
4486                 } else {
4487                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4488                 }
4489                 break;
4490
4491         case ANEG_STATE_RESTART_INIT:
4492                 ap->link_time = ap->cur_time;
4493                 ap->flags &= ~(MR_NP_LOADED);
4494                 ap->txconfig = 0;
4495                 tw32(MAC_TX_AUTO_NEG, 0);
4496                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4497                 tw32_f(MAC_MODE, tp->mac_mode);
4498                 udelay(40);
4499
4500                 ret = ANEG_TIMER_ENAB;
4501                 ap->state = ANEG_STATE_RESTART;
4502
4503                 /* fallthru */
4504         case ANEG_STATE_RESTART:
4505                 delta = ap->cur_time - ap->link_time;
4506                 if (delta > ANEG_STATE_SETTLE_TIME)
4507                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4508                 else
4509                         ret = ANEG_TIMER_ENAB;
4510                 break;
4511
4512         case ANEG_STATE_DISABLE_LINK_OK:
4513                 ret = ANEG_DONE;
4514                 break;
4515
4516         case ANEG_STATE_ABILITY_DETECT_INIT:
4517                 ap->flags &= ~(MR_TOGGLE_TX);
4518                 ap->txconfig = ANEG_CFG_FD;
4519                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4520                 if (flowctrl & ADVERTISE_1000XPAUSE)
4521                         ap->txconfig |= ANEG_CFG_PS1;
4522                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4523                         ap->txconfig |= ANEG_CFG_PS2;
4524                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4525                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4526                 tw32_f(MAC_MODE, tp->mac_mode);
4527                 udelay(40);
4528
4529                 ap->state = ANEG_STATE_ABILITY_DETECT;
4530                 break;
4531
4532         case ANEG_STATE_ABILITY_DETECT:
4533                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4534                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4535                 break;
4536
4537         case ANEG_STATE_ACK_DETECT_INIT:
4538                 ap->txconfig |= ANEG_CFG_ACK;
4539                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4540                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4541                 tw32_f(MAC_MODE, tp->mac_mode);
4542                 udelay(40);
4543
4544                 ap->state = ANEG_STATE_ACK_DETECT;
4545
4546                 /* fallthru */
4547         case ANEG_STATE_ACK_DETECT:
4548                 if (ap->ack_match != 0) {
4549                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4550                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4551                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4552                         } else {
4553                                 ap->state = ANEG_STATE_AN_ENABLE;
4554                         }
4555                 } else if (ap->ability_match != 0 &&
4556                            ap->rxconfig == 0) {
4557                         ap->state = ANEG_STATE_AN_ENABLE;
4558                 }
4559                 break;
4560
4561         case ANEG_STATE_COMPLETE_ACK_INIT:
4562                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4563                         ret = ANEG_FAILED;
4564                         break;
4565                 }
4566                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4567                                MR_LP_ADV_HALF_DUPLEX |
4568                                MR_LP_ADV_SYM_PAUSE |
4569                                MR_LP_ADV_ASYM_PAUSE |
4570                                MR_LP_ADV_REMOTE_FAULT1 |
4571                                MR_LP_ADV_REMOTE_FAULT2 |
4572                                MR_LP_ADV_NEXT_PAGE |
4573                                MR_TOGGLE_RX |
4574                                MR_NP_RX);
4575                 if (ap->rxconfig & ANEG_CFG_FD)
4576                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4577                 if (ap->rxconfig & ANEG_CFG_HD)
4578                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4579                 if (ap->rxconfig & ANEG_CFG_PS1)
4580                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4581                 if (ap->rxconfig & ANEG_CFG_PS2)
4582                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4583                 if (ap->rxconfig & ANEG_CFG_RF1)
4584                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4585                 if (ap->rxconfig & ANEG_CFG_RF2)
4586                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4587                 if (ap->rxconfig & ANEG_CFG_NP)
4588                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4589
4590                 ap->link_time = ap->cur_time;
4591
4592                 ap->flags ^= (MR_TOGGLE_TX);
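                     /* 0x0008 is presumably the next-page Toggle bit (D11)
                      * in the byte-swapped layout noted above. */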
4593                 if (ap->rxconfig & 0x0008)
4594                         ap->flags |= MR_TOGGLE_RX;
4595                 if (ap->rxconfig & ANEG_CFG_NP)
4596                         ap->flags |= MR_NP_RX;
4597                 ap->flags |= MR_PAGE_RX;
4598
4599                 ap->state = ANEG_STATE_COMPLETE_ACK;
4600                 ret = ANEG_TIMER_ENAB;
4601                 break;
4602
4603         case ANEG_STATE_COMPLETE_ACK:
4604                 if (ap->ability_match != 0 &&
4605                     ap->rxconfig == 0) {
4606                         ap->state = ANEG_STATE_AN_ENABLE;
4607                         break;
4608                 }
4609                 delta = ap->cur_time - ap->link_time;
4610                 if (delta > ANEG_STATE_SETTLE_TIME) {
4611                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4612                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4613                         } else {
4614                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4615                                     !(ap->flags & MR_NP_RX)) {
4616                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4617                                 } else {
4618                                         ret = ANEG_FAILED;
4619                                 }
4620                         }
4621                 }
4622                 break;
4623
4624         case ANEG_STATE_IDLE_DETECT_INIT:
4625                 ap->link_time = ap->cur_time;
4626                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4627                 tw32_f(MAC_MODE, tp->mac_mode);
4628                 udelay(40);
4629
4630                 ap->state = ANEG_STATE_IDLE_DETECT;
4631                 ret = ANEG_TIMER_ENAB;
4632                 break;
4633
4634         case ANEG_STATE_IDLE_DETECT:
4635                 if (ap->ability_match != 0 &&
4636                     ap->rxconfig == 0) {
4637                         ap->state = ANEG_STATE_AN_ENABLE;
4638                         break;
4639                 }
4640                 delta = ap->cur_time - ap->link_time;
4641                 if (delta > ANEG_STATE_SETTLE_TIME) {
4642                         /* XXX another gem from the Broadcom driver :( */
4643                         ap->state = ANEG_STATE_LINK_OK;
4644                 }
4645                 break;
4646
4647         case ANEG_STATE_LINK_OK:
4648                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4649                 ret = ANEG_DONE;
4650                 break;
4651
4652         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4653                 /* Next-page exchange is not implemented. */
4654                 break;
4655
4656         case ANEG_STATE_NEXT_PAGE_WAIT:
4657                 /* Next-page exchange is not implemented. */
4658                 break;
4659
4660         default:
4661                 ret = ANEG_FAILED;
4662                 break;
4663         }
4664
4665         return ret;
4666 }
4667
4668 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4669 {
4670         int res = 0;
4671         struct tg3_fiber_aneginfo aninfo;
4672         int status = ANEG_FAILED;
4673         unsigned int tick;
4674         u32 tmp;
4675
4676         tw32_f(MAC_TX_AUTO_NEG, 0);
4677
4678         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4679         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4680         udelay(40);
4681
4682         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4683         udelay(40);
4684
4685         memset(&aninfo, 0, sizeof(aninfo));
4686         aninfo.flags |= MR_AN_ENABLE;
4687         aninfo.state = ANEG_STATE_UNKNOWN;
4688         aninfo.cur_time = 0;
4689         tick = 0;
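             /* Crank the state machine at ~1 us per tick, for at most
              * ~195 ms, until it reports done or failed. */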
4690         while (++tick < 195000) {
4691                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4692                 if (status == ANEG_DONE || status == ANEG_FAILED)
4693                         break;
4694
4695                 udelay(1);
4696         }
4697
4698         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4699         tw32_f(MAC_MODE, tp->mac_mode);
4700         udelay(40);
4701
4702         *txflags = aninfo.txconfig;
4703         *rxflags = aninfo.flags;
4704
4705         if (status == ANEG_DONE &&
4706             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4707                              MR_LP_ADV_FULL_DUPLEX)))
4708                 res = 1;
4709
4710         return res;
4711 }
4712
4713 static void tg3_init_bcm8002(struct tg3 *tp)
4714 {
4715         u32 mac_status = tr32(MAC_STATUS);
4716         int i;
4717
4718         /* Reset when initializing for the first time or when we have a link. */
4719         if (tg3_flag(tp, INIT_COMPLETE) &&
4720             !(mac_status & MAC_STATUS_PCS_SYNCED))
4721                 return;
4722
4723         /* Set PLL lock range. */
4724         tg3_writephy(tp, 0x16, 0x8007);
4725
4726         /* SW reset */
4727         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4728
4729         /* Wait for reset to complete. */
4730         /* XXX Should sleep via schedule_timeout() instead of busy-waiting. */
4731         for (i = 0; i < 500; i++)
4732                 udelay(10);
4733
4734         /* Config mode; select PMA/Ch 1 regs. */
4735         tg3_writephy(tp, 0x10, 0x8411);
4736
4737         /* Enable auto-lock and comdet, select txclk for tx. */
4738         tg3_writephy(tp, 0x11, 0x0a10);
4739
4740         tg3_writephy(tp, 0x18, 0x00a0);
4741         tg3_writephy(tp, 0x16, 0x41ff);
4742
4743         /* Assert and deassert POR. */
4744         tg3_writephy(tp, 0x13, 0x0400);
4745         udelay(40);
4746         tg3_writephy(tp, 0x13, 0x0000);
4747
4748         tg3_writephy(tp, 0x11, 0x0a50);
4749         udelay(40);
4750         tg3_writephy(tp, 0x11, 0x0a10);
4751
4752         /* Wait for signal to stabilize */
4753         /* XXX Should sleep via schedule_timeout() instead of busy-waiting. */
4754         for (i = 0; i < 15000; i++)
4755                 udelay(10);
4756
4757         /* Deselect the channel register so we can read the PHYID
4758          * later.
4759          */
4760         tg3_writephy(tp, 0x10, 0x8011);
4761 }
4762
4763 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4764 {
4765         u16 flowctrl;
4766         u32 sg_dig_ctrl, sg_dig_status;
4767         u32 serdes_cfg, expected_sg_dig_ctrl;
4768         int workaround, port_a;
4769         int current_link_up;
4770
4771         serdes_cfg = 0;
4772         expected_sg_dig_ctrl = 0;
4773         workaround = 0;
4774         port_a = 1;
4775         current_link_up = 0;
4776
4777         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4778             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4779                 workaround = 1;
4780                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4781                         port_a = 0;
4782
4783                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4784                 /* preserve bits 20-23 for voltage regulator */
4785                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4786         }
4787
4788         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4789
4790         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4791                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4792                         if (workaround) {
4793                                 u32 val = serdes_cfg;
4794
4795                                 if (port_a)
4796                                         val |= 0xc010000;
4797                                 else
4798                                         val |= 0x4010000;
4799                                 tw32_f(MAC_SERDES_CFG, val);
4800                         }
4801
4802                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4803                 }
4804                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4805                         tg3_setup_flow_control(tp, 0, 0);
4806                         current_link_up = 1;
4807                 }
4808                 goto out;
4809         }
4810
4811         /* Want auto-negotiation.  */
4812         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4813
4814         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4815         if (flowctrl & ADVERTISE_1000XPAUSE)
4816                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4817         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4818                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4819
4820         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4821                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4822                     tp->serdes_counter &&
4823                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4824                                     MAC_STATUS_RCVD_CFG)) ==
4825                      MAC_STATUS_PCS_SYNCED)) {
4826                         tp->serdes_counter--;
4827                         current_link_up = 1;
4828                         goto out;
4829                 }
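                     /* (Re)start hardware autoneg: pulse a soft reset into
                      * SG_DIG_CTRL along with the desired advertisement,
                      * then arm the autoneg timeout counter. */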
4830 restart_autoneg:
4831                 if (workaround)
4832                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4833                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4834                 udelay(5);
4835                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4836
4837                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4838                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4839         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4840                                  MAC_STATUS_SIGNAL_DET)) {
4841                 sg_dig_status = tr32(SG_DIG_STATUS);
4842                 mac_status = tr32(MAC_STATUS);
4843
4844                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4845                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4846                         u32 local_adv = 0, remote_adv = 0;
4847
4848                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4849                                 local_adv |= ADVERTISE_1000XPAUSE;
4850                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4851                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4852
4853                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4854                                 remote_adv |= LPA_1000XPAUSE;
4855                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4856                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4857
4858                         tp->link_config.rmt_adv =
4859                                            mii_adv_to_ethtool_adv_x(remote_adv);
4860
4861                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4862                         current_link_up = 1;
4863                         tp->serdes_counter = 0;
4864                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4865                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4866                         if (tp->serdes_counter)
4867                                 tp->serdes_counter--;
4868                         else {
4869                                 if (workaround) {
4870                                         u32 val = serdes_cfg;
4871
4872                                         if (port_a)
4873                                                 val |= 0xc010000;
4874                                         else
4875                                                 val |= 0x4010000;
4876
4877                                         tw32_f(MAC_SERDES_CFG, val);
4878                                 }
4879
4880                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4881                                 udelay(40);
4882
4883                                 /* Link parallel detection: the link
4884                                  * is up only if we have PCS_SYNC and
4885                                  * are not receiving config code words. */
4886                                 mac_status = tr32(MAC_STATUS);
4887                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4888                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4889                                         tg3_setup_flow_control(tp, 0, 0);
4890                                         current_link_up = 1;
4891                                         tp->phy_flags |=
4892                                                 TG3_PHYFLG_PARALLEL_DETECT;
4893                                         tp->serdes_counter =
4894                                                 SERDES_PARALLEL_DET_TIMEOUT;
4895                                 } else
4896                                         goto restart_autoneg;
4897                         }
4898                 }
4899         } else {
4900                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4901                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4902         }
4903
4904 out:
4905         return current_link_up;
4906 }
4907
4908 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4909 {
4910         int current_link_up = 0;
4911
4912         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4913                 goto out;
4914
4915         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4916                 u32 txflags, rxflags;
4917                 int i;
4918
4919                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4920                         u32 local_adv = 0, remote_adv = 0;
4921
4922                         if (txflags & ANEG_CFG_PS1)
4923                                 local_adv |= ADVERTISE_1000XPAUSE;
4924                         if (txflags & ANEG_CFG_PS2)
4925                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4926
4927                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4928                                 remote_adv |= LPA_1000XPAUSE;
4929                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4930                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4931
4932                         tp->link_config.rmt_adv =
4933                                            mii_adv_to_ethtool_adv_x(remote_adv);
4934
4935                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4936
4937                         current_link_up = 1;
4938                 }
4939                 for (i = 0; i < 30; i++) {
4940                         udelay(20);
4941                         tw32_f(MAC_STATUS,
4942                                (MAC_STATUS_SYNC_CHANGED |
4943                                 MAC_STATUS_CFG_CHANGED));
4944                         udelay(40);
4945                         if ((tr32(MAC_STATUS) &
4946                              (MAC_STATUS_SYNC_CHANGED |
4947                               MAC_STATUS_CFG_CHANGED)) == 0)
4948                                 break;
4949                 }
4950
4951                 mac_status = tr32(MAC_STATUS);
4952                 if (current_link_up == 0 &&
4953                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4954                     !(mac_status & MAC_STATUS_RCVD_CFG))
4955                         current_link_up = 1;
4956         } else {
4957                 tg3_setup_flow_control(tp, 0, 0);
4958
4959                 /* Force the link up at 1000 Mb/s full duplex. */
4960                 current_link_up = 1;
4961
4962                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4963                 udelay(40);
4964
4965                 tw32_f(MAC_MODE, tp->mac_mode);
4966                 udelay(40);
4967         }
4968
4969 out:
4970         return current_link_up;
4971 }
4972
4973 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4974 {
4975         u32 orig_pause_cfg;
4976         u16 orig_active_speed;
4977         u8 orig_active_duplex;
4978         u32 mac_status;
4979         int current_link_up;
4980         int i;
4981
4982         orig_pause_cfg = tp->link_config.active_flowctrl;
4983         orig_active_speed = tp->link_config.active_speed;
4984         orig_active_duplex = tp->link_config.active_duplex;
4985
4986         if (!tg3_flag(tp, HW_AUTONEG) &&
4987             netif_carrier_ok(tp->dev) &&
4988             tg3_flag(tp, INIT_COMPLETE)) {
4989                 mac_status = tr32(MAC_STATUS);
4990                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4991                                MAC_STATUS_SIGNAL_DET |
4992                                MAC_STATUS_CFG_CHANGED |
4993                                MAC_STATUS_RCVD_CFG);
4994                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4995                                    MAC_STATUS_SIGNAL_DET)) {
4996                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4997                                             MAC_STATUS_CFG_CHANGED));
4998                         return 0;
4999                 }
5000         }
5001
5002         tw32_f(MAC_TX_AUTO_NEG, 0);
5003
5004         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5005         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5006         tw32_f(MAC_MODE, tp->mac_mode);
5007         udelay(40);
5008
5009         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5010                 tg3_init_bcm8002(tp);
5011
5012         /* Enable link change events even when polling the serdes.  */
5013         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5014         udelay(40);
5015
5016         current_link_up = 0;
5017         tp->link_config.rmt_adv = 0;
5018         mac_status = tr32(MAC_STATUS);
5019
5020         if (tg3_flag(tp, HW_AUTONEG))
5021                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5022         else
5023                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5024
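             /* Acknowledge any latched link-change event in the status
              * block while keeping the updated bit set. */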
5025         tp->napi[0].hw_status->status =
5026                 (SD_STATUS_UPDATED |
5027                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5028
5029         for (i = 0; i < 100; i++) {
5030                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5031                                     MAC_STATUS_CFG_CHANGED));
5032                 udelay(5);
5033                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5034                                          MAC_STATUS_CFG_CHANGED |
5035                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5036                         break;
5037         }
5038
5039         mac_status = tr32(MAC_STATUS);
5040         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5041                 current_link_up = 0;
5042                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5043                     tp->serdes_counter == 0) {
5044                         tw32_f(MAC_MODE, (tp->mac_mode |
5045                                           MAC_MODE_SEND_CONFIGS));
5046                         udelay(1);
5047                         tw32_f(MAC_MODE, tp->mac_mode);
5048                 }
5049         }
5050
5051         if (current_link_up == 1) {
5052                 tp->link_config.active_speed = SPEED_1000;
5053                 tp->link_config.active_duplex = DUPLEX_FULL;
5054                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5055                                     LED_CTRL_LNKLED_OVERRIDE |
5056                                     LED_CTRL_1000MBPS_ON));
5057         } else {
5058                 tp->link_config.active_speed = SPEED_UNKNOWN;
5059                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5060                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5061                                     LED_CTRL_LNKLED_OVERRIDE |
5062                                     LED_CTRL_TRAFFIC_OVERRIDE));
5063         }
5064
5065         if (current_link_up != netif_carrier_ok(tp->dev)) {
5066                 if (current_link_up)
5067                         netif_carrier_on(tp->dev);
5068                 else
5069                         netif_carrier_off(tp->dev);
5070                 tg3_link_report(tp);
5071         } else {
5072                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5073                 if (orig_pause_cfg != now_pause_cfg ||
5074                     orig_active_speed != tp->link_config.active_speed ||
5075                     orig_active_duplex != tp->link_config.active_duplex)
5076                         tg3_link_report(tp);
5077         }
5078
5079         return 0;
5080 }
5081
5082 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5083 {
5084         int current_link_up, err = 0;
5085         u32 bmsr, bmcr;
5086         u16 current_speed;
5087         u8 current_duplex;
5088         u32 local_adv, remote_adv;
5089
5090         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5091         tw32_f(MAC_MODE, tp->mac_mode);
5092         udelay(40);
5093
5094         tw32(MAC_EVENT, 0);
5095
5096         tw32_f(MAC_STATUS,
5097              (MAC_STATUS_SYNC_CHANGED |
5098               MAC_STATUS_CFG_CHANGED |
5099               MAC_STATUS_MI_COMPLETION |
5100               MAC_STATUS_LNKSTATE_CHANGED));
5101         udelay(40);
5102
5103         if (force_reset)
5104                 tg3_phy_reset(tp);
5105
5106         current_link_up = 0;
5107         current_speed = SPEED_UNKNOWN;
5108         current_duplex = DUPLEX_UNKNOWN;
5109         tp->link_config.rmt_adv = 0;
5110
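             /* BMSR link status is latched-low; the second read returns
              * the current state. */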
5111         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5112         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5113         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5114                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5115                         bmsr |= BMSR_LSTATUS;
5116                 else
5117                         bmsr &= ~BMSR_LSTATUS;
5118         }
5119
5120         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5121
5122         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5123             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5124                 /* do nothing, just check for link up at the end */
5125         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5126                 u32 adv, newadv;
5127
5128                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5129                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5130                                  ADVERTISE_1000XPAUSE |
5131                                  ADVERTISE_1000XPSE_ASYM |
5132                                  ADVERTISE_SLCT);
5133
5134                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5135                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5136
5137                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5138                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5139                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5140                         tg3_writephy(tp, MII_BMCR, bmcr);
5141
5142                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5143                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5144                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5145
5146                         return err;
5147                 }
5148         } else {
5149                 u32 new_bmcr;
5150
5151                 bmcr &= ~BMCR_SPEED1000;
5152                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5153
5154                 if (tp->link_config.duplex == DUPLEX_FULL)
5155                         new_bmcr |= BMCR_FULLDPLX;
5156
5157                 if (new_bmcr != bmcr) {
5158                         /* BMCR_SPEED1000 is a reserved bit that needs
5159                          * to be set on write.
5160                          */
5161                         new_bmcr |= BMCR_SPEED1000;
5162
5163                         /* Force a linkdown */
5164                         if (netif_carrier_ok(tp->dev)) {
5165                                 u32 adv;
5166
5167                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5168                                 adv &= ~(ADVERTISE_1000XFULL |
5169                                          ADVERTISE_1000XHALF |
5170                                          ADVERTISE_SLCT);
5171                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5172                                 tg3_writephy(tp, MII_BMCR, bmcr |
5173                                                            BMCR_ANRESTART |
5174                                                            BMCR_ANENABLE);
5175                                 udelay(10);
5176                                 netif_carrier_off(tp->dev);
5177                         }
5178                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5179                         bmcr = new_bmcr;
5180                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5181                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5182                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5183                             ASIC_REV_5714) {
5184                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5185                                         bmsr |= BMSR_LSTATUS;
5186                                 else
5187                                         bmsr &= ~BMSR_LSTATUS;
5188                         }
5189                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5190                 }
5191         }
5192
5193         if (bmsr & BMSR_LSTATUS) {
5194                 current_speed = SPEED_1000;
5195                 current_link_up = 1;
5196                 if (bmcr & BMCR_FULLDPLX)
5197                         current_duplex = DUPLEX_FULL;
5198                 else
5199                         current_duplex = DUPLEX_HALF;
5200
5201                 local_adv = 0;
5202                 remote_adv = 0;
5203
5204                 if (bmcr & BMCR_ANENABLE) {
5205                         u32 common;
5206
5207                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5208                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5209                         common = local_adv & remote_adv;
5210                         if (common & (ADVERTISE_1000XHALF |
5211                                       ADVERTISE_1000XFULL)) {
5212                                 if (common & ADVERTISE_1000XFULL)
5213                                         current_duplex = DUPLEX_FULL;
5214                                 else
5215                                         current_duplex = DUPLEX_HALF;
5216
5217                                 tp->link_config.rmt_adv =
5218                                            mii_adv_to_ethtool_adv_x(remote_adv);
5219                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5220                                 /* Link is up via parallel detect */
5221                         } else {
5222                                 current_link_up = 0;
5223                         }
5224                 }
5225         }
5226
5227         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5228                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5229
5230         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5231         if (tp->link_config.active_duplex == DUPLEX_HALF)
5232                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5233
5234         tw32_f(MAC_MODE, tp->mac_mode);
5235         udelay(40);
5236
5237         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5238
5239         tp->link_config.active_speed = current_speed;
5240         tp->link_config.active_duplex = current_duplex;
5241
5242         if (current_link_up != netif_carrier_ok(tp->dev)) {
5243                 if (current_link_up)
5244                         netif_carrier_on(tp->dev);
5245                 else {
5246                         netif_carrier_off(tp->dev);
5247                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5248                 }
5249                 tg3_link_report(tp);
5250         }
5251         return err;
5252 }
5253
5254 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5255 {
5256         if (tp->serdes_counter) {
5257                 /* Give autoneg time to complete. */
5258                 tp->serdes_counter--;
5259                 return;
5260         }
5261
5262         if (!netif_carrier_ok(tp->dev) &&
5263             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5264                 u32 bmcr;
5265
5266                 tg3_readphy(tp, MII_BMCR, &bmcr);
5267                 if (bmcr & BMCR_ANENABLE) {
5268                         u32 phy1, phy2;
5269
5270                         /* Select shadow register 0x1f */
5271                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5272                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5273
5274                         /* Select expansion interrupt status register */
5275                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5276                                          MII_TG3_DSP_EXP1_INT_STAT);
5277                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5278                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5279
5280                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5281                                 /* We have signal detect and are not
5282                                  * receiving config code words; the link
5283                                  * is up by parallel detection.
5284                                  */
5285
5286                                 bmcr &= ~BMCR_ANENABLE;
5287                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5288                                 tg3_writephy(tp, MII_BMCR, bmcr);
5289                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5290                         }
5291                 }
5292         } else if (netif_carrier_ok(tp->dev) &&
5293                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5294                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5295                 u32 phy2;
5296
5297                 /* Select expansion interrupt status register */
5298                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5299                                  MII_TG3_DSP_EXP1_INT_STAT);
5300                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5301                 if (phy2 & 0x20) {
5302                         u32 bmcr;
5303
5304                         /* Config code words received, turn on autoneg. */
5305                         tg3_readphy(tp, MII_BMCR, &bmcr);
5306                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5307
5308                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5309
5310                 }
5311         }
5312 }
5313
5314 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5315 {
5316         u32 val;
5317         int err;
5318
5319         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5320                 err = tg3_setup_fiber_phy(tp, force_reset);
5321         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5322                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5323         else
5324                 err = tg3_setup_copper_phy(tp, force_reset);
5325
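             /* 5784 AX: re-tune the GRC timer prescaler to the current MAC
              * clock, evidently holding the timebase near 1 MHz (62.5/65,
              * 6.25/6, and presumably 12.5/12 for the remaining case). */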
5326         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5327                 u32 scale;
5328
5329                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5330                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5331                         scale = 65;
5332                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5333                         scale = 6;
5334                 else
5335                         scale = 12;
5336
5337                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5338                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5339                 tw32(GRC_MISC_CFG, val);
5340         }
5341
5342         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5343               (6 << TX_LENGTHS_IPG_SHIFT);
5344         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5345                 val |= tr32(MAC_TX_LENGTHS) &
5346                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5347                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5348
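             /* 1000 Mb/s half duplex needs the extended slot time for
              * carrier extension; all other modes use the standard slot. */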
5349         if (tp->link_config.active_speed == SPEED_1000 &&
5350             tp->link_config.active_duplex == DUPLEX_HALF)
5351                 tw32(MAC_TX_LENGTHS, val |
5352                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5353         else
5354                 tw32(MAC_TX_LENGTHS, val |
5355                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5356
5357         if (!tg3_flag(tp, 5705_PLUS)) {
5358                 if (netif_carrier_ok(tp->dev)) {
5359                         tw32(HOSTCC_STAT_COAL_TICKS,
5360                              tp->coal.stats_block_coalesce_usecs);
5361                 } else {
5362                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5363                 }
5364         }
5365
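             /* ASPM workaround: with the link up, raise the L1 entry
              * threshold to maximum so ASPM L1 cannot engage under traffic;
              * restore the saved threshold once the link drops. */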
5366         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5367                 val = tr32(PCIE_PWR_MGMT_THRESH);
5368                 if (!netif_carrier_ok(tp->dev))
5369                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5370                               tp->pwrmgmt_thresh;
5371                 else
5372                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5373                 tw32(PCIE_PWR_MGMT_THRESH, val);
5374         }
5375
5376         return err;
5377 }
5378
5379 static inline int tg3_irq_sync(struct tg3 *tp)
5380 {
5381         return tp->irq_sync;
5382 }
5383
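     /* Read 'len' bytes of register space starting at 'off' into the
      * dump buffer at the matching offset.
      */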
5384 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5385 {
5386         int i;
5387
5388         dst = (u32 *)((u8 *)dst + off);
5389         for (i = 0; i < len; i += sizeof(u32))
5390                 *dst++ = tr32(off + i);
5391 }
5392
5393 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5394 {
5395         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5396         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5397         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5398         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5399         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5400         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5401         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5402         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5403         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5404         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5405         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5406         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5407         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5408         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5409         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5410         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5411         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5412         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5413         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5414
5415         if (tg3_flag(tp, SUPPORT_MSIX))
5416                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5417
5418         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5419         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5420         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5421         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5422         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5423         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5424         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5425         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5426
5427         if (!tg3_flag(tp, 5705_PLUS)) {
5428                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5429                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5430                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5431         }
5432
5433         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5434         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5435         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5436         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5437         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5438
5439         if (tg3_flag(tp, NVRAM))
5440                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5441 }
5442
5443 static void tg3_dump_state(struct tg3 *tp)
5444 {
5445         int i;
5446         u32 *regs;
5447
5448         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5449         if (!regs) {
5450                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5451                 return;
5452         }
5453
5454         if (tg3_flag(tp, PCI_EXPRESS)) {
5455                 /* Read up to but not including private PCI registers */
5456                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5457                         regs[i / sizeof(u32)] = tr32(i);
5458         } else
5459                 tg3_dump_legacy_regs(tp, regs);
5460
5461         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5462                 if (!regs[i + 0] && !regs[i + 1] &&
5463                     !regs[i + 2] && !regs[i + 3])
5464                         continue;
5465
5466                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5467                            i * 4,
5468                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5469         }
5470
5471         kfree(regs);
5472
5473         for (i = 0; i < tp->irq_cnt; i++) {
5474                 struct tg3_napi *tnapi = &tp->napi[i];
5475
5476                 /* SW status block */
5477                 netdev_err(tp->dev,
5478                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5479                            i,
5480                            tnapi->hw_status->status,
5481                            tnapi->hw_status->status_tag,
5482                            tnapi->hw_status->rx_jumbo_consumer,
5483                            tnapi->hw_status->rx_consumer,
5484                            tnapi->hw_status->rx_mini_consumer,
5485                            tnapi->hw_status->idx[0].rx_producer,
5486                            tnapi->hw_status->idx[0].tx_consumer);
5487
5488                 netdev_err(tp->dev,
5489                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5490                            i,
5491                            tnapi->last_tag, tnapi->last_irq_tag,
5492                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5493                            tnapi->rx_rcb_ptr,
5494                            tnapi->prodring.rx_std_prod_idx,
5495                            tnapi->prodring.rx_std_cons_idx,
5496                            tnapi->prodring.rx_jmb_prod_idx,
5497                            tnapi->prodring.rx_jmb_cons_idx);
5498         }
5499 }
5500
5501 /* This is called whenever we suspect that the system chipset is re-
5502  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5503  * is bogus tx completions. We try to recover by setting the
5504  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5505  * in the workqueue.
5506  */
5507 static void tg3_tx_recover(struct tg3 *tp)
5508 {
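             /* If the reorder workaround were already active, or mailbox
              * writes went through config space (which cannot be reordered
              * this way), bogus completions would mean some other bug. */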
5509         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5510                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5511
5512         netdev_warn(tp->dev,
5513                     "The system may be re-ordering memory-mapped I/O "
5514                     "cycles to the network device, attempting to recover. "
5515                     "Please report the problem to the driver maintainer "
5516                     "and include system chipset information.\n");
5517
5518         spin_lock(&tp->lock);
5519         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5520         spin_unlock(&tp->lock);
5521 }
5522
5523 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5524 {
5525         /* Tell compiler to fetch tx indices from memory. */
5526         barrier();
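             /* Free slots = configured ring depth minus descriptors still
              * in flight (prod - cons, modulo the ring size). */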
5527         return tnapi->tx_pending -
5528                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5529 }
5530
5531 /* Tigon3 never reports partial packet sends, so we do not
5532  * need the special logic that SunGEM requires to handle SKBs
5533  * whose frags have not all been sent yet.
5534  */
5535 static void tg3_tx(struct tg3_napi *tnapi)
5536 {
5537         struct tg3 *tp = tnapi->tp;
5538         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5539         u32 sw_idx = tnapi->tx_cons;
5540         struct netdev_queue *txq;
5541         int index = tnapi - tp->napi;
5542         unsigned int pkts_compl = 0, bytes_compl = 0;
5543
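             /* With TSS, vector 0 carries no TX ring; TX-capable napis
              * start at tp->napi[1], so shift down to the queue index. */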
5544         if (tg3_flag(tp, ENABLE_TSS))
5545                 index--;
5546
5547         txq = netdev_get_tx_queue(tp->dev, index);
5548
5549         while (sw_idx != hw_idx) {
5550                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5551                 struct sk_buff *skb = ri->skb;
5552                 int i, tx_bug = 0;
5553
5554                 if (unlikely(skb == NULL)) {
5555                         tg3_tx_recover(tp);
5556                         return;
5557                 }
5558
5559                 pci_unmap_single(tp->pdev,
5560                                  dma_unmap_addr(ri, mapping),
5561                                  skb_headlen(skb),
5562                                  PCI_DMA_TODEVICE);
5563
5564                 ri->skb = NULL;
5565
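                     /* A mapping split across several BDs (e.g. for DMA
                      * boundary workarounds) leaves the extra ring entries
                      * flagged as fragmented; walk past them. */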
5566                 while (ri->fragmented) {
5567                         ri->fragmented = false;
5568                         sw_idx = NEXT_TX(sw_idx);
5569                         ri = &tnapi->tx_buffers[sw_idx];
5570                 }
5571
5572                 sw_idx = NEXT_TX(sw_idx);
5573
5574                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5575                         ri = &tnapi->tx_buffers[sw_idx];
5576                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5577                                 tx_bug = 1;
5578
5579                         pci_unmap_page(tp->pdev,
5580                                        dma_unmap_addr(ri, mapping),
5581                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5582                                        PCI_DMA_TODEVICE);
5583
5584                         while (ri->fragmented) {
5585                                 ri->fragmented = false;
5586                                 sw_idx = NEXT_TX(sw_idx);
5587                                 ri = &tnapi->tx_buffers[sw_idx];
5588                         }
5589
5590                         sw_idx = NEXT_TX(sw_idx);
5591                 }
5592
5593                 pkts_compl++;
5594                 bytes_compl += skb->len;
5595
5596                 dev_kfree_skb(skb);
5597
5598                 if (unlikely(tx_bug)) {
5599                         tg3_tx_recover(tp);
5600                         return;
5601                 }
5602         }
5603
5604         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5605
5606         tnapi->tx_cons = sw_idx;
5607
5608         /* Need to make the tx_cons update visible to tg3_start_xmit()
5609          * before checking for netif_queue_stopped().  Without the
5610          * memory barrier, there is a small possibility that tg3_start_xmit()
5611          * will miss it and cause the queue to be stopped forever.
5612          */
5613         smp_mb();
5614
5615         if (unlikely(netif_tx_queue_stopped(txq) &&
5616                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5617                 __netif_tx_lock(txq, smp_processor_id());
5618                 if (netif_tx_queue_stopped(txq) &&
5619                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5620                         netif_tx_wake_queue(txq);
5621                 __netif_tx_unlock(txq);
5622         }
5623 }
5624
5625 static void tg3_frag_free(bool is_frag, void *data)
5626 {
5627         if (is_frag)
5628                 put_page(virt_to_head_page(data));
5629         else
5630                 kfree(data);
5631 }
5632
5633 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5634 {
5635         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5636                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5637
5638         if (!ri->data)
5639                 return;
5640
5641         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5642                          map_sz, PCI_DMA_FROMDEVICE);
5643         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
5644         ri->data = NULL;
5645 }
5646
5647
5648 /* Returns size of skb allocated or < 0 on error.
5649  *
5650  * We only need to fill in the address because the other members
5651  * of the RX descriptor are invariant, see tg3_init_rings.
5652  *
5653  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5654  * posting buffers we only dirty the first cache line of the RX
5655  * descriptor (containing the address).  Whereas for the RX status
5656  * buffers the cpu only reads the last cacheline of the RX descriptor
5657  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5658  */
5659 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5660                              u32 opaque_key, u32 dest_idx_unmasked,
5661                              unsigned int *frag_size)
5662 {
5663         struct tg3_rx_buffer_desc *desc;
5664         struct ring_info *map;
5665         u8 *data;
5666         dma_addr_t mapping;
5667         int skb_size, data_size, dest_idx;
5668
5669         switch (opaque_key) {
5670         case RXD_OPAQUE_RING_STD:
5671                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5672                 desc = &tpr->rx_std[dest_idx];
5673                 map = &tpr->rx_std_buffers[dest_idx];
5674                 data_size = tp->rx_pkt_map_sz;
5675                 break;
5676
5677         case RXD_OPAQUE_RING_JUMBO:
5678                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5679                 desc = &tpr->rx_jmb[dest_idx].std;
5680                 map = &tpr->rx_jmb_buffers[dest_idx];
5681                 data_size = TG3_RX_JMB_MAP_SZ;
5682                 break;
5683
5684         default:
5685                 return -EINVAL;
5686         }
5687
5688         /* Do not overwrite any of the map or rp information
5689          * until we are sure we can commit to a new buffer.
5690          *
5691          * Callers depend upon this behavior and assume that
5692          * we leave everything unchanged if we fail.
5693          */
5694         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5695                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
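        /* An illustrative sizing note, assuming 4 KiB pages: with a
         * standard 1500-byte MTU, data_size plus the RX offset and the
         * skb_shared_info tail stays below PAGE_SIZE, so the cheap
         * page-frag allocator is used below; a jumbo (9K) buffer
         * exceeds PAGE_SIZE and falls back to kmalloc().
         */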
5696         if (skb_size <= PAGE_SIZE) {
5697                 data = netdev_alloc_frag(skb_size);
5698                 *frag_size = skb_size;
5699         } else {
5700                 data = kmalloc(skb_size, GFP_ATOMIC);
5701                 *frag_size = 0;
5702         }
5703         if (!data)
5704                 return -ENOMEM;
5705
5706         mapping = pci_map_single(tp->pdev,
5707                                  data + TG3_RX_OFFSET(tp),
5708                                  data_size,
5709                                  PCI_DMA_FROMDEVICE);
5710         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
5711                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
5712                 return -EIO;
5713         }
5714
5715         map->data = data;
5716         dma_unmap_addr_set(map, mapping, mapping);
5717
5718         desc->addr_hi = ((u64)mapping >> 32);
5719         desc->addr_lo = ((u64)mapping & 0xffffffff);
5720
5721         return data_size;
5722 }
5723
5724 /* We only need to move over the address because the other
5725  * members of the RX descriptor are invariant.  See notes above
5726  * tg3_alloc_rx_data for full details.
5727  */
5728 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5729                            struct tg3_rx_prodring_set *dpr,
5730                            u32 opaque_key, int src_idx,
5731                            u32 dest_idx_unmasked)
5732 {
5733         struct tg3 *tp = tnapi->tp;
5734         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5735         struct ring_info *src_map, *dest_map;
5736         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5737         int dest_idx;
5738
5739         switch (opaque_key) {
5740         case RXD_OPAQUE_RING_STD:
5741                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5742                 dest_desc = &dpr->rx_std[dest_idx];
5743                 dest_map = &dpr->rx_std_buffers[dest_idx];
5744                 src_desc = &spr->rx_std[src_idx];
5745                 src_map = &spr->rx_std_buffers[src_idx];
5746                 break;
5747
5748         case RXD_OPAQUE_RING_JUMBO:
5749                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5750                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5751                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5752                 src_desc = &spr->rx_jmb[src_idx].std;
5753                 src_map = &spr->rx_jmb_buffers[src_idx];
5754                 break;
5755
5756         default:
5757                 return;
5758         }
5759
5760         dest_map->data = src_map->data;
5761         dma_unmap_addr_set(dest_map, mapping,
5762                            dma_unmap_addr(src_map, mapping));
5763         dest_desc->addr_hi = src_desc->addr_hi;
5764         dest_desc->addr_lo = src_desc->addr_lo;
5765
5766         /* Ensure that the update to the skb happens after the physical
5767          * addresses have been transferred to the new BD location.
5768          */
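        /* (This smp_wmb() pairs with the smp_rmb() calls in
         * tg3_rx_prodring_xfer(), which depend on seeing the
         * transferred addresses before the cleared src_map->data.)
         */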
5769         smp_wmb();
5770
5771         src_map->data = NULL;
5772 }
5773
5774 /* The RX ring scheme is composed of multiple rings which post fresh
5775  * buffers to the chip, and one special ring the chip uses to report
5776  * status back to the host.
5777  *
5778  * The special ring reports the status of received packets to the
5779  * host.  The chip does not write into the original descriptor the
5780  * RX buffer was obtained from.  The chip simply takes the original
5781  * descriptor as provided by the host, updates the status and length
5782  * field, then writes this into the next status ring entry.
5783  *
5784  * Each ring the host uses to post buffers to the chip is described
5785  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5786  * it is first placed into the on-chip ram.  When the packet's length
5787  * is known, it walks down the TG3_BDINFO entries to select the ring.
5788  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5789  * which is within the range of the new packet's length is chosen.
5790  *
5791  * The "separate ring for rx status" scheme may sound queer, but it makes
5792  * sense from a cache coherency perspective.  If only the host writes
5793  * to the buffer post rings, and only the chip writes to the rx status
5794  * rings, then cache lines never move beyond shared-modified state.
5795  * If both the host and chip were to write into the same ring, cache line
5796  * eviction could occur since both entities want it in an exclusive state.
5797  */
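/* A rough picture of the flow described above (illustrative only):
 *
 *   host -- posts fresh buffers --> std/jumbo producer rings --> chip
 *   chip -- writes status/len ----> return ring (rx_rcb) ------> host
 *
 * tg3_rx() below walks the return ring from tnapi->rx_rcb_ptr up to
 * the hardware producer index, hands completed packets to the stack,
 * and reposts (or recycles) buffers on the producer rings.
 */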
5798 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5799 {
5800         struct tg3 *tp = tnapi->tp;
5801         u32 work_mask, rx_std_posted = 0;
5802         u32 std_prod_idx, jmb_prod_idx;
5803         u32 sw_idx = tnapi->rx_rcb_ptr;
5804         u16 hw_idx;
5805         int received;
5806         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5807
5808         hw_idx = *(tnapi->rx_rcb_prod_idx);
5809         /*
5810          * We need to order the read of hw_idx and the read of
5811          * the opaque cookie.
5812          */
5813         rmb();
5814         work_mask = 0;
5815         received = 0;
5816         std_prod_idx = tpr->rx_std_prod_idx;
5817         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5818         while (sw_idx != hw_idx && budget > 0) {
5819                 struct ring_info *ri;
5820                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5821                 unsigned int len;
5822                 struct sk_buff *skb;
5823                 dma_addr_t dma_addr;
5824                 u32 opaque_key, desc_idx, *post_ptr;
5825                 u8 *data;
5826
5827                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5828                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5829                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5830                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5831                         dma_addr = dma_unmap_addr(ri, mapping);
5832                         data = ri->data;
5833                         post_ptr = &std_prod_idx;
5834                         rx_std_posted++;
5835                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5836                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5837                         dma_addr = dma_unmap_addr(ri, mapping);
5838                         data = ri->data;
5839                         post_ptr = &jmb_prod_idx;
5840                 } else
5841                         goto next_pkt_nopost;
5842
5843                 work_mask |= opaque_key;
5844
5845                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5846                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5847                 drop_it:
5848                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5849                                        desc_idx, *post_ptr);
5850                 drop_it_no_recycle:
5851                         /* Other statistics are kept track of by the card. */
5852                         tp->rx_dropped++;
5853                         goto next_pkt;
5854                 }
5855
5856                 prefetch(data + TG3_RX_OFFSET(tp));
5857                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5858                       ETH_FCS_LEN;
5859
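                /* Copy-break: packets above the copy threshold hand the
                 * DMA buffer itself to the stack via build_skb() and a
                 * replacement buffer is allocated; smaller packets are
                 * copied into a fresh skb so the original buffer can be
                 * recycled in place.
                 */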
5860                 if (len > TG3_RX_COPY_THRESH(tp)) {
5861                         int skb_size;
5862                         unsigned int frag_size;
5863
5864                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5865                                                     *post_ptr, &frag_size);
5866                         if (skb_size < 0)
5867                                 goto drop_it;
5868
5869                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5870                                          PCI_DMA_FROMDEVICE);
5871
5872                         skb = build_skb(data, frag_size);
5873                         if (!skb) {
5874                                 tg3_frag_free(frag_size != 0, data);
5875                                 goto drop_it_no_recycle;
5876                         }
5877                         skb_reserve(skb, TG3_RX_OFFSET(tp));
5878                         /* Ensure that the update to the data happens
5879                          * after the usage of the old DMA mapping.
5880                          */
5881                         smp_wmb();
5882
5883                         ri->data = NULL;
5884
5885                 } else {
5886                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5887                                        desc_idx, *post_ptr);
5888
5889                         skb = netdev_alloc_skb(tp->dev,
5890                                                len + TG3_RAW_IP_ALIGN);
5891                         if (skb == NULL)
5892                                 goto drop_it_no_recycle;
5893
5894                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
5895                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5896                         memcpy(skb->data,
5897                                data + TG3_RX_OFFSET(tp),
5898                                len);
5899                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5900                 }
5901
5902                 skb_put(skb, len);
5903                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5904                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5905                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5906                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5907                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5908                 else
5909                         skb_checksum_none_assert(skb);
5910
5911                 skb->protocol = eth_type_trans(skb, tp->dev);
5912
5913                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5914                     skb->protocol != htons(ETH_P_8021Q)) {
5915                         dev_kfree_skb(skb);
5916                         goto drop_it_no_recycle;
5917                 }
5918
5919                 if (desc->type_flags & RXD_FLAG_VLAN &&
5920                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5921                         __vlan_hwaccel_put_tag(skb,
5922                                                desc->err_vlan & RXD_VLAN_MASK);
5923
5924                 napi_gro_receive(&tnapi->napi, skb);
5925
5926                 received++;
5927                 budget--;
5928
5929 next_pkt:
5930                 (*post_ptr)++;
5931
5932                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5933                         tpr->rx_std_prod_idx = std_prod_idx &
5934                                                tp->rx_std_ring_mask;
5935                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5936                                      tpr->rx_std_prod_idx);
5937                         work_mask &= ~RXD_OPAQUE_RING_STD;
5938                         rx_std_posted = 0;
5939                 }
5940 next_pkt_nopost:
5941                 sw_idx++;
5942                 sw_idx &= tp->rx_ret_ring_mask;
5943
5944                 /* Refresh hw_idx to see if there is new work */
5945                 if (sw_idx == hw_idx) {
5946                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5947                         rmb();
5948                 }
5949         }
5950
5951         /* ACK the status ring. */
5952         tnapi->rx_rcb_ptr = sw_idx;
5953         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5954
5955         /* Refill RX ring(s). */
5956         if (!tg3_flag(tp, ENABLE_RSS)) {
5957                 /* Sync BD data before updating mailbox */
5958                 wmb();
5959
5960                 if (work_mask & RXD_OPAQUE_RING_STD) {
5961                         tpr->rx_std_prod_idx = std_prod_idx &
5962                                                tp->rx_std_ring_mask;
5963                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5964                                      tpr->rx_std_prod_idx);
5965                 }
5966                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5967                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5968                                                tp->rx_jmb_ring_mask;
5969                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5970                                      tpr->rx_jmb_prod_idx);
5971                 }
5972                 mmiowb();
5973         } else if (work_mask) {
5974                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5975                  * updated before the producer indices can be updated.
5976                  */
5977                 smp_wmb();
5978
5979                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5980                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5981
5982                 if (tnapi != &tp->napi[1]) {
5983                         tp->rx_refill = true;
5984                         napi_schedule(&tp->napi[1].napi);
5985                 }
5986         }
5987
5988         return received;
5989 }
5990
5991 static void tg3_poll_link(struct tg3 *tp)
5992 {
5993         /* handle link change and other phy events */
5994         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5995                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5996
5997                 if (sblk->status & SD_STATUS_LINK_CHG) {
5998                         sblk->status = SD_STATUS_UPDATED |
5999                                        (sblk->status & ~SD_STATUS_LINK_CHG);
6000                         spin_lock(&tp->lock);
6001                         if (tg3_flag(tp, USE_PHYLIB)) {
6002                                 tw32_f(MAC_STATUS,
6003                                      (MAC_STATUS_SYNC_CHANGED |
6004                                       MAC_STATUS_CFG_CHANGED |
6005                                       MAC_STATUS_MI_COMPLETION |
6006                                       MAC_STATUS_LNKSTATE_CHANGED));
6007                                 udelay(40);
6008                         } else
6009                                 tg3_setup_phy(tp, 0);
6010                         spin_unlock(&tp->lock);
6011                 }
6012         }
6013 }
6014
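/* With RSS, each vector's NAPI context recycles RX buffers into its own
 * producer ring shadow, but the chip has only one real standard (and one
 * jumbo) producer ring.  This helper migrates recycled ring_info entries
 * and their BD addresses from a source prodring set (spr) back into the
 * destination set (dpr, normally tp->napi[0]'s), stopping at whichever
 * ring wraps first.  It returns -ENOSPC if a destination slot was still
 * occupied, in which case the caller pokes the coalescing engine so the
 * transfer is retried on a later poll.
 */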
6015 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6016                                 struct tg3_rx_prodring_set *dpr,
6017                                 struct tg3_rx_prodring_set *spr)
6018 {
6019         u32 si, di, cpycnt, src_prod_idx;
6020         int i, err = 0;
6021
6022         while (1) {
6023                 src_prod_idx = spr->rx_std_prod_idx;
6024
6025                 /* Make sure updates to the rx_std_buffers[] entries and the
6026                  * standard producer index are seen in the correct order.
6027                  */
6028                 smp_rmb();
6029
6030                 if (spr->rx_std_cons_idx == src_prod_idx)
6031                         break;
6032
6033                 if (spr->rx_std_cons_idx < src_prod_idx)
6034                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6035                 else
6036                         cpycnt = tp->rx_std_ring_mask + 1 -
6037                                  spr->rx_std_cons_idx;
6038
6039                 cpycnt = min(cpycnt,
6040                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6041
6042                 si = spr->rx_std_cons_idx;
6043                 di = dpr->rx_std_prod_idx;
6044
6045                 for (i = di; i < di + cpycnt; i++) {
6046                         if (dpr->rx_std_buffers[i].data) {
6047                                 cpycnt = i - di;
6048                                 err = -ENOSPC;
6049                                 break;
6050                         }
6051                 }
6052
6053                 if (!cpycnt)
6054                         break;
6055
6056                 /* Ensure that updates to the rx_std_buffers ring and the
6057                  * shadowed hardware producer ring from tg3_recycle_rx() are
6058                  * ordered correctly WRT the skb check above.
6059                  */
6060                 smp_rmb();
6061
6062                 memcpy(&dpr->rx_std_buffers[di],
6063                        &spr->rx_std_buffers[si],
6064                        cpycnt * sizeof(struct ring_info));
6065
6066                 for (i = 0; i < cpycnt; i++, di++, si++) {
6067                         struct tg3_rx_buffer_desc *sbd, *dbd;
6068                         sbd = &spr->rx_std[si];
6069                         dbd = &dpr->rx_std[di];
6070                         dbd->addr_hi = sbd->addr_hi;
6071                         dbd->addr_lo = sbd->addr_lo;
6072                 }
6073
6074                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6075                                        tp->rx_std_ring_mask;
6076                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6077                                        tp->rx_std_ring_mask;
6078         }
6079
6080         while (1) {
6081                 src_prod_idx = spr->rx_jmb_prod_idx;
6082
6083                 /* Make sure updates to the rx_jmb_buffers[] entries and
6084                  * the jumbo producer index are seen in the correct order.
6085                  */
6086                 smp_rmb();
6087
6088                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6089                         break;
6090
6091                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6092                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6093                 else
6094                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6095                                  spr->rx_jmb_cons_idx;
6096
6097                 cpycnt = min(cpycnt,
6098                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6099
6100                 si = spr->rx_jmb_cons_idx;
6101                 di = dpr->rx_jmb_prod_idx;
6102
6103                 for (i = di; i < di + cpycnt; i++) {
6104                         if (dpr->rx_jmb_buffers[i].data) {
6105                                 cpycnt = i - di;
6106                                 err = -ENOSPC;
6107                                 break;
6108                         }
6109                 }
6110
6111                 if (!cpycnt)
6112                         break;
6113
6114                 /* Ensure that updates to the rx_jmb_buffers ring and the
6115                  * shadowed hardware producer ring from tg3_recycle_rx() are
6116                  * ordered correctly WRT the skb check above.
6117                  */
6118                 smp_rmb();
6119
6120                 memcpy(&dpr->rx_jmb_buffers[di],
6121                        &spr->rx_jmb_buffers[si],
6122                        cpycnt * sizeof(struct ring_info));
6123
6124                 for (i = 0; i < cpycnt; i++, di++, si++) {
6125                         struct tg3_rx_buffer_desc *sbd, *dbd;
6126                         sbd = &spr->rx_jmb[si].std;
6127                         dbd = &dpr->rx_jmb[di].std;
6128                         dbd->addr_hi = sbd->addr_hi;
6129                         dbd->addr_lo = sbd->addr_lo;
6130                 }
6131
6132                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6133                                        tp->rx_jmb_ring_mask;
6134                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6135                                        tp->rx_jmb_ring_mask;
6136         }
6137
6138         return err;
6139 }
6140
6141 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6142 {
6143         struct tg3 *tp = tnapi->tp;
6144
6145         /* run TX completion thread */
6146         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6147                 tg3_tx(tnapi);
6148                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6149                         return work_done;
6150         }
6151
6152         if (!tnapi->rx_rcb_prod_idx)
6153                 return work_done;
6154
6155         /* run RX thread, within the bounds set by NAPI.
6156          * All RX "locking" is done by ensuring outside
6157          * code synchronizes with tg3->napi.poll()
6158          */
6159         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6160                 work_done += tg3_rx(tnapi, budget - work_done);
6161
6162         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6163                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6164                 int i, err = 0;
6165                 u32 std_prod_idx = dpr->rx_std_prod_idx;
6166                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6167
6168                 tp->rx_refill = false;
6169                 for (i = 1; i < tp->irq_cnt; i++)
6170                         err |= tg3_rx_prodring_xfer(tp, dpr,
6171                                                     &tp->napi[i].prodring);
6172
6173                 wmb();
6174
6175                 if (std_prod_idx != dpr->rx_std_prod_idx)
6176                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6177                                      dpr->rx_std_prod_idx);
6178
6179                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6180                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6181                                      dpr->rx_jmb_prod_idx);
6182
6183                 mmiowb();
6184
6185                 if (err)
6186                         tw32_f(HOSTCC_MODE, tp->coal_now);
6187         }
6188
6189         return work_done;
6190 }
6191
6192 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6193 {
6194         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6195                 schedule_work(&tp->reset_task);
6196 }
6197
6198 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6199 {
6200         cancel_work_sync(&tp->reset_task);
6201         tg3_flag_clear(tp, RESET_TASK_PENDING);
6202         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6203 }
6204
6205 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6206 {
6207         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6208         struct tg3 *tp = tnapi->tp;
6209         int work_done = 0;
6210         struct tg3_hw_status *sblk = tnapi->hw_status;
6211
6212         while (1) {
6213                 work_done = tg3_poll_work(tnapi, work_done, budget);
6214
6215                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6216                         goto tx_recovery;
6217
6218                 if (unlikely(work_done >= budget))
6219                         break;
6220
6221                 /* tnapi->last_tag is used in the mailbox write below
6222                  * to tell the hw how much work has been processed,
6223                  * so we must read it before checking for more work.
6224                  */
6225                 tnapi->last_tag = sblk->status_tag;
6226                 tnapi->last_irq_tag = tnapi->last_tag;
6227                 rmb();
6228
6229                 /* check for RX/TX work to do */
6230                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6231                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6232
6233                         /* This test is not race-free, but looping again
6234                          * here reduces the number of interrupts.
6235                          */
6236                         if (tnapi == &tp->napi[1] && tp->rx_refill)
6237                                 continue;
6238
6239                         napi_complete(napi);
6240                         /* Reenable interrupts. */
6241                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6242
6243                         /* This test here is synchronized by napi_schedule()
6244                          * and napi_complete() to close the race condition.
6245                          */
6246                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6247                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
6248                                                   HOSTCC_MODE_ENABLE |
6249                                                   tnapi->coal_now);
6250                         }
6251                         mmiowb();
6252                         break;
6253                 }
6254         }
6255
6256         return work_done;
6257
6258 tx_recovery:
6259         /* work_done is guaranteed to be less than budget. */
6260         napi_complete(napi);
6261         tg3_reset_task_schedule(tp);
6262         return work_done;
6263 }
6264
6265 static void tg3_process_error(struct tg3 *tp)
6266 {
6267         u32 val;
6268         bool real_error = false;
6269
6270         if (tg3_flag(tp, ERROR_PROCESSED))
6271                 return;
6272
6273         /* Check Flow Attention register */
6274         val = tr32(HOSTCC_FLOW_ATTN);
6275         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6276                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6277                 real_error = true;
6278         }
6279
6280         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6281                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6282                 real_error = true;
6283         }
6284
6285         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6286                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6287                 real_error = true;
6288         }
6289
6290         if (!real_error)
6291                 return;
6292
6293         tg3_dump_state(tp);
6294
6295         tg3_flag_set(tp, ERROR_PROCESSED);
6296         tg3_reset_task_schedule(tp);
6297 }
6298
6299 static int tg3_poll(struct napi_struct *napi, int budget)
6300 {
6301         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6302         struct tg3 *tp = tnapi->tp;
6303         int work_done = 0;
6304         struct tg3_hw_status *sblk = tnapi->hw_status;
6305
6306         while (1) {
6307                 if (sblk->status & SD_STATUS_ERROR)
6308                         tg3_process_error(tp);
6309
6310                 tg3_poll_link(tp);
6311
6312                 work_done = tg3_poll_work(tnapi, work_done, budget);
6313
6314                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6315                         goto tx_recovery;
6316
6317                 if (unlikely(work_done >= budget))
6318                         break;
6319
6320                 if (tg3_flag(tp, TAGGED_STATUS)) {
6321                         /* tnapi->last_tag is used in tg3_int_reenable() below
6322                          * to tell the hw how much work has been processed,
6323                          * so we must read it before checking for more work.
6324                          */
6325                         tnapi->last_tag = sblk->status_tag;
6326                         tnapi->last_irq_tag = tnapi->last_tag;
6327                         rmb();
6328                 } else
6329                         sblk->status &= ~SD_STATUS_UPDATED;
6330
6331                 if (likely(!tg3_has_work(tnapi))) {
6332                         napi_complete(napi);
6333                         tg3_int_reenable(tnapi);
6334                         break;
6335                 }
6336         }
6337
6338         return work_done;
6339
6340 tx_recovery:
6341         /* work_done is guaranteed to be less than budget. */
6342         napi_complete(napi);
6343         tg3_reset_task_schedule(tp);
6344         return work_done;
6345 }
6346
6347 static void tg3_napi_disable(struct tg3 *tp)
6348 {
6349         int i;
6350
6351         for (i = tp->irq_cnt - 1; i >= 0; i--)
6352                 napi_disable(&tp->napi[i].napi);
6353 }
6354
6355 static void tg3_napi_enable(struct tg3 *tp)
6356 {
6357         int i;
6358
6359         for (i = 0; i < tp->irq_cnt; i++)
6360                 napi_enable(&tp->napi[i].napi);
6361 }
6362
6363 static void tg3_napi_init(struct tg3 *tp)
6364 {
6365         int i;
6366
6367         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6368         for (i = 1; i < tp->irq_cnt; i++)
6369                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6370 }
6371
6372 static void tg3_napi_fini(struct tg3 *tp)
6373 {
6374         int i;
6375
6376         for (i = 0; i < tp->irq_cnt; i++)
6377                 netif_napi_del(&tp->napi[i].napi);
6378 }
6379
6380 static inline void tg3_netif_stop(struct tg3 *tp)
6381 {
6382         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6383         tg3_napi_disable(tp);
6384         netif_tx_disable(tp->dev);
6385 }
6386
6387 static inline void tg3_netif_start(struct tg3 *tp)
6388 {
6389         /* NOTE: unconditional netif_tx_wake_all_queues is only
6390          * appropriate so long as all callers are assured to
6391          * have free tx slots (such as after tg3_init_hw)
6392          */
6393         netif_tx_wake_all_queues(tp->dev);
6394
6395         tg3_napi_enable(tp);
6396         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6397         tg3_enable_ints(tp);
6398 }
6399
6400 static void tg3_irq_quiesce(struct tg3 *tp)
6401 {
6402         int i;
6403
6404         BUG_ON(tp->irq_sync);
6405
6406         tp->irq_sync = 1;
6407         smp_mb();
6408
6409         for (i = 0; i < tp->irq_cnt; i++)
6410                 synchronize_irq(tp->napi[i].irq_vec);
6411 }
6412
6413 /* Fully shut down all tg3 driver activity elsewhere in the system.
6414  * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
6415  * Most of the time this is not necessary, except when
6416  * shutting down the device.
6417  */
6418 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6419 {
6420         spin_lock_bh(&tp->lock);
6421         if (irq_sync)
6422                 tg3_irq_quiesce(tp);
6423 }
6424
6425 static inline void tg3_full_unlock(struct tg3 *tp)
6426 {
6427         spin_unlock_bh(&tp->lock);
6428 }
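/* Typical usage elsewhere in the driver (a sketch, not new code):
 *
 *      tg3_full_lock(tp, 1);      // irq_sync != 0: quiesce IRQs too
 *      ... reprogram or reset the hardware ...
 *      tg3_full_unlock(tp);
 */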
6429
6430 /* One-shot MSI handler - Chip automatically disables interrupt
6431  * after sending MSI so driver doesn't have to do it.
6432  */
6433 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6434 {
6435         struct tg3_napi *tnapi = dev_id;
6436         struct tg3 *tp = tnapi->tp;
6437
6438         prefetch(tnapi->hw_status);
6439         if (tnapi->rx_rcb)
6440                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6441
6442         if (likely(!tg3_irq_sync(tp)))
6443                 napi_schedule(&tnapi->napi);
6444
6445         return IRQ_HANDLED;
6446 }
6447
6448 /* MSI ISR - No need to check for interrupt sharing and no need to
6449  * flush status block and interrupt mailbox. PCI ordering rules
6450  * guarantee that MSI will arrive after the status block.
6451  */
6452 static irqreturn_t tg3_msi(int irq, void *dev_id)
6453 {
6454         struct tg3_napi *tnapi = dev_id;
6455         struct tg3 *tp = tnapi->tp;
6456
6457         prefetch(tnapi->hw_status);
6458         if (tnapi->rx_rcb)
6459                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6460         /*
6461          * Writing any value to intr-mbox-0 clears PCI INTA# and
6462          * chip-internal interrupt pending events.
6463          * Writing non-zero to intr-mbox-0 additionally tells the
6464          * NIC to stop sending us irqs, engaging "in-intr-handler"
6465          * event coalescing.
6466          */
6467         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6468         if (likely(!tg3_irq_sync(tp)))
6469                 napi_schedule(&tnapi->napi);
6470
6471         return IRQ_RETVAL(1);
6472 }
6473
6474 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6475 {
6476         struct tg3_napi *tnapi = dev_id;
6477         struct tg3 *tp = tnapi->tp;
6478         struct tg3_hw_status *sblk = tnapi->hw_status;
6479         unsigned int handled = 1;
6480
6481         /* In INTx mode, it is possible for the interrupt to arrive at
6482          * the CPU before the status block posted prior to it is visible.
6483          * Reading the PCI State register will confirm whether the
6484          * interrupt is ours and will flush the status block.
6485          */
6486         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6487                 if (tg3_flag(tp, CHIP_RESETTING) ||
6488                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6489                         handled = 0;
6490                         goto out;
6491                 }
6492         }
6493
6494         /*
6495          * Writing any value to intr-mbox-0 clears PCI INTA# and
6496          * chip-internal interrupt pending events.
6497          * Writing non-zero to intr-mbox-0 additionally tells the
6498          * NIC to stop sending us irqs, engaging "in-intr-handler"
6499          * event coalescing.
6500          *
6501          * Flush the mailbox to de-assert the IRQ immediately to prevent
6502          * spurious interrupts.  The flush impacts performance but
6503          * excessive spurious interrupts can be worse in some cases.
6504          */
6505         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6506         if (tg3_irq_sync(tp))
6507                 goto out;
6508         sblk->status &= ~SD_STATUS_UPDATED;
6509         if (likely(tg3_has_work(tnapi))) {
6510                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6511                 napi_schedule(&tnapi->napi);
6512         } else {
6513                 /* No work, shared interrupt perhaps?  re-enable
6514                  * interrupts, and flush that PCI write
6515                  */
6516                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6517                                0x00000000);
6518         }
6519 out:
6520         return IRQ_RETVAL(handled);
6521 }
6522
6523 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6524 {
6525         struct tg3_napi *tnapi = dev_id;
6526         struct tg3 *tp = tnapi->tp;
6527         struct tg3_hw_status *sblk = tnapi->hw_status;
6528         unsigned int handled = 1;
6529
6530         /* In INTx mode, it is possible for the interrupt to arrive at
6531          * the CPU before the status block posted prior to it is visible.
6532          * Reading the PCI State register will confirm whether the
6533          * interrupt is ours and will flush the status block.
6534          */
6535         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6536                 if (tg3_flag(tp, CHIP_RESETTING) ||
6537                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6538                         handled = 0;
6539                         goto out;
6540                 }
6541         }
6542
6543         /*
6544          * Writing any value to intr-mbox-0 clears PCI INTA# and
6545          * chip-internal interrupt pending events.
6546          * Writing non-zero to intr-mbox-0 additionally tells the
6547          * NIC to stop sending us irqs, engaging "in-intr-handler"
6548          * event coalescing.
6549          *
6550          * Flush the mailbox to de-assert the IRQ immediately to prevent
6551          * spurious interrupts.  The flush impacts performance but
6552          * excessive spurious interrupts can be worse in some cases.
6553          */
6554         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6555
6556         /*
6557          * In a shared interrupt configuration, sometimes other devices'
6558          * interrupts will scream.  We record the current status tag here
6559          * so that the above check can report that the screaming interrupts
6560          * are unhandled.  Eventually they will be silenced.
6561          */
6562         tnapi->last_irq_tag = sblk->status_tag;
6563
6564         if (tg3_irq_sync(tp))
6565                 goto out;
6566
6567         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6568
6569         napi_schedule(&tnapi->napi);
6570
6571 out:
6572         return IRQ_RETVAL(handled);
6573 }
6574
6575 /* ISR for interrupt test */
6576 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6577 {
6578         struct tg3_napi *tnapi = dev_id;
6579         struct tg3 *tp = tnapi->tp;
6580         struct tg3_hw_status *sblk = tnapi->hw_status;
6581
6582         if ((sblk->status & SD_STATUS_UPDATED) ||
6583             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6584                 tg3_disable_ints(tp);
6585                 return IRQ_RETVAL(1);
6586         }
6587         return IRQ_RETVAL(0);
6588 }
6589
6590 #ifdef CONFIG_NET_POLL_CONTROLLER
6591 static void tg3_poll_controller(struct net_device *dev)
6592 {
6593         int i;
6594         struct tg3 *tp = netdev_priv(dev);
6595
6596         for (i = 0; i < tp->irq_cnt; i++)
6597                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6598 }
6599 #endif
6600
6601 static void tg3_tx_timeout(struct net_device *dev)
6602 {
6603         struct tg3 *tp = netdev_priv(dev);
6604
6605         if (netif_msg_tx_err(tp)) {
6606                 netdev_err(dev, "transmit timed out, resetting\n");
6607                 tg3_dump_state(tp);
6608         }
6609
6610         tg3_reset_task_schedule(tp);
6611 }
6612
6613 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6614 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6615 {
6616         u32 base = (u32) mapping & 0xffffffff;
6617
6618         return (base > 0xffffdcc0) && (base + len + 8 < base);
6619 }
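/* A worked example for the test above (illustrative values): with
 * base = 0xffffff00 and len = 0x200, base + len + 8 wraps to
 * 0x00000108, which is less than base, so the buffer straddles a 4GB
 * boundary and must be bounced by the hwbug workaround.  The
 * base > 0xffffdcc0 pre-check simply filters out addresses too far
 * below a boundary for any supported frame (plus 8 bytes of slack)
 * to wrap.
 */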
6620
6621 /* Test for DMA addresses > 40-bit */
6622 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6623                                           int len)
6624 {
6625 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6626         if (tg3_flag(tp, 40BIT_DMA_BUG))
6627                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6628         return 0;
6629 #else
6630         return 0;
6631 #endif
6632 }
6633
6634 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6635                                  dma_addr_t mapping, u32 len, u32 flags,
6636                                  u32 mss, u32 vlan)
6637 {
6638         txbd->addr_hi = ((u64) mapping >> 32);
6639         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6640         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6641         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6642 }
6643
6644 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6645                             dma_addr_t map, u32 len, u32 flags,
6646                             u32 mss, u32 vlan)
6647 {
6648         struct tg3 *tp = tnapi->tp;
6649         bool hwbug = false;
6650
6651         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6652                 hwbug = true;
6653
6654         if (tg3_4g_overflow_test(map, len))
6655                 hwbug = true;
6656
6657         if (tg3_40bit_overflow_test(tp, map, len))
6658                 hwbug = true;
6659
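        /* Some chip revisions cannot DMA a single BD longer than
         * tp->dma_limit (a few KB on the affected parts).  Oversized
         * chunks are split into a chain of BDs below, each marked
         * ->fragmented so that tg3_tx() and tg3_tx_skb_unmap() can
         * walk past them on completion.
         */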
6660         if (tp->dma_limit) {
6661                 u32 prvidx = *entry;
6662                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6663                 while (len > tp->dma_limit && *budget) {
6664                         u32 frag_len = tp->dma_limit;
6665                         len -= tp->dma_limit;
6666
6667                         /* Avoid the 8-byte DMA problem: don't leave a tail of <= 8 bytes */
6668                         if (len <= 8) {
6669                                 len += tp->dma_limit / 2;
6670                                 frag_len = tp->dma_limit / 2;
6671                         }
6672
6673                         tnapi->tx_buffers[*entry].fragmented = true;
6674
6675                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6676                                       frag_len, tmp_flag, mss, vlan);
6677                         *budget -= 1;
6678                         prvidx = *entry;
6679                         *entry = NEXT_TX(*entry);
6680
6681                         map += frag_len;
6682                 }
6683
6684                 if (len) {
6685                         if (*budget) {
6686                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6687                                               len, flags, mss, vlan);
6688                                 *budget -= 1;
6689                                 *entry = NEXT_TX(*entry);
6690                         } else {
6691                                 hwbug = true;
6692                                 tnapi->tx_buffers[prvidx].fragmented = false;
6693                         }
6694                 }
6695         } else {
6696                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6697                               len, flags, mss, vlan);
6698                 *entry = NEXT_TX(*entry);
6699         }
6700
6701         return hwbug;
6702 }
6703
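/* Unmap a previously mapped skb starting at @entry: the linear head
 * first, then fragments 0..@last, skipping any BDs that were marked
 * ->fragmented by tg3_tx_frag_set().  Passing @last == -1 unmaps only
 * the head.
 */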
6704 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6705 {
6706         int i;
6707         struct sk_buff *skb;
6708         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6709
6710         skb = txb->skb;
6711         txb->skb = NULL;
6712
6713         pci_unmap_single(tnapi->tp->pdev,
6714                          dma_unmap_addr(txb, mapping),
6715                          skb_headlen(skb),
6716                          PCI_DMA_TODEVICE);
6717
6718         while (txb->fragmented) {
6719                 txb->fragmented = false;
6720                 entry = NEXT_TX(entry);
6721                 txb = &tnapi->tx_buffers[entry];
6722         }
6723
6724         for (i = 0; i <= last; i++) {
6725                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6726
6727                 entry = NEXT_TX(entry);
6728                 txb = &tnapi->tx_buffers[entry];
6729
6730                 pci_unmap_page(tnapi->tp->pdev,
6731                                dma_unmap_addr(txb, mapping),
6732                                skb_frag_size(frag), PCI_DMA_TODEVICE);
6733
6734                 while (txb->fragmented) {
6735                         txb->fragmented = false;
6736                         entry = NEXT_TX(entry);
6737                         txb = &tnapi->tx_buffers[entry];
6738                 }
6739         }
6740 }
6741
6742 /* Work around the 4GB and 40-bit hardware DMA bugs. */
6743 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6744                                        struct sk_buff **pskb,
6745                                        u32 *entry, u32 *budget,
6746                                        u32 base_flags, u32 mss, u32 vlan)
6747 {
6748         struct tg3 *tp = tnapi->tp;
6749         struct sk_buff *new_skb, *skb = *pskb;
6750         dma_addr_t new_addr = 0;
6751         int ret = 0;
6752
6753         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6754                 new_skb = skb_copy(skb, GFP_ATOMIC);
6755         else {
6756                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6757
6758                 new_skb = skb_copy_expand(skb,
6759                                           skb_headroom(skb) + more_headroom,
6760                                           skb_tailroom(skb), GFP_ATOMIC);
6761         }
6762
6763         if (!new_skb) {
6764                 ret = -1;
6765         } else {
6766                 /* New SKB is guaranteed to be linear. */
6767                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6768                                           PCI_DMA_TODEVICE);
6769                 /* Make sure the mapping succeeded */
6770                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6771                         dev_kfree_skb(new_skb);
6772                         ret = -1;
6773                 } else {
6774                         u32 save_entry = *entry;
6775
6776                         base_flags |= TXD_FLAG_END;
6777
6778                         tnapi->tx_buffers[*entry].skb = new_skb;
6779                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6780                                            mapping, new_addr);
6781
6782                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6783                                             new_skb->len, base_flags,
6784                                             mss, vlan)) {
6785                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6786                                 dev_kfree_skb(new_skb);
6787                                 ret = -1;
6788                         }
6789                 }
6790         }
6791
6792         dev_kfree_skb(skb);
6793         *pskb = new_skb;
6794         return ret;
6795 }
6796
6797 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6798
6799 /* Use GSO to workaround a rare TSO bug that may be triggered when the
6800  * TSO header is greater than 80 bytes.
6801  */
6802 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6803 {
6804         struct sk_buff *segs, *nskb;
6805         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6806
6807         /* Estimate the number of fragments in the worst case */
6808         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6809                 netif_stop_queue(tp->dev);
6810
6811                 /* netif_tx_stop_queue() must be done before checking
6812                  * tx index in tg3_tx_avail() below, because in
6813                  * tg3_tx(), we update tx index before checking for
6814                  * netif_tx_queue_stopped().
6815                  */
6816                 smp_mb();
6817                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6818                         return NETDEV_TX_BUSY;
6819
6820                 netif_wake_queue(tp->dev);
6821         }
6822
6823         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6824         if (IS_ERR(segs))
6825                 goto tg3_tso_bug_end;
6826
6827         do {
6828                 nskb = segs;
6829                 segs = segs->next;
6830                 nskb->next = NULL;
6831                 tg3_start_xmit(nskb, tp->dev);
6832         } while (segs);
6833
6834 tg3_tso_bug_end:
6835         dev_kfree_skb(skb);
6836
6837         return NETDEV_TX_OK;
6838 }
6839
6840 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6841  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6842  */
6843 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6844 {
6845         struct tg3 *tp = netdev_priv(dev);
6846         u32 len, entry, base_flags, mss, vlan = 0;
6847         u32 budget;
6848         int i = -1, would_hit_hwbug;
6849         dma_addr_t mapping;
6850         struct tg3_napi *tnapi;
6851         struct netdev_queue *txq;
6852         unsigned int last;
6853
6854         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6855         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6856         if (tg3_flag(tp, ENABLE_TSS))
6857                 tnapi++;
6858
6859         budget = tg3_tx_avail(tnapi);
6860
6861         /* We are running in BH disabled context with netif_tx_lock
6862          * and TX reclaim runs via tp->napi.poll inside of a software
6863          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6864          * no IRQ context deadlocks to worry about either.  Rejoice!
6865          */
6866         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6867                 if (!netif_tx_queue_stopped(txq)) {
6868                         netif_tx_stop_queue(txq);
6869
6870                         /* This is a hard error, log it. */
6871                         netdev_err(dev,
6872                                    "BUG! Tx Ring full when queue awake!\n");
6873                 }
6874                 return NETDEV_TX_BUSY;
6875         }
6876
6877         entry = tnapi->tx_prod;
6878         base_flags = 0;
6879         if (skb->ip_summed == CHECKSUM_PARTIAL)
6880                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6881
6882         mss = skb_shinfo(skb)->gso_size;
6883         if (mss) {
6884                 struct iphdr *iph;
6885                 u32 tcp_opt_len, hdr_len;
6886
6887                 if (skb_header_cloned(skb) &&
6888                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6889                         goto drop;
6890
6891                 iph = ip_hdr(skb);
6892                 tcp_opt_len = tcp_optlen(skb);
6893
6894                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6895
6896                 if (!skb_is_gso_v6(skb)) {
6897                         iph->check = 0;
6898                         iph->tot_len = htons(mss + hdr_len);
6899                 }
6900
6901                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6902                     tg3_flag(tp, TSO_BUG))
6903                         return tg3_tso_bug(tp, skb);
6904
6905                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6906                                TXD_FLAG_CPU_POST_DMA);
6907
6908                 if (tg3_flag(tp, HW_TSO_1) ||
6909                     tg3_flag(tp, HW_TSO_2) ||
6910                     tg3_flag(tp, HW_TSO_3)) {
6911                         tcp_hdr(skb)->check = 0;
6912                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6913                 } else
6914                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6915                                                                  iph->daddr, 0,
6916                                                                  IPPROTO_TCP,
6917                                                                  0);
6918
6919                 if (tg3_flag(tp, HW_TSO_3)) {
6920                         mss |= (hdr_len & 0xc) << 12;
6921                         if (hdr_len & 0x10)
6922                                 base_flags |= 0x00000010;
6923                         base_flags |= (hdr_len & 0x3e0) << 5;
6924                 } else if (tg3_flag(tp, HW_TSO_2))
6925                         mss |= hdr_len << 9;
6926                 else if (tg3_flag(tp, HW_TSO_1) ||
6927                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6928                         if (tcp_opt_len || iph->ihl > 5) {
6929                                 int tsflags;
6930
6931                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6932                                 mss |= (tsflags << 11);
6933                         }
6934                 } else {
6935                         if (tcp_opt_len || iph->ihl > 5) {
6936                                 int tsflags;
6937
6938                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6939                                 base_flags |= tsflags << 12;
6940                         }
6941                 }
6942         }
6943
6944         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6945             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6946                 base_flags |= TXD_FLAG_JMB_PKT;
6947
6948         if (vlan_tx_tag_present(skb)) {
6949                 base_flags |= TXD_FLAG_VLAN;
6950                 vlan = vlan_tx_tag_get(skb);
6951         }
6952
6953         len = skb_headlen(skb);
6954
6955         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6956         if (pci_dma_mapping_error(tp->pdev, mapping))
6957                 goto drop;
6958
6959
6960         tnapi->tx_buffers[entry].skb = skb;
6961         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6962
6963         would_hit_hwbug = 0;
6964
6965         if (tg3_flag(tp, 5701_DMA_BUG))
6966                 would_hit_hwbug = 1;
6967
6968         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6969                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6970                             mss, vlan)) {
6971                 would_hit_hwbug = 1;
6972         } else if (skb_shinfo(skb)->nr_frags > 0) {
6973                 u32 tmp_mss = mss;
6974
6975                 if (!tg3_flag(tp, HW_TSO_1) &&
6976                     !tg3_flag(tp, HW_TSO_2) &&
6977                     !tg3_flag(tp, HW_TSO_3))
6978                         tmp_mss = 0;
6979
6980                 /* Now loop through additional data
6981                  * fragments, and queue them.
6982                  */
6983                 last = skb_shinfo(skb)->nr_frags - 1;
6984                 for (i = 0; i <= last; i++) {
6985                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6986
6987                         len = skb_frag_size(frag);
6988                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6989                                                    len, DMA_TO_DEVICE);
6990
6991                         tnapi->tx_buffers[entry].skb = NULL;
6992                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6993                                            mapping);
6994                         if (dma_mapping_error(&tp->pdev->dev, mapping))
6995                                 goto dma_error;
6996
6997                         if (!budget ||
6998                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6999                                             len, base_flags |
7000                                             ((i == last) ? TXD_FLAG_END : 0),
7001                                             tmp_mss, vlan)) {
7002                                 would_hit_hwbug = 1;
7003                                 break;
7004                         }
7005                 }
7006         }
7007
7008         if (would_hit_hwbug) {
7009                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7010
7011                 /* If the workaround fails due to memory/mapping
7012                  * failure, silently drop this packet.
7013                  */
7014                 entry = tnapi->tx_prod;
7015                 budget = tg3_tx_avail(tnapi);
7016                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7017                                                 base_flags, mss, vlan))
7018                         goto drop_nofree;
7019         }
7020
7021         skb_tx_timestamp(skb);
7022         netdev_tx_sent_queue(txq, skb->len);
7023
7024         /* Sync BD data before updating mailbox */
7025         wmb();
7026
7027         /* Packets are ready, update Tx producer idx local and on card. */
7028         tw32_tx_mbox(tnapi->prodmbox, entry);
7029
7030         tnapi->tx_prod = entry;
7031         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7032                 netif_tx_stop_queue(txq);
7033
7034                 /* netif_tx_stop_queue() must be done before checking
7035                  * tx index in tg3_tx_avail() below, because in
7036                  * tg3_tx(), we update tx index before checking for
7037                  * netif_tx_queue_stopped().
7038                  */
7039                 smp_mb();
7040                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7041                         netif_tx_wake_queue(txq);
7042         }
7043
7044         mmiowb();
7045         return NETDEV_TX_OK;
7046
7047 dma_error:
7048         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7049         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7050 drop:
7051         dev_kfree_skb(skb);
7052 drop_nofree:
7053         tp->tx_dropped++;
7054         return NETDEV_TX_OK;
7055 }
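
/* Illustrative sketch, not part of the driver: the queue stop/wake
 * protocol used at the end of tg3_start_xmit() above.  The producer
 * stops the queue *before* re-reading the free descriptor count, and
 * tg3_tx() updates the consumer index *before* testing the stopped
 * state; the paired memory barriers guarantee at least one side sees
 * the other's update, so a stopped queue can always be woken.
 * example_producer() is a hypothetical helper that isolates the
 * pattern.
 */
#if 0	/* example only, never compiled */
static void example_producer(struct tg3_napi *tnapi, struct netdev_queue *txq)
{
	if (tg3_tx_avail(tnapi) <= MAX_SKB_FRAGS + 1) {
		netif_tx_stop_queue(txq);

		/* Pairs with the barrier in tg3_tx(); without it we
		 * could miss a concurrent consumer-index update and
		 * leave the queue stopped forever.
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}
}
#endif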
7056
7057 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7058 {
7059         if (enable) {
7060                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7061                                   MAC_MODE_PORT_MODE_MASK);
7062
7063                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7064
7065                 if (!tg3_flag(tp, 5705_PLUS))
7066                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7067
7068                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7069                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7070                 else
7071                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7072         } else {
7073                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7074
7075                 if (tg3_flag(tp, 5705_PLUS) ||
7076                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7077                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7078                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7079         }
7080
7081         tw32(MAC_MODE, tp->mac_mode);
7082         udelay(40);
7083 }
7084
7085 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7086 {
7087         u32 val, bmcr, mac_mode, ptest = 0;
7088
7089         tg3_phy_toggle_apd(tp, false);
7090         tg3_phy_toggle_automdix(tp, 0);
7091
7092         if (extlpbk && tg3_phy_set_extloopbk(tp))
7093                 return -EIO;
7094
7095         bmcr = BMCR_FULLDPLX;
7096         switch (speed) {
7097         case SPEED_10:
7098                 break;
7099         case SPEED_100:
7100                 bmcr |= BMCR_SPEED100;
7101                 break;
7102         case SPEED_1000:
7103         default:
7104                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7105                         speed = SPEED_100;
7106                         bmcr |= BMCR_SPEED100;
7107                 } else {
7108                         speed = SPEED_1000;
7109                         bmcr |= BMCR_SPEED1000;
7110                 }
7111         }
7112
7113         if (extlpbk) {
7114                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7115                         tg3_readphy(tp, MII_CTRL1000, &val);
7116                         val |= CTL1000_AS_MASTER |
7117                                CTL1000_ENABLE_MASTER;
7118                         tg3_writephy(tp, MII_CTRL1000, val);
7119                 } else {
7120                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7121                                 MII_TG3_FET_PTEST_TRIM_2;
7122                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7123                 }
7124         } else
7125                 bmcr |= BMCR_LOOPBACK;
7126
7127         tg3_writephy(tp, MII_BMCR, bmcr);
7128
7129         /* The write needs to be flushed for the FETs */
7130         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7131                 tg3_readphy(tp, MII_BMCR, &bmcr);
7132
7133         udelay(40);
7134
7135         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7136             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7137                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7138                              MII_TG3_FET_PTEST_FRC_TX_LINK |
7139                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
7140
7141                 /* The write needs to be flushed for the AC131 */
7142                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7143         }
7144
7145         /* Reset to prevent losing the first rx packet intermittently */
7146         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7147             tg3_flag(tp, 5780_CLASS)) {
7148                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7149                 udelay(10);
7150                 tw32_f(MAC_RX_MODE, tp->rx_mode);
7151         }
7152
7153         mac_mode = tp->mac_mode &
7154                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7155         if (speed == SPEED_1000)
7156                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7157         else
7158                 mac_mode |= MAC_MODE_PORT_MODE_MII;
7159
7160         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7161                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7162
7163                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7164                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7165                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7166                         mac_mode |= MAC_MODE_LINK_POLARITY;
7167
7168                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7169                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7170         }
7171
7172         tw32(MAC_MODE, mac_mode);
7173         udelay(40);
7174
7175         return 0;
7176 }
7177
7178 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7179 {
7180         struct tg3 *tp = netdev_priv(dev);
7181
7182         if (features & NETIF_F_LOOPBACK) {
7183                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7184                         return;
7185
7186                 spin_lock_bh(&tp->lock);
7187                 tg3_mac_loopback(tp, true);
7188                 netif_carrier_on(tp->dev);
7189                 spin_unlock_bh(&tp->lock);
7190                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7191         } else {
7192                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7193                         return;
7194
7195                 spin_lock_bh(&tp->lock);
7196                 tg3_mac_loopback(tp, false);
7197                 /* Force link status check */
7198                 tg3_setup_phy(tp, 1);
7199                 spin_unlock_bh(&tp->lock);
7200                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7201         }
7202 }
7203
7204 static netdev_features_t tg3_fix_features(struct net_device *dev,
7205         netdev_features_t features)
7206 {
7207         struct tg3 *tp = netdev_priv(dev);
7208
7209         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7210                 features &= ~NETIF_F_ALL_TSO;
7211
7212         return features;
7213 }
7214
7215 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7216 {
7217         netdev_features_t changed = dev->features ^ features;
7218
7219         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7220                 tg3_set_loopback(dev, features);
7221
7222         return 0;
7223 }
7224
7225 static void tg3_rx_prodring_free(struct tg3 *tp,
7226                                  struct tg3_rx_prodring_set *tpr)
7227 {
7228         int i;
7229
7230         if (tpr != &tp->napi[0].prodring) {
7231                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7232                      i = (i + 1) & tp->rx_std_ring_mask)
7233                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7234                                         tp->rx_pkt_map_sz);
7235
7236                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7237                         for (i = tpr->rx_jmb_cons_idx;
7238                              i != tpr->rx_jmb_prod_idx;
7239                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7240                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7241                                                 TG3_RX_JMB_MAP_SZ);
7242                         }
7243                 }
7244
7245                 return;
7246         }
7247
7248         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7249                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7250                                 tp->rx_pkt_map_sz);
7251
7252         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7253                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7254                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7255                                         TG3_RX_JMB_MAP_SZ);
7256         }
7257 }
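
/* Illustrative sketch, not part of the driver: the free loops above
 * walk the producer rings with "(i + 1) & mask" rather than a modulo,
 * which is valid because every tg3 ring size is a power of two.
 * ring_next() is a hypothetical helper that isolates the idiom.
 */
#if 0	/* example only, never compiled */
static u32 ring_next(u32 idx, u32 ring_mask)
{
	/* e.g. with a 512-entry ring, mask == 511:
	 * (511 + 1) & 511 == 0, wrapping back to the start.
	 */
	return (idx + 1) & ring_mask;
}
#endif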
7258
7259 /* Initialize rx rings for packet processing.
7260  *
7261  * The chip has been shut down and the driver detached from
7262  * the networking, so no interrupts or new tx packets will
7263  * the networking stack, so no interrupts or new tx packets will
7264  * we may not sleep.
7265  */
7266 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7267                                  struct tg3_rx_prodring_set *tpr)
7268 {
7269         u32 i, rx_pkt_dma_sz;
7270
7271         tpr->rx_std_cons_idx = 0;
7272         tpr->rx_std_prod_idx = 0;
7273         tpr->rx_jmb_cons_idx = 0;
7274         tpr->rx_jmb_prod_idx = 0;
7275
7276         if (tpr != &tp->napi[0].prodring) {
7277                 memset(&tpr->rx_std_buffers[0], 0,
7278                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7279                 if (tpr->rx_jmb_buffers)
7280                         memset(&tpr->rx_jmb_buffers[0], 0,
7281                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7282                 goto done;
7283         }
7284
7285         /* Zero out all descriptors. */
7286         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7287
7288         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7289         if (tg3_flag(tp, 5780_CLASS) &&
7290             tp->dev->mtu > ETH_DATA_LEN)
7291                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7292         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7293
7294         /* Initialize invariants of the rings; we only set this
7295          * stuff once.  This works because the card does not
7296          * write into the rx buffer posting rings.
7297          */
7298         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7299                 struct tg3_rx_buffer_desc *rxd;
7300
7301                 rxd = &tpr->rx_std[i];
7302                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7303                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7304                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7305                                (i << RXD_OPAQUE_INDEX_SHIFT));
7306         }
7307
7308         /* Now allocate fresh SKBs for each rx ring. */
7309         for (i = 0; i < tp->rx_pending; i++) {
7310                 unsigned int frag_size;
7311
7312                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7313                                       &frag_size) < 0) {
7314                         netdev_warn(tp->dev,
7315                                     "Using a smaller RX standard ring. Only "
7316                                     "%d out of %d buffers were allocated "
7317                                     "successfully\n", i, tp->rx_pending);
7318                         if (i == 0)
7319                                 goto initfail;
7320                         tp->rx_pending = i;
7321                         break;
7322                 }
7323         }
7324
7325         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7326                 goto done;
7327
7328         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7329
7330         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7331                 goto done;
7332
7333         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7334                 struct tg3_rx_buffer_desc *rxd;
7335
7336                 rxd = &tpr->rx_jmb[i].std;
7337                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7338                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7339                                   RXD_FLAG_JUMBO;
7340                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7341                        (i << RXD_OPAQUE_INDEX_SHIFT));
7342         }
7343
7344         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7345                 unsigned int frag_size;
7346
7347                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7348                                       &frag_size) < 0) {
7349                         netdev_warn(tp->dev,
7350                                     "Using a smaller RX jumbo ring. Only %d "
7351                                     "out of %d buffers were allocated "
7352                                     "successfully\n", i, tp->rx_jumbo_pending);
7353                         if (i == 0)
7354                                 goto initfail;
7355                         tp->rx_jumbo_pending = i;
7356                         break;
7357                 }
7358         }
7359
7360 done:
7361         return 0;
7362
7363 initfail:
7364         tg3_rx_prodring_free(tp, tpr);
7365         return -ENOMEM;
7366 }
7367
7368 static void tg3_rx_prodring_fini(struct tg3 *tp,
7369                                  struct tg3_rx_prodring_set *tpr)
7370 {
7371         kfree(tpr->rx_std_buffers);
7372         tpr->rx_std_buffers = NULL;
7373         kfree(tpr->rx_jmb_buffers);
7374         tpr->rx_jmb_buffers = NULL;
7375         if (tpr->rx_std) {
7376                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7377                                   tpr->rx_std, tpr->rx_std_mapping);
7378                 tpr->rx_std = NULL;
7379         }
7380         if (tpr->rx_jmb) {
7381                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7382                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7383                 tpr->rx_jmb = NULL;
7384         }
7385 }
7386
7387 static int tg3_rx_prodring_init(struct tg3 *tp,
7388                                 struct tg3_rx_prodring_set *tpr)
7389 {
7390         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7391                                       GFP_KERNEL);
7392         if (!tpr->rx_std_buffers)
7393                 return -ENOMEM;
7394
7395         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7396                                          TG3_RX_STD_RING_BYTES(tp),
7397                                          &tpr->rx_std_mapping,
7398                                          GFP_KERNEL);
7399         if (!tpr->rx_std)
7400                 goto err_out;
7401
7402         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7403                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7404                                               GFP_KERNEL);
7405                 if (!tpr->rx_jmb_buffers)
7406                         goto err_out;
7407
7408                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7409                                                  TG3_RX_JMB_RING_BYTES(tp),
7410                                                  &tpr->rx_jmb_mapping,
7411                                                  GFP_KERNEL);
7412                 if (!tpr->rx_jmb)
7413                         goto err_out;
7414         }
7415
7416         return 0;
7417
7418 err_out:
7419         tg3_rx_prodring_fini(tp, tpr);
7420         return -ENOMEM;
7421 }
7422
7423 /* Free up pending packets in all rx/tx rings.
7424  *
7425  * The chip has been shut down and the driver detached from
7426  * the networking stack, so no interrupts or new tx packets will
7427  * end up in the driver.  tp->{tx,}lock is not held and we are not
7428  * in an interrupt context and thus may sleep.
7429  */
7430 static void tg3_free_rings(struct tg3 *tp)
7431 {
7432         int i, j;
7433
7434         for (j = 0; j < tp->irq_cnt; j++) {
7435                 struct tg3_napi *tnapi = &tp->napi[j];
7436
7437                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7438
7439                 if (!tnapi->tx_buffers)
7440                         continue;
7441
7442                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7443                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7444
7445                         if (!skb)
7446                                 continue;
7447
7448                         tg3_tx_skb_unmap(tnapi, i,
7449                                          skb_shinfo(skb)->nr_frags - 1);
7450
7451                         dev_kfree_skb_any(skb);
7452                 }
7453                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7454         }
7455 }
7456
7457 /* Initialize tx/rx rings for packet processing.
7458  *
7459  * The chip has been shut down and the driver detached from
7460  * the networking stack, so no interrupts or new tx packets will
7461  * end up in the driver.  tp->{tx,}lock are held and thus
7462  * we may not sleep.
7463  */
7464 static int tg3_init_rings(struct tg3 *tp)
7465 {
7466         int i;
7467
7468         /* Free up all the SKBs. */
7469         tg3_free_rings(tp);
7470
7471         for (i = 0; i < tp->irq_cnt; i++) {
7472                 struct tg3_napi *tnapi = &tp->napi[i];
7473
7474                 tnapi->last_tag = 0;
7475                 tnapi->last_irq_tag = 0;
7476                 tnapi->hw_status->status = 0;
7477                 tnapi->hw_status->status_tag = 0;
7478                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7479
7480                 tnapi->tx_prod = 0;
7481                 tnapi->tx_cons = 0;
7482                 if (tnapi->tx_ring)
7483                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7484
7485                 tnapi->rx_rcb_ptr = 0;
7486                 if (tnapi->rx_rcb)
7487                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7488
7489                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7490                         tg3_free_rings(tp);
7491                         return -ENOMEM;
7492                 }
7493         }
7494
7495         return 0;
7496 }
7497
7498 /*
7499  * Must not be invoked with interrupt sources disabled and
7500  * the hardware shut down.
7501  */
7502 static void tg3_free_consistent(struct tg3 *tp)
7503 {
7504         int i;
7505
7506         for (i = 0; i < tp->irq_cnt; i++) {
7507                 struct tg3_napi *tnapi = &tp->napi[i];
7508
7509                 if (tnapi->tx_ring) {
7510                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7511                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7512                         tnapi->tx_ring = NULL;
7513                 }
7514
7515                 kfree(tnapi->tx_buffers);
7516                 tnapi->tx_buffers = NULL;
7517
7518                 if (tnapi->rx_rcb) {
7519                         dma_free_coherent(&tp->pdev->dev,
7520                                           TG3_RX_RCB_RING_BYTES(tp),
7521                                           tnapi->rx_rcb,
7522                                           tnapi->rx_rcb_mapping);
7523                         tnapi->rx_rcb = NULL;
7524                 }
7525
7526                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7527
7528                 if (tnapi->hw_status) {
7529                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7530                                           tnapi->hw_status,
7531                                           tnapi->status_mapping);
7532                         tnapi->hw_status = NULL;
7533                 }
7534         }
7535
7536         if (tp->hw_stats) {
7537                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7538                                   tp->hw_stats, tp->stats_mapping);
7539                 tp->hw_stats = NULL;
7540         }
7541 }
7542
7543 /*
7544  * Must not be invoked with interrupt sources disabled and
7545  * the hardware shut down.  Can sleep.
7546  */
7547 static int tg3_alloc_consistent(struct tg3 *tp)
7548 {
7549         int i;
7550
7551         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7552                                           sizeof(struct tg3_hw_stats),
7553                                           &tp->stats_mapping,
7554                                           GFP_KERNEL);
7555         if (!tp->hw_stats)
7556                 goto err_out;
7557
7558         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7559
7560         for (i = 0; i < tp->irq_cnt; i++) {
7561                 struct tg3_napi *tnapi = &tp->napi[i];
7562                 struct tg3_hw_status *sblk;
7563
7564                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7565                                                       TG3_HW_STATUS_SIZE,
7566                                                       &tnapi->status_mapping,
7567                                                       GFP_KERNEL);
7568                 if (!tnapi->hw_status)
7569                         goto err_out;
7570
7571                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7572                 sblk = tnapi->hw_status;
7573
7574                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7575                         goto err_out;
7576
7577                 /* If multivector TSS is enabled, vector 0 does not handle
7578                  * tx interrupts.  Don't allocate any resources for it.
7579                  */
7580                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7581                     (i && tg3_flag(tp, ENABLE_TSS))) {
7582                         tnapi->tx_buffers = kzalloc(
7583                                                sizeof(struct tg3_tx_ring_info) *
7584                                                TG3_TX_RING_SIZE, GFP_KERNEL);
7585                         if (!tnapi->tx_buffers)
7586                                 goto err_out;
7587
7588                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7589                                                             TG3_TX_RING_BYTES,
7590                                                         &tnapi->tx_desc_mapping,
7591                                                             GFP_KERNEL);
7592                         if (!tnapi->tx_ring)
7593                                 goto err_out;
7594                 }
7595
7596                 /*
7597                  * When RSS is enabled, the status block format changes
7598                  * slightly.  The "rx_jumbo_consumer", "reserved",
7599                  * and "rx_mini_consumer" members get mapped to the
7600                  * other three rx return ring producer indexes.
7601                  */
7602                 switch (i) {
7603                 default:
7604                         if (tg3_flag(tp, ENABLE_RSS)) {
7605                                 tnapi->rx_rcb_prod_idx = NULL;
7606                                 break;
7607                         }
7608                         /* Fall through */
7609                 case 1:
7610                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7611                         break;
7612                 case 2:
7613                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7614                         break;
7615                 case 3:
7616                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
7617                         break;
7618                 case 4:
7619                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7620                         break;
7621                 }
7622
7623                 /*
7624                  * If multivector RSS is enabled, vector 0 does not handle
7625                  * rx or tx interrupts.  Don't allocate any resources for it.
7626                  */
7627                 if (!i && tg3_flag(tp, ENABLE_RSS))
7628                         continue;
7629
7630                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7631                                                    TG3_RX_RCB_RING_BYTES(tp),
7632                                                    &tnapi->rx_rcb_mapping,
7633                                                    GFP_KERNEL);
7634                 if (!tnapi->rx_rcb)
7635                         goto err_out;
7636
7637                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7638         }
7639
7640         return 0;
7641
7642 err_out:
7643         tg3_free_consistent(tp);
7644         return -ENOMEM;
7645 }
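
/* Illustrative sketch, not part of the driver: every ring allocated in
 * tg3_alloc_consistent() is coherent DMA memory, and each allocation is
 * paired with a dma_free_coherent() of the same size and mapping in
 * tg3_free_consistent().  These hypothetical helpers show the pairing
 * for one generic ring.
 */
#if 0	/* example only, never compiled */
static int example_alloc_ring(struct tg3 *tp, size_t bytes,
			      void **ring, dma_addr_t *mapping)
{
	*ring = dma_alloc_coherent(&tp->pdev->dev, bytes, mapping,
				   GFP_KERNEL);
	return *ring ? 0 : -ENOMEM;
}

static void example_free_ring(struct tg3 *tp, size_t bytes,
			      void *ring, dma_addr_t mapping)
{
	/* size and mapping must match the original allocation */
	dma_free_coherent(&tp->pdev->dev, bytes, ring, mapping);
}
#endif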
7646
7647 #define MAX_WAIT_CNT 1000
7648
7649 /* To stop a block, clear the enable bit and poll until it
7650  * clears.  tp->lock is held.
7651  */
7652 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7653 {
7654         unsigned int i;
7655         u32 val;
7656
7657         if (tg3_flag(tp, 5705_PLUS)) {
7658                 switch (ofs) {
7659                 case RCVLSC_MODE:
7660                 case DMAC_MODE:
7661                 case MBFREE_MODE:
7662                 case BUFMGR_MODE:
7663                 case MEMARB_MODE:
7664                         /* We can't enable/disable these bits on the
7665                          * 5705/5750; just report success.
7666                          */
7667                         return 0;
7668
7669                 default:
7670                         break;
7671                 }
7672         }
7673
7674         val = tr32(ofs);
7675         val &= ~enable_bit;
7676         tw32_f(ofs, val);
7677
7678         for (i = 0; i < MAX_WAIT_CNT; i++) {
7679                 udelay(100);
7680                 val = tr32(ofs);
7681                 if ((val & enable_bit) == 0)
7682                         break;
7683         }
7684
7685         if (i == MAX_WAIT_CNT && !silent) {
7686                 dev_err(&tp->pdev->dev,
7687                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7688                         ofs, enable_bit);
7689                 return -ENODEV;
7690         }
7691
7692         return 0;
7693 }
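
/* Illustrative sketch, not part of the driver: tg3_stop_block() above
 * is the usual "clear an enable bit, then spin until the hardware
 * acknowledges" idiom, bounded at MAX_WAIT_CNT * 100us (100ms) per
 * block.  poll_bit_clear() is a hypothetical helper reduced to just
 * the polling step.
 */
#if 0	/* example only, never compiled */
static int poll_bit_clear(struct tg3 *tp, unsigned long ofs, u32 bit)
{
	unsigned int i;

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(ofs) & bit))
			return 0;	/* block reports it has stopped */
	}

	return -ENODEV;			/* timed out after ~100ms */
}
#endif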
7694
7695 /* tp->lock is held. */
7696 static int tg3_abort_hw(struct tg3 *tp, int silent)
7697 {
7698         int i, err;
7699
7700         tg3_disable_ints(tp);
7701
7702         tp->rx_mode &= ~RX_MODE_ENABLE;
7703         tw32_f(MAC_RX_MODE, tp->rx_mode);
7704         udelay(10);
7705
7706         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7707         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7708         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7709         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7710         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7711         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7712
7713         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7714         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7715         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7716         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7717         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7718         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7719         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7720
7721         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7722         tw32_f(MAC_MODE, tp->mac_mode);
7723         udelay(40);
7724
7725         tp->tx_mode &= ~TX_MODE_ENABLE;
7726         tw32_f(MAC_TX_MODE, tp->tx_mode);
7727
7728         for (i = 0; i < MAX_WAIT_CNT; i++) {
7729                 udelay(100);
7730                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7731                         break;
7732         }
7733         if (i >= MAX_WAIT_CNT) {
7734                 dev_err(&tp->pdev->dev,
7735                         "%s timed out, TX_MODE_ENABLE will not clear "
7736                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7737                 err |= -ENODEV;
7738         }
7739
7740         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7741         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7742         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7743
7744         tw32(FTQ_RESET, 0xffffffff);
7745         tw32(FTQ_RESET, 0x00000000);
7746
7747         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7748         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7749
7750         for (i = 0; i < tp->irq_cnt; i++) {
7751                 struct tg3_napi *tnapi = &tp->napi[i];
7752                 if (tnapi->hw_status)
7753                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7754         }
7755
7756         return err;
7757 }
7758
7759 /* Save PCI command register before chip reset */
7760 static void tg3_save_pci_state(struct tg3 *tp)
7761 {
7762         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7763 }
7764
7765 /* Restore PCI state after chip reset */
7766 static void tg3_restore_pci_state(struct tg3 *tp)
7767 {
7768         u32 val;
7769
7770         /* Re-enable indirect register accesses. */
7771         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7772                                tp->misc_host_ctrl);
7773
7774         /* Set MAX PCI retry to zero. */
7775         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7776         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7777             tg3_flag(tp, PCIX_MODE))
7778                 val |= PCISTATE_RETRY_SAME_DMA;
7779         /* Allow reads and writes to the APE register and memory space. */
7780         if (tg3_flag(tp, ENABLE_APE))
7781                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7782                        PCISTATE_ALLOW_APE_SHMEM_WR |
7783                        PCISTATE_ALLOW_APE_PSPACE_WR;
7784         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7785
7786         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7787
7788         if (!tg3_flag(tp, PCI_EXPRESS)) {
7789                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7790                                       tp->pci_cacheline_sz);
7791                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7792                                       tp->pci_lat_timer);
7793         }
7794
7795         /* Make sure PCI-X relaxed ordering bit is clear. */
7796         if (tg3_flag(tp, PCIX_MODE)) {
7797                 u16 pcix_cmd;
7798
7799                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7800                                      &pcix_cmd);
7801                 pcix_cmd &= ~PCI_X_CMD_ERO;
7802                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7803                                       pcix_cmd);
7804         }
7805
7806         if (tg3_flag(tp, 5780_CLASS)) {
7807
7808                 /* Chip reset on 5780 will reset MSI enable bit,
7809                  * so we need to restore it.
7810                  */
7811                 if (tg3_flag(tp, USING_MSI)) {
7812                         u16 ctrl;
7813
7814                         pci_read_config_word(tp->pdev,
7815                                              tp->msi_cap + PCI_MSI_FLAGS,
7816                                              &ctrl);
7817                         pci_write_config_word(tp->pdev,
7818                                               tp->msi_cap + PCI_MSI_FLAGS,
7819                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7820                         val = tr32(MSGINT_MODE);
7821                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7822                 }
7823         }
7824 }
7825
7826 /* tp->lock is held. */
7827 static int tg3_chip_reset(struct tg3 *tp)
7828 {
7829         u32 val;
7830         void (*write_op)(struct tg3 *, u32, u32);
7831         int i, err;
7832
7833         tg3_nvram_lock(tp);
7834
7835         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7836
7837         /* No matching tg3_nvram_unlock() after this because
7838          * chip reset below will undo the nvram lock.
7839          */
7840         tp->nvram_lock_cnt = 0;
7841
7842         /* GRC_MISC_CFG core clock reset will clear the memory
7843          * enable bit in PCI register 4 and the MSI enable bit
7844          * on some chips, so we save relevant registers here.
7845          */
7846         tg3_save_pci_state(tp);
7847
7848         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7849             tg3_flag(tp, 5755_PLUS))
7850                 tw32(GRC_FASTBOOT_PC, 0);
7851
7852         /*
7853          * We must avoid the readl() that normally takes place.
7854          * It locks up machines, causes machine checks, and other
7855          * fun things.  So, temporarily disable the 5701
7856          * hardware workaround, while we do the reset.
7857          */
7858         write_op = tp->write32;
7859         if (write_op == tg3_write_flush_reg32)
7860                 tp->write32 = tg3_write32;
7861
7862         /* Prevent the irq handler from reading or writing PCI registers
7863          * during chip reset when the memory enable bit in the PCI command
7864          * register may be cleared.  The chip does not generate interrupts
7865          * at this time, but the irq handler may still be called due to irq
7866          * sharing or irqpoll.
7867          */
7868         tg3_flag_set(tp, CHIP_RESETTING);
7869         for (i = 0; i < tp->irq_cnt; i++) {
7870                 struct tg3_napi *tnapi = &tp->napi[i];
7871                 if (tnapi->hw_status) {
7872                         tnapi->hw_status->status = 0;
7873                         tnapi->hw_status->status_tag = 0;
7874                 }
7875                 tnapi->last_tag = 0;
7876                 tnapi->last_irq_tag = 0;
7877         }
7878         smp_mb();
7879
7880         for (i = 0; i < tp->irq_cnt; i++)
7881                 synchronize_irq(tp->napi[i].irq_vec);
7882
7883         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7884                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7885                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7886         }
7887
7888         /* do the reset */
7889         val = GRC_MISC_CFG_CORECLK_RESET;
7890
7891         if (tg3_flag(tp, PCI_EXPRESS)) {
7892                 /* Force PCIe 1.0a mode */
7893                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7894                     !tg3_flag(tp, 57765_PLUS) &&
7895                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7896                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7897                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7898
7899                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7900                         tw32(GRC_MISC_CFG, (1 << 29));
7901                         val |= (1 << 29);
7902                 }
7903         }
7904
7905         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7906                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7907                 tw32(GRC_VCPU_EXT_CTRL,
7908                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7909         }
7910
7911         /* Manage gphy power for all CPMU-absent PCIe devices. */
7912         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7913                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7914
7915         tw32(GRC_MISC_CFG, val);
7916
7917         /* restore 5701 hardware bug workaround write method */
7918         tp->write32 = write_op;
7919
7920         /* Unfortunately, we have to delay before the PCI read back.
7921          * Some 575X chips will not even respond to a PCI cfg access
7922          * when the reset command is given to the chip.
7923          *
7924          * How do these hardware designers expect things to work
7925          * properly if the PCI write is posted for a long period
7926          * of time?  It is always necessary to have some method by
7927          * which a register read-back can occur to push out the
7928          * posted write that performs the reset.
7929          *
7930          * For most tg3 variants the trick below was working.
7931          * Ho hum...
7932          */
7933         udelay(120);
7934
7935         /* Flush PCI posted writes.  The normal MMIO registers
7936          * are inaccessible at this time so this is the only
7937          * way to do this reliably (actually, this is no longer
7938          * the case, see above).  I tried to use indirect
7939          * register read/write but this upset some 5701 variants.
7940          */
7941         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7942
7943         udelay(120);
7944
7945         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7946                 u16 val16;
7947
7948                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7949                         int i;
7950                         u32 cfg_val;
7951
7952                         /* Wait for link training to complete.  */
7953                         for (i = 0; i < 5000; i++)
7954                                 udelay(100);
7955
7956                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7957                         pci_write_config_dword(tp->pdev, 0xc4,
7958                                                cfg_val | (1 << 15));
7959                 }
7960
7961                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7962                 pci_read_config_word(tp->pdev,
7963                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7964                                      &val16);
7965                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7966                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7967                 /*
7968                  * Older PCIe devices only support the 128 byte
7969                  * MPS setting.  Enforce the restriction.
7970                  */
7971                 if (!tg3_flag(tp, CPMU_PRESENT))
7972                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7973                 pci_write_config_word(tp->pdev,
7974                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7975                                       val16);
7976
7977                 /* Clear error status */
7978                 pci_write_config_word(tp->pdev,
7979                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7980                                       PCI_EXP_DEVSTA_CED |
7981                                       PCI_EXP_DEVSTA_NFED |
7982                                       PCI_EXP_DEVSTA_FED |
7983                                       PCI_EXP_DEVSTA_URD);
7984         }
7985
7986         tg3_restore_pci_state(tp);
7987
7988         tg3_flag_clear(tp, CHIP_RESETTING);
7989         tg3_flag_clear(tp, ERROR_PROCESSED);
7990
7991         val = 0;
7992         if (tg3_flag(tp, 5780_CLASS))
7993                 val = tr32(MEMARB_MODE);
7994         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7995
7996         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7997                 tg3_stop_fw(tp);
7998                 tw32(0x5000, 0x400);
7999         }
8000
8001         tw32(GRC_MODE, tp->grc_mode);
8002
8003         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
8004                 val = tr32(0xc4);
8005
8006                 tw32(0xc4, val | (1 << 15));
8007         }
8008
8009         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8010             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8011                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8012                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8013                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8014                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8015         }
8016
8017         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8018                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8019                 val = tp->mac_mode;
8020         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8021                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8022                 val = tp->mac_mode;
8023         } else
8024                 val = 0;
8025
8026         tw32_f(MAC_MODE, val);
8027         udelay(40);
8028
8029         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8030
8031         err = tg3_poll_fw(tp);
8032         if (err)
8033                 return err;
8034
8035         tg3_mdio_start(tp);
8036
8037         if (tg3_flag(tp, PCI_EXPRESS) &&
8038             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8039             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8040             !tg3_flag(tp, 57765_PLUS)) {
8041                 val = tr32(0x7c00);
8042
8043                 tw32(0x7c00, val | (1 << 25));
8044         }
8045
8046         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8047                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8048                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8049         }
8050
8051         /* Reprobe ASF enable state.  */
8052         tg3_flag_clear(tp, ENABLE_ASF);
8053         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8054         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8055         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8056                 u32 nic_cfg;
8057
8058                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8059                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8060                         tg3_flag_set(tp, ENABLE_ASF);
8061                         tp->last_event_jiffies = jiffies;
8062                         if (tg3_flag(tp, 5750_PLUS))
8063                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8064                 }
8065         }
8066
8067         return 0;
8068 }
8069
8070 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8071 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8072
8073 /* tp->lock is held. */
8074 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8075 {
8076         int err;
8077
8078         tg3_stop_fw(tp);
8079
8080         tg3_write_sig_pre_reset(tp, kind);
8081
8082         tg3_abort_hw(tp, silent);
8083         err = tg3_chip_reset(tp);
8084
8085         __tg3_set_mac_addr(tp, 0);
8086
8087         tg3_write_sig_legacy(tp, kind);
8088         tg3_write_sig_post_reset(tp, kind);
8089
8090         if (tp->hw_stats) {
8091                 /* Save the stats across chip resets... */
8092                 tg3_get_nstats(tp, &tp->net_stats_prev);
8093                 tg3_get_estats(tp, &tp->estats_prev);
8094
8095                 /* And make sure the next sample is new data */
8096                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8097         }
8098
8099         if (err)
8100                 return err;
8101
8102         return 0;
8103 }
8104
8105 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8106 {
8107         struct tg3 *tp = netdev_priv(dev);
8108         struct sockaddr *addr = p;
8109         int err = 0, skip_mac_1 = 0;
8110
8111         if (!is_valid_ether_addr(addr->sa_data))
8112                 return -EADDRNOTAVAIL;
8113
8114         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8115
8116         if (!netif_running(dev))
8117                 return 0;
8118
8119         if (tg3_flag(tp, ENABLE_ASF)) {
8120                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8121
8122                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8123                 addr0_low = tr32(MAC_ADDR_0_LOW);
8124                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8125                 addr1_low = tr32(MAC_ADDR_1_LOW);
8126
8127                 /* Skip MAC addr 1 if ASF is using it. */
8128                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8129                     !(addr1_high == 0 && addr1_low == 0))
8130                         skip_mac_1 = 1;
8131         }
8132         spin_lock_bh(&tp->lock);
8133         __tg3_set_mac_addr(tp, skip_mac_1);
8134         spin_unlock_bh(&tp->lock);
8135
8136         return err;
8137 }
8138
8139 /* tp->lock is held. */
8140 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8141                            dma_addr_t mapping, u32 maxlen_flags,
8142                            u32 nic_addr)
8143 {
8144         tg3_write_mem(tp,
8145                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8146                       ((u64) mapping >> 32));
8147         tg3_write_mem(tp,
8148                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8149                       ((u64) mapping & 0xffffffff));
8150         tg3_write_mem(tp,
8151                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8152                        maxlen_flags);
8153
8154         if (!tg3_flag(tp, 5705_PLUS))
8155                 tg3_write_mem(tp,
8156                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8157                               nic_addr);
8158 }
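
/* Illustrative sketch, not part of the driver: a TG3_BDINFO block
 * stores the host ring address as two 32-bit words, so tg3_set_bdinfo()
 * splits the dma_addr_t exactly as this hypothetical helper does.
 */
#if 0	/* example only, never compiled */
static void split_dma_addr(dma_addr_t mapping, u32 *hi, u32 *lo)
{
	*hi = (u64)mapping >> 32;		/* TG3_64BIT_REG_HIGH word */
	*lo = (u64)mapping & 0xffffffff;	/* TG3_64BIT_REG_LOW word */
}
#endif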
8159
8160 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8161 {
8162         int i;
8163
8164         if (!tg3_flag(tp, ENABLE_TSS)) {
8165                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8166                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8167                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8168         } else {
8169                 tw32(HOSTCC_TXCOL_TICKS, 0);
8170                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8171                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8172         }
8173
8174         if (!tg3_flag(tp, ENABLE_RSS)) {
8175                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8176                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8177                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8178         } else {
8179                 tw32(HOSTCC_RXCOL_TICKS, 0);
8180                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8181                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8182         }
8183
8184         if (!tg3_flag(tp, 5705_PLUS)) {
8185                 u32 val = ec->stats_block_coalesce_usecs;
8186
8187                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8188                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8189
8190                 if (!netif_carrier_ok(tp->dev))
8191                         val = 0;
8192
8193                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8194         }
8195
8196         for (i = 0; i < tp->irq_cnt - 1; i++) {
8197                 u32 reg;
8198
8199                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8200                 tw32(reg, ec->rx_coalesce_usecs);
8201                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8202                 tw32(reg, ec->rx_max_coalesced_frames);
8203                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8204                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8205
8206                 if (tg3_flag(tp, ENABLE_TSS)) {
8207                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8208                         tw32(reg, ec->tx_coalesce_usecs);
8209                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8210                         tw32(reg, ec->tx_max_coalesced_frames);
8211                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8212                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8213                 }
8214         }
8215
8216         for (; i < tp->irq_max - 1; i++) {
8217                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8218                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8219                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8220
8221                 if (tg3_flag(tp, ENABLE_TSS)) {
8222                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8223                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8224                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8225                 }
8226         }
8227 }
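
/* Illustrative sketch, not part of the driver: the per-vector host
 * coalescing registers sit at a fixed 0x18-byte stride above the VEC1
 * base, which is all the "+ i * 0x18" arithmetic above relies on.
 * vec_coal_reg() is a hypothetical helper naming that stride.
 */
#if 0	/* example only, never compiled */
static u32 vec_coal_reg(u32 vec1_base, int i)
{
	/* i == 0 addresses vector 1, i == 1 vector 2, and so on */
	return vec1_base + i * 0x18;
}
#endif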
8228
8229 /* tp->lock is held. */
8230 static void tg3_rings_reset(struct tg3 *tp)
8231 {
8232         int i;
8233         u32 stblk, txrcb, rxrcb, limit;
8234         struct tg3_napi *tnapi = &tp->napi[0];
8235
8236         /* Disable all transmit rings but the first. */
8237         if (!tg3_flag(tp, 5705_PLUS))
8238                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8239         else if (tg3_flag(tp, 5717_PLUS))
8240                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8241         else if (tg3_flag(tp, 57765_CLASS))
8242                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8243         else
8244                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8245
8246         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8247              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8248                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8249                               BDINFO_FLAGS_DISABLED);
8250
8252         /* Disable all receive return rings but the first. */
8253         if (tg3_flag(tp, 5717_PLUS))
8254                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8255         else if (!tg3_flag(tp, 5705_PLUS))
8256                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8257         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8258                  tg3_flag(tp, 57765_CLASS))
8259                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8260         else
8261                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8262
8263         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8264              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8265                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8266                               BDINFO_FLAGS_DISABLED);
8267
8268         /* Disable interrupts */
8269         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8270         tp->napi[0].chk_msi_cnt = 0;
8271         tp->napi[0].last_rx_cons = 0;
8272         tp->napi[0].last_tx_cons = 0;
8273
8274         /* Zero mailbox registers. */
8275         if (tg3_flag(tp, SUPPORT_MSIX)) {
8276                 for (i = 1; i < tp->irq_max; i++) {
8277                         tp->napi[i].tx_prod = 0;
8278                         tp->napi[i].tx_cons = 0;
8279                         if (tg3_flag(tp, ENABLE_TSS))
8280                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8281                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8282                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8283                         tp->napi[i].chk_msi_cnt = 0;
8284                         tp->napi[i].last_rx_cons = 0;
8285                         tp->napi[i].last_tx_cons = 0;
8286                 }
8287                 if (!tg3_flag(tp, ENABLE_TSS))
8288                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8289         } else {
8290                 tp->napi[0].tx_prod = 0;
8291                 tp->napi[0].tx_cons = 0;
8292                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8293                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8294         }
8295
8296         /* Make sure the NIC-based send BD rings are disabled. */
8297         if (!tg3_flag(tp, 5705_PLUS)) {
8298                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8299                 for (i = 0; i < 16; i++)
8300                         tw32_tx_mbox(mbox + i * 8, 0);
8301         }
8302
8303         txrcb = NIC_SRAM_SEND_RCB;
8304         rxrcb = NIC_SRAM_RCV_RET_RCB;
8305
8306         /* Clear the status block in RAM. */
8307         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8308
8309         /* Set status block DMA address */
8310         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8311              ((u64) tnapi->status_mapping >> 32));
8312         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8313              ((u64) tnapi->status_mapping & 0xffffffff));
8314
8315         if (tnapi->tx_ring) {
8316                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8317                                (TG3_TX_RING_SIZE <<
8318                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8319                                NIC_SRAM_TX_BUFFER_DESC);
8320                 txrcb += TG3_BDINFO_SIZE;
8321         }
8322
8323         if (tnapi->rx_rcb) {
8324                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8325                                (tp->rx_ret_ring_mask + 1) <<
8326                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8327                 rxrcb += TG3_BDINFO_SIZE;
8328         }
8329
8330         stblk = HOSTCC_STATBLCK_RING1;
8331
8332         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8333                 u64 mapping = (u64)tnapi->status_mapping;
8334                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8335                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8336
8337                 /* Clear the status block in RAM. */
8338                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8339
8340                 if (tnapi->tx_ring) {
8341                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8342                                        (TG3_TX_RING_SIZE <<
8343                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8344                                        NIC_SRAM_TX_BUFFER_DESC);
8345                         txrcb += TG3_BDINFO_SIZE;
8346                 }
8347
8348                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8349                                ((tp->rx_ret_ring_mask + 1) <<
8350                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8351
8352                 stblk += 8;
8353                 rxrcb += TG3_BDINFO_SIZE;
8354         }
8355 }
8356
8357 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8358 {
8359         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8360
8361         if (!tg3_flag(tp, 5750_PLUS) ||
8362             tg3_flag(tp, 5780_CLASS) ||
8363             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8364             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8365             tg3_flag(tp, 57765_PLUS))
8366                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8367         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8368                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8369                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8370         else
8371                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8372
8373         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8374         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8375
8376         val = min(nic_rep_thresh, host_rep_thresh);
8377         tw32(RCVBDI_STD_THRESH, val);
8378
8379         if (tg3_flag(tp, 57765_PLUS))
8380                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8381
8382         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8383                 return;
8384
8385         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8386
8387         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8388
8389         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8390         tw32(RCVBDI_JUMBO_THRESH, val);
8391
8392         if (tg3_flag(tp, 57765_PLUS))
8393                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8394 }
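
/* Illustrative sketch, not part of the driver: ignoring the
 * rx_std_max_post clamp, the standard ring replenish threshold chosen
 * above is the smaller of half the on-chip BD cache and one eighth of
 * the configured ring depth, but never less than one buffer.
 * std_replenish_thresh() is a hypothetical helper with that shape.
 */
#if 0	/* example only, never compiled */
static u32 std_replenish_thresh(u32 bdcache_maxcnt, u32 rx_pending)
{
	u32 host_rep_thresh = max_t(u32, rx_pending / 8, 1);

	return min(bdcache_maxcnt / 2, host_rep_thresh);
}
#endif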
8395
8396 static inline u32 calc_crc(unsigned char *buf, int len)
8397 {
8398         u32 reg;
8399         u32 tmp;
8400         int j, k;
8401
8402         reg = 0xffffffff;
8403
8404         for (j = 0; j < len; j++) {
8405                 reg ^= buf[j];
8406
8407                 for (k = 0; k < 8; k++) {
8408                         tmp = reg & 0x01;
8409
8410                         reg >>= 1;
8411
8412                         if (tmp)
8413                                 reg ^= 0xedb88320;
8414                 }
8415         }
8416
8417         return ~reg;
8418 }
8419
8420 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8421 {
8422         /* accept or reject all multicast frames */
8423         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8424         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8425         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8426         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8427 }
8428
8429 static void __tg3_set_rx_mode(struct net_device *dev)
8430 {
8431         struct tg3 *tp = netdev_priv(dev);
8432         u32 rx_mode;
8433
8434         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8435                                   RX_MODE_KEEP_VLAN_TAG);
8436
8437 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8438         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8439          * flag clear.
8440          */
8441         if (!tg3_flag(tp, ENABLE_ASF))
8442                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8443 #endif
8444
8445         if (dev->flags & IFF_PROMISC) {
8446                 /* Promiscuous mode. */
8447                 rx_mode |= RX_MODE_PROMISC;
8448         } else if (dev->flags & IFF_ALLMULTI) {
8449                 /* Accept all multicast. */
8450                 tg3_set_multi(tp, 1);
8451         } else if (netdev_mc_empty(dev)) {
8452                 /* Reject all multicast. */
8453                 tg3_set_multi(tp, 0);
8454         } else {
8455                 /* Accept one or more multicast(s). */
8456                 struct netdev_hw_addr *ha;
8457                 u32 mc_filter[4] = { 0, };
8458                 u32 regidx;
8459                 u32 bit;
8460                 u32 crc;
8461
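		/* Hash each address into one of 128 filter bits:
		 * invert the CRC, keep the low 7 bits, then use bits
		 * 6:5 to pick one of the four 32-bit hash registers
		 * and bits 4:0 to pick the bit within it.  E.g. an
		 * inverted-CRC value of 0x43 (0b1000011) sets bit 3
		 * of MAC_HASH_REG_2.
		 */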
8462                 netdev_for_each_mc_addr(ha, dev) {
8463                         crc = calc_crc(ha->addr, ETH_ALEN);
8464                         bit = ~crc & 0x7f;
8465                         regidx = (bit & 0x60) >> 5;
8466                         bit &= 0x1f;
8467                         mc_filter[regidx] |= (1 << bit);
8468                 }
8469
8470                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8471                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8472                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8473                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8474         }
8475
8476         if (rx_mode != tp->rx_mode) {
8477                 tp->rx_mode = rx_mode;
8478                 tw32_f(MAC_RX_MODE, rx_mode);
8479                 udelay(10);
8480         }
8481 }
8482
8483 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8484 {
8485         int i;
8486
8487         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8488                 tp->rss_ind_tbl[i] =
8489                         ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8490 }
8491
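/* Sanity-check the RSS indirection table against the current vector
 * count.  Each entry names an RX return ring, and with MSI-X the RX
 * rings ride vectors 1..irq_cnt-1 (vector 0 is not used for RSS RX),
 * so every entry must be below irq_cnt - 1; any out-of-range entry
 * causes a fallback to the ethtool default spread.
 */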
8492 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8493 {
8494         int i;
8495
8496         if (!tg3_flag(tp, SUPPORT_MSIX))
8497                 return;
8498
8499         if (tp->irq_cnt <= 2) {
8500                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8501                 return;
8502         }
8503
8504         /* Validate table against current IRQ count */
8505         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8506                 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8507                         break;
8508         }
8509
8510         if (i != TG3_RSS_INDIR_TBL_SIZE)
8511                 tg3_rss_init_dflt_indir_tbl(tp);
8512 }
8513
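/* The hardware indirection table packs eight 4-bit ring indices into
 * each 32-bit register, most-significant nibble first.  The loop
 * below therefore consumes the table in groups of eight; e.g. the
 * entries {1, 2, 3, 0, 1, 2, 3, 0} become the value 0x12301230
 * written to MAC_RSS_INDIR_TBL_0.
 */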
8514 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8515 {
8516         int i = 0;
8517         u32 reg = MAC_RSS_INDIR_TBL_0;
8518
8519         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8520                 u32 val = tp->rss_ind_tbl[i];
8521                 i++;
8522                 for (; i % 8; i++) {
8523                         val <<= 4;
8524                         val |= tp->rss_ind_tbl[i];
8525                 }
8526                 tw32(reg, val);
8527                 reg += 4;
8528         }
8529 }
8530
8531 /* tp->lock is held. */
8532 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8533 {
8534         u32 val, rdmac_mode;
8535         int i, err, limit;
8536         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8537
8538         tg3_disable_ints(tp);
8539
8540         tg3_stop_fw(tp);
8541
8542         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8543
8544         if (tg3_flag(tp, INIT_COMPLETE))
8545                 tg3_abort_hw(tp, 1);
8546
8547         /* Enable MAC control of LPI */
8548         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8549                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8550                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8551                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8552
8553                 tw32_f(TG3_CPMU_EEE_CTRL,
8554                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8555
8556                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8557                       TG3_CPMU_EEEMD_LPI_IN_TX |
8558                       TG3_CPMU_EEEMD_LPI_IN_RX |
8559                       TG3_CPMU_EEEMD_EEE_ENABLE;
8560
8561                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8562                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8563
8564                 if (tg3_flag(tp, ENABLE_APE))
8565                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8566
8567                 tw32_f(TG3_CPMU_EEE_MODE, val);
8568
8569                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8570                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8571                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8572
8573                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8574                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8575                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8576         }
8577
8578         if (reset_phy)
8579                 tg3_phy_reset(tp);
8580
8581         err = tg3_chip_reset(tp);
8582         if (err)
8583                 return err;
8584
8585         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8586
8587         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8588                 val = tr32(TG3_CPMU_CTRL);
8589                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8590                 tw32(TG3_CPMU_CTRL, val);
8591
8592                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8593                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8594                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8595                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8596
8597                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8598                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8599                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8600                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8601
8602                 val = tr32(TG3_CPMU_HST_ACC);
8603                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8604                 val |= CPMU_HST_ACC_MACCLK_6_25;
8605                 tw32(TG3_CPMU_HST_ACC, val);
8606         }
8607
8608         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8609                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8610                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8611                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8612                 tw32(PCIE_PWR_MGMT_THRESH, val);
8613
8614                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8615                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8616
8617                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8618
8619                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8620                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8621         }
8622
8623         if (tg3_flag(tp, L1PLLPD_EN)) {
8624                 u32 grc_mode = tr32(GRC_MODE);
8625
8626                 /* Access the lower 1K of PL PCIE block registers. */
8627                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8628                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8629
8630                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8631                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8632                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8633
8634                 tw32(GRC_MODE, grc_mode);
8635         }
8636
8637         if (tg3_flag(tp, 57765_CLASS)) {
8638                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8639                         u32 grc_mode = tr32(GRC_MODE);
8640
8641                         /* Access the lower 1K of PL PCIE block registers. */
8642                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8643                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8644
8645                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8646                                    TG3_PCIE_PL_LO_PHYCTL5);
8647                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8648                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8649
8650                         tw32(GRC_MODE, grc_mode);
8651                 }
8652
8653                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8654                         u32 grc_mode = tr32(GRC_MODE);
8655
8656                         /* Access the lower 1K of DL PCIE block registers. */
8657                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8658                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8659
8660                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8661                                    TG3_PCIE_DL_LO_FTSMAX);
8662                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8663                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8664                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8665
8666                         tw32(GRC_MODE, grc_mode);
8667                 }
8668
8669                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8670                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8671                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8672                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8673         }
8674
8675         /* This works around an issue with Athlon chipsets on
8676          * B3 tigon3 silicon.  This bit has no effect on any
8677          * other revision.  But do not set this on PCI Express
8678          * chips and don't even touch the clocks if the CPMU is present.
8679          */
8680         if (!tg3_flag(tp, CPMU_PRESENT)) {
8681                 if (!tg3_flag(tp, PCI_EXPRESS))
8682                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8683                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8684         }
8685
8686         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8687             tg3_flag(tp, PCIX_MODE)) {
8688                 val = tr32(TG3PCI_PCISTATE);
8689                 val |= PCISTATE_RETRY_SAME_DMA;
8690                 tw32(TG3PCI_PCISTATE, val);
8691         }
8692
8693         if (tg3_flag(tp, ENABLE_APE)) {
8694                 /* Allow reads and writes to the
8695                  * APE register and memory space.
8696                  */
8697                 val = tr32(TG3PCI_PCISTATE);
8698                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8699                        PCISTATE_ALLOW_APE_SHMEM_WR |
8700                        PCISTATE_ALLOW_APE_PSPACE_WR;
8701                 tw32(TG3PCI_PCISTATE, val);
8702         }
8703
8704         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8705                 /* Enable some hw fixes.  */
8706                 val = tr32(TG3PCI_MSI_DATA);
8707                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8708                 tw32(TG3PCI_MSI_DATA, val);
8709         }
8710
8711         /* Descriptor ring init may make accesses to the
8712          * NIC SRAM area to set up the TX descriptors, so we
8713          * can only do this after the hardware has been
8714          * successfully reset.
8715          */
8716         err = tg3_init_rings(tp);
8717         if (err)
8718                 return err;
8719
8720         if (tg3_flag(tp, 57765_PLUS)) {
8721                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8722                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8723                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8724                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8725                 if (!tg3_flag(tp, 57765_CLASS) &&
8726                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8727                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8728                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8729         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8730                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8731                 /* This value is determined during the probe-time DMA
8732                  * engine test, tg3_test_dma.
8733                  */
8734                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8735         }
8736
8737         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8738                           GRC_MODE_4X_NIC_SEND_RINGS |
8739                           GRC_MODE_NO_TX_PHDR_CSUM |
8740                           GRC_MODE_NO_RX_PHDR_CSUM);
8741         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8742
8743         /* Pseudo-header checksum is done by hardware logic and not
8744          * the offload processors, so make the chip do the pseudo-
8745          * header checksums on receive.  For transmit it is more
8746          * convenient to do the pseudo-header checksum in software
8747          * as Linux does that on transmit for us in all cases.
8748          */
8749         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8750
8751         tw32(GRC_MODE,
8752              tp->grc_mode |
8753              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8754
8755         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
8756         val = tr32(GRC_MISC_CFG);
8757         val &= ~0xff;
8758         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8759         tw32(GRC_MISC_CFG, val);
8760
8761         /* Initialize MBUF/DESC pool. */
8762         if (tg3_flag(tp, 5750_PLUS)) {
8763                 /* Do nothing.  */
8764         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8765                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8766                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8767                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8768                 else
8769                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8770                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8771                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8772         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8773                 int fw_len;
8774
8775                 fw_len = tp->fw_len;
8776                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8777                 tw32(BUFMGR_MB_POOL_ADDR,
8778                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8779                 tw32(BUFMGR_MB_POOL_SIZE,
8780                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8781         }
8782
8783         if (tp->dev->mtu <= ETH_DATA_LEN) {
8784                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8785                      tp->bufmgr_config.mbuf_read_dma_low_water);
8786                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8787                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8788                 tw32(BUFMGR_MB_HIGH_WATER,
8789                      tp->bufmgr_config.mbuf_high_water);
8790         } else {
8791                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8792                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8793                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8794                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8795                 tw32(BUFMGR_MB_HIGH_WATER,
8796                      tp->bufmgr_config.mbuf_high_water_jumbo);
8797         }
8798         tw32(BUFMGR_DMA_LOW_WATER,
8799              tp->bufmgr_config.dma_low_water);
8800         tw32(BUFMGR_DMA_HIGH_WATER,
8801              tp->bufmgr_config.dma_high_water);
8802
8803         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8804         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8805                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8806         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8807             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8808             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8809                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8810         tw32(BUFMGR_MODE, val);
8811         for (i = 0; i < 2000; i++) {
8812                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8813                         break;
8814                 udelay(10);
8815         }
8816         if (i >= 2000) {
8817                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8818                 return -ENODEV;
8819         }
8820
8821         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8822                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8823
8824         tg3_setup_rxbd_thresholds(tp);
8825
8826         /* Initialize the TG3_BDINFOs at:
8827          *  RCVDBDI_STD_BD:     standard eth size rx ring
8828          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8829          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8830          *
8831          * like so:
8832          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8833          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8834          *                              ring attribute flags
8835          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8836          *
8837          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8838          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8839          *
8840          * The size of each ring is fixed in the firmware, but the location is
8841          * configurable.
8842          */
8843         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8844              ((u64) tpr->rx_std_mapping >> 32));
8845         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8846              ((u64) tpr->rx_std_mapping & 0xffffffff));
8847         if (!tg3_flag(tp, 5717_PLUS))
8848                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8849                      NIC_SRAM_RX_BUFFER_DESC);
8850
8851         /* Disable the mini ring */
8852         if (!tg3_flag(tp, 5705_PLUS))
8853                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8854                      BDINFO_FLAGS_DISABLED);
8855
8856         /* Program the jumbo buffer descriptor ring control
8857          * blocks on those devices that have them.
8858          */
8859         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8860             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8861
8862                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8863                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8864                              ((u64) tpr->rx_jmb_mapping >> 32));
8865                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8866                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8867                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8868                               BDINFO_FLAGS_MAXLEN_SHIFT;
8869                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8870                              val | BDINFO_FLAGS_USE_EXT_RECV);
8871                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8872                             tg3_flag(tp, 57765_CLASS))
8873                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8874                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8875                 } else {
8876                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8877                              BDINFO_FLAGS_DISABLED);
8878                 }
8879
8880                 if (tg3_flag(tp, 57765_PLUS)) {
8881                         val = TG3_RX_STD_RING_SIZE(tp);
8882                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8883                         val |= (TG3_RX_STD_DMA_SZ << 2);
8884                 } else
8885                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8886         } else
8887                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8888
8889         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8890
8891         tpr->rx_std_prod_idx = tp->rx_pending;
8892         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8893
8894         tpr->rx_jmb_prod_idx =
8895                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8896         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8897
8898         tg3_rings_reset(tp);
8899
8900         /* Initialize MAC address and backoff seed. */
8901         __tg3_set_mac_addr(tp, 0);
8902
8903         /* MTU + Ethernet header + FCS + optional VLAN tag */
8904         tw32(MAC_RX_MTU_SIZE,
8905              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8906
8907         /* The slot time is changed by tg3_setup_phy if we
8908          * run at gigabit with half duplex.
8909          */
8910         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8911               (6 << TX_LENGTHS_IPG_SHIFT) |
8912               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8913
8914         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8915                 val |= tr32(MAC_TX_LENGTHS) &
8916                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8917                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8918
8919         tw32(MAC_TX_LENGTHS, val);
8920
8921         /* Receive rules. */
8922         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8923         tw32(RCVLPC_CONFIG, 0x0181);
8924
8925         /* Calculate the RDMAC_MODE setting early; we need it to determine
8926          * the RCVLPC_STATE_ENABLE mask.
8927          */
8928         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8929                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8930                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8931                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8932                       RDMAC_MODE_LNGREAD_ENAB);
8933
8934         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8935                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8936
8937         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8938             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8939             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8940                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8941                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8942                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8943
8944         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8945             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8946                 if (tg3_flag(tp, TSO_CAPABLE) &&
8947                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8948                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8949                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8950                            !tg3_flag(tp, IS_5788)) {
8951                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8952                 }
8953         }
8954
8955         if (tg3_flag(tp, PCI_EXPRESS))
8956                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8957
8958         if (tg3_flag(tp, HW_TSO_1) ||
8959             tg3_flag(tp, HW_TSO_2) ||
8960             tg3_flag(tp, HW_TSO_3))
8961                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8962
8963         if (tg3_flag(tp, 57765_PLUS) ||
8964             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8965             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8966                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8967
8968         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8969                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8970
8971         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8972             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8973             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8974             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8975             tg3_flag(tp, 57765_PLUS)) {
8976                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8977                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8978                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8979                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8980                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8981                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8982                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8983                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8984                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8985                 }
8986                 tw32(TG3_RDMA_RSRVCTRL_REG,
8987                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8988         }
8989
8990         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8991             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8992                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8993                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8994                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8995                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8996         }
8997
8998         /* Receive/send statistics. */
8999         if (tg3_flag(tp, 5750_PLUS)) {
9000                 val = tr32(RCVLPC_STATS_ENABLE);
9001                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9002                 tw32(RCVLPC_STATS_ENABLE, val);
9003         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9004                    tg3_flag(tp, TSO_CAPABLE)) {
9005                 val = tr32(RCVLPC_STATS_ENABLE);
9006                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9007                 tw32(RCVLPC_STATS_ENABLE, val);
9008         } else {
9009                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9010         }
9011         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9012         tw32(SNDDATAI_STATSENAB, 0xffffff);
9013         tw32(SNDDATAI_STATSCTRL,
9014              (SNDDATAI_SCTRL_ENABLE |
9015               SNDDATAI_SCTRL_FASTUPD));
9016
9017         /* Setup host coalescing engine. */
9018         tw32(HOSTCC_MODE, 0);
9019         for (i = 0; i < 2000; i++) {
9020                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9021                         break;
9022                 udelay(10);
9023         }
9024
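	/* With the engine quiesced (HOSTCC_MODE cleared above and polled
	 * until the enable bit drops), it is safe to program the
	 * coalescing parameters before the engine is re-enabled further
	 * down with tp->coalesce_mode.
	 */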
9025         __tg3_set_coalesce(tp, &tp->coal);
9026
9027         if (!tg3_flag(tp, 5705_PLUS)) {
9028                 /* Status/statistics block address.  See tg3_timer,
9029                  * the tg3_periodic_fetch_stats call there, and
9030                  * tg3_get_stats to see how this works for 5705/5750 chips.
9031                  */
9032                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9033                      ((u64) tp->stats_mapping >> 32));
9034                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9035                      ((u64) tp->stats_mapping & 0xffffffff));
9036                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9037
9038                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9039
9040                 /* Clear statistics and status block memory areas */
9041                 for (i = NIC_SRAM_STATS_BLK;
9042                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9043                      i += sizeof(u32)) {
9044                         tg3_write_mem(tp, i, 0);
9045                         udelay(40);
9046                 }
9047         }
9048
9049         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9050
9051         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9052         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9053         if (!tg3_flag(tp, 5705_PLUS))
9054                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9055
9056         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9057                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9058                 /* Reset to prevent intermittently losing the 1st RX packet. */
9059                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9060                 udelay(10);
9061         }
9062
9063         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9064                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9065                         MAC_MODE_FHDE_ENABLE;
9066         if (tg3_flag(tp, ENABLE_APE))
9067                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9068         if (!tg3_flag(tp, 5705_PLUS) &&
9069             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9070             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9071                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9072         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9073         udelay(40);
9074
9075         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9076          * If TG3_FLAG_IS_NIC is zero, we should read the
9077          * register to preserve the GPIO settings for LOMs. The GPIOs,
9078          * whether used as inputs or outputs, are set by boot code after
9079          * reset.
9080          */
9081         if (!tg3_flag(tp, IS_NIC)) {
9082                 u32 gpio_mask;
9083
9084                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9085                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9086                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9087
9088                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9089                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9090                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9091
9092                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9093                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9094
9095                 tp->grc_local_ctrl &= ~gpio_mask;
9096                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9097
9098                 /* GPIO1 must be driven high for EEPROM write protect */
9099                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9100                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9101                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9102         }
9103         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9104         udelay(100);
9105
9106         if (tg3_flag(tp, USING_MSIX)) {
9107                 val = tr32(MSGINT_MODE);
9108                 val |= MSGINT_MODE_ENABLE;
9109                 if (tp->irq_cnt > 1)
9110                         val |= MSGINT_MODE_MULTIVEC_EN;
9111                 if (!tg3_flag(tp, 1SHOT_MSI))
9112                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9113                 tw32(MSGINT_MODE, val);
9114         }
9115
9116         if (!tg3_flag(tp, 5705_PLUS)) {
9117                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9118                 udelay(40);
9119         }
9120
9121         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9122                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9123                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9124                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9125                WDMAC_MODE_LNGREAD_ENAB);
9126
9127         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9128             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9129                 if (tg3_flag(tp, TSO_CAPABLE) &&
9130                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9131                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9132                         /* nothing */
9133                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9134                            !tg3_flag(tp, IS_5788)) {
9135                         val |= WDMAC_MODE_RX_ACCEL;
9136                 }
9137         }
9138
9139         /* Enable host coalescing bug fix */
9140         if (tg3_flag(tp, 5755_PLUS))
9141                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9142
9143         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9144                 val |= WDMAC_MODE_BURST_ALL_DATA;
9145
9146         tw32_f(WDMAC_MODE, val);
9147         udelay(40);
9148
9149         if (tg3_flag(tp, PCIX_MODE)) {
9150                 u16 pcix_cmd;
9151
9152                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9153                                      &pcix_cmd);
9154                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9155                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9156                         pcix_cmd |= PCI_X_CMD_READ_2K;
9157                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9158                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9159                         pcix_cmd |= PCI_X_CMD_READ_2K;
9160                 }
9161                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9162                                       pcix_cmd);
9163         }
9164
9165         tw32_f(RDMAC_MODE, rdmac_mode);
9166         udelay(40);
9167
9168         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9169         if (!tg3_flag(tp, 5705_PLUS))
9170                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9171
9172         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9173                 tw32(SNDDATAC_MODE,
9174                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9175         else
9176                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9177
9178         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9179         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9180         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9181         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9182                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9183         tw32(RCVDBDI_MODE, val);
9184         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9185         if (tg3_flag(tp, HW_TSO_1) ||
9186             tg3_flag(tp, HW_TSO_2) ||
9187             tg3_flag(tp, HW_TSO_3))
9188                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9189         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9190         if (tg3_flag(tp, ENABLE_TSS))
9191                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9192         tw32(SNDBDI_MODE, val);
9193         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9194
9195         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9196                 err = tg3_load_5701_a0_firmware_fix(tp);
9197                 if (err)
9198                         return err;
9199         }
9200
9201         if (tg3_flag(tp, TSO_CAPABLE)) {
9202                 err = tg3_load_tso_firmware(tp);
9203                 if (err)
9204                         return err;
9205         }
9206
9207         tp->tx_mode = TX_MODE_ENABLE;
9208
9209         if (tg3_flag(tp, 5755_PLUS) ||
9210             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9211                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9212
9213         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9214                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9215                 tp->tx_mode &= ~val;
9216                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9217         }
9218
9219         tw32_f(MAC_TX_MODE, tp->tx_mode);
9220         udelay(100);
9221
9222         if (tg3_flag(tp, ENABLE_RSS)) {
9223                 tg3_rss_write_indir_tbl(tp);
9224
9225                 /* Set up the "secret" hash key. */
9226                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9227                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9228                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9229                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9230                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9231                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9232                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9233                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9234                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9235                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9236         }
9237
9238         tp->rx_mode = RX_MODE_ENABLE;
9239         if (tg3_flag(tp, 5755_PLUS))
9240                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9241
9242         if (tg3_flag(tp, ENABLE_RSS))
9243                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9244                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9245                                RX_MODE_RSS_IPV6_HASH_EN |
9246                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9247                                RX_MODE_RSS_IPV4_HASH_EN |
9248                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9249
9250         tw32_f(MAC_RX_MODE, tp->rx_mode);
9251         udelay(10);
9252
9253         tw32(MAC_LED_CTRL, tp->led_ctrl);
9254
9255         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9256         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9257                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9258                 udelay(10);
9259         }
9260         tw32_f(MAC_RX_MODE, tp->rx_mode);
9261         udelay(10);
9262
9263         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9264                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9265                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9266                         /* Set the drive transmission level to 1.2V, but
9267                          * only if the signal pre-emphasis bit is not set. */
9268                         val = tr32(MAC_SERDES_CFG);
9269                         val &= 0xfffff000;
9270                         val |= 0x880;
9271                         tw32(MAC_SERDES_CFG, val);
9272                 }
9273                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9274                         tw32(MAC_SERDES_CFG, 0x616000);
9275         }
9276
9277         /* Prevent the chip from dropping frames when flow control
9278          * is enabled.
9279          */
9280         if (tg3_flag(tp, 57765_CLASS))
9281                 val = 1;
9282         else
9283                 val = 2;
9284         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9285
9286         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9287             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9288                 /* Use hardware link auto-negotiation */
9289                 tg3_flag_set(tp, HW_AUTONEG);
9290         }
9291
9292         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9293             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9294                 u32 tmp;
9295
9296                 tmp = tr32(SERDES_RX_CTRL);
9297                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9298                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9299                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9300                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9301         }
9302
9303         if (!tg3_flag(tp, USE_PHYLIB)) {
9304                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9305                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9306
9307                 err = tg3_setup_phy(tp, 0);
9308                 if (err)
9309                         return err;
9310
9311                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9312                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9313                         u32 tmp;
9314
9315                         /* Clear CRC stats. */
9316                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9317                                 tg3_writephy(tp, MII_TG3_TEST1,
9318                                              tmp | MII_TG3_TEST1_CRC_EN);
9319                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9320                         }
9321                 }
9322         }
9323
9324         __tg3_set_rx_mode(tp->dev);
9325
9326         /* Initialize receive rules. */
9327         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9328         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9329         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9330         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9331
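	/* Rules 0 and 1 were programmed above; every remaining rule slot
	 * is explicitly disabled.  The slot count depends on the chip
	 * family (8 on 5705-and-newer parts outside the 5780 class, 16
	 * otherwise), and with ASF enabled the top four slots are left
	 * untouched for the firmware, so the switch below falls through
	 * from the highest host-owned slot downward.  Slots 2 and 3 are
	 * deliberately skipped (note the commented-out writes).
	 */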
9332         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9333                 limit = 8;
9334         else
9335                 limit = 16;
9336         if (tg3_flag(tp, ENABLE_ASF))
9337                 limit -= 4;
9338         switch (limit) {
9339         case 16:
9340                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9341         case 15:
9342                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9343         case 14:
9344                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9345         case 13:
9346                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9347         case 12:
9348                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9349         case 11:
9350                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9351         case 10:
9352                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9353         case 9:
9354                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9355         case 8:
9356                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9357         case 7:
9358                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9359         case 6:
9360                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9361         case 5:
9362                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9363         case 4:
9364                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9365         case 3:
9366                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9367         case 2:
9368         case 1:
9369
9370         default:
9371                 break;
9372         }
9373
9374         if (tg3_flag(tp, ENABLE_APE))
9375                 /* Write our heartbeat update interval to APE. */
9376                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9377                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9378
9379         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9380
9381         return 0;
9382 }
9383
9384 /* Called at device open time to get the chip ready for
9385  * packet processing.  Invoked with tp->lock held.
9386  */
9387 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9388 {
9389         tg3_switch_clocks(tp);
9390
9391         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9392
9393         return tg3_reset_hw(tp, reset_phy);
9394 }
9395
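/* Fold a 32-bit hardware counter into a 64-bit {high, low} accumulator.
 * The carry is detected by unsigned wraparound: if adding __val left
 * the low word smaller than __val, the addition overflowed and high is
 * bumped.  E.g. low = 0xfffffff0 plus __val = 0x20 leaves low = 0x10,
 * which is < 0x20, so high increments.
 */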
9396 #define TG3_STAT_ADD32(PSTAT, REG) \
9397 do {    u32 __val = tr32(REG); \
9398         (PSTAT)->low += __val; \
9399         if ((PSTAT)->low < __val) \
9400                 (PSTAT)->high += 1; \
9401 } while (0)
9402
9403 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9404 {
9405         struct tg3_hw_stats *sp = tp->hw_stats;
9406
9407         if (!netif_carrier_ok(tp->dev))
9408                 return;
9409
9410         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9411         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9412         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9413         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9414         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9415         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9416         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9417         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9418         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9419         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9420         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9421         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9422         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9423
9424         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9425         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9426         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9427         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9428         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9429         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9430         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9431         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9432         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9433         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9434         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9435         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9436         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9437         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9438
9439         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9440         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9441             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9442             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9443                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9444         } else {
9445                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9446                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9447                 if (val) {
9448                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9449                         sp->rx_discards.low += val;
9450                         if (sp->rx_discards.low < val)
9451                                 sp->rx_discards.high += 1;
9452                 }
9453                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9454         }
9455         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9456 }
9457
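/* Some chips can lose an MSI under load.  Detect this heuristically:
 * if a vector has work pending but neither its RX nor its TX consumer
 * index has moved since the previous timer tick, assume the interrupt
 * went missing and invoke the handler by hand.  chk_msi_cnt gives each
 * vector one tick of grace before the workaround fires.
 */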
9458 static void tg3_chk_missed_msi(struct tg3 *tp)
9459 {
9460         u32 i;
9461
9462         for (i = 0; i < tp->irq_cnt; i++) {
9463                 struct tg3_napi *tnapi = &tp->napi[i];
9464
9465                 if (tg3_has_work(tnapi)) {
9466                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9467                             tnapi->last_tx_cons == tnapi->tx_cons) {
9468                                 if (tnapi->chk_msi_cnt < 1) {
9469                                         tnapi->chk_msi_cnt++;
9470                                         return;
9471                                 }
9472                                 tg3_msi(0, tnapi);
9473                         }
9474                 }
9475                 tnapi->chk_msi_cnt = 0;
9476                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9477                 tnapi->last_tx_cons = tnapi->tx_cons;
9478         }
9479 }
9480
9481 static void tg3_timer(unsigned long __opaque)
9482 {
9483         struct tg3 *tp = (struct tg3 *) __opaque;
9484
9485         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9486                 goto restart_timer;
9487
9488         spin_lock(&tp->lock);
9489
9490         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9491             tg3_flag(tp, 57765_CLASS))
9492                 tg3_chk_missed_msi(tp);
9493
9494         if (!tg3_flag(tp, TAGGED_STATUS)) {
9495                 /* All of this garbage is because, when using non-tagged
9496                  * IRQ status, the mailbox/status_block protocol the chip
9497                  * uses with the CPU is race prone.
9498                  */
9499                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9500                         tw32(GRC_LOCAL_CTRL,
9501                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9502                 } else {
9503                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9504                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9505                 }
9506
9507                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9508                         spin_unlock(&tp->lock);
9509                         tg3_reset_task_schedule(tp);
9510                         goto restart_timer;
9511                 }
9512         }
9513
9514         /* This part only runs once per second. */
9515         if (!--tp->timer_counter) {
9516                 if (tg3_flag(tp, 5705_PLUS))
9517                         tg3_periodic_fetch_stats(tp);
9518
9519                 if (tp->setlpicnt && !--tp->setlpicnt)
9520                         tg3_phy_eee_enable(tp);
9521
9522                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9523                         u32 mac_stat;
9524                         int phy_event;
9525
9526                         mac_stat = tr32(MAC_STATUS);
9527
9528                         phy_event = 0;
9529                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9530                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9531                                         phy_event = 1;
9532                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9533                                 phy_event = 1;
9534
9535                         if (phy_event)
9536                                 tg3_setup_phy(tp, 0);
9537                 } else if (tg3_flag(tp, POLL_SERDES)) {
9538                         u32 mac_stat = tr32(MAC_STATUS);
9539                         int need_setup = 0;
9540
9541                         if (netif_carrier_ok(tp->dev) &&
9542                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9543                                 need_setup = 1;
9544                         }
9545                         if (!netif_carrier_ok(tp->dev) &&
9546                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9547                                          MAC_STATUS_SIGNAL_DET))) {
9548                                 need_setup = 1;
9549                         }
9550                         if (need_setup) {
9551                                 if (!tp->serdes_counter) {
9552                                         tw32_f(MAC_MODE,
9553                                              (tp->mac_mode &
9554                                               ~MAC_MODE_PORT_MODE_MASK));
9555                                         udelay(40);
9556                                         tw32_f(MAC_MODE, tp->mac_mode);
9557                                         udelay(40);
9558                                 }
9559                                 tg3_setup_phy(tp, 0);
9560                         }
9561                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9562                            tg3_flag(tp, 5780_CLASS)) {
9563                         tg3_serdes_parallel_detect(tp);
9564                 }
9565
9566                 tp->timer_counter = tp->timer_multiplier;
9567         }
9568
9569         /* Heartbeat is only sent once every 2 seconds.
9570          *
9571          * The heartbeat is to tell the ASF firmware that the host
9572          * driver is still alive.  In the event that the OS crashes,
9573          * ASF needs to reset the hardware to free up the FIFO space
9574          * that may be filled with rx packets destined for the host.
9575          * If the FIFO is full, ASF will no longer function properly.
9576          *
9577          * Unintended resets have been reported on real-time kernels,
9578          * where the timer doesn't run on time.  Netpoll will also have
9579          * the same problem.
9580          *
9581          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9582          * to check the ring condition when the heartbeat is expiring
9583          * before doing the reset.  This will prevent most unintended
9584          * resets.
9585          */
9586         if (!--tp->asf_counter) {
9587                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9588                         tg3_wait_for_event_ack(tp);
9589
9590                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9591                                       FWCMD_NICDRV_ALIVE3);
9592                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9593                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9594                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9595
9596                         tg3_generate_fw_event(tp);
9597                 }
9598                 tp->asf_counter = tp->asf_multiplier;
9599         }
9600
9601         spin_unlock(&tp->lock);
9602
9603 restart_timer:
9604         tp->timer.expires = jiffies + tp->timer_offset;
9605         add_timer(&tp->timer);
9606 }
9607
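/* Timer bookkeeping: with tagged status the timer only needs to run
 * once a second (timer_offset = HZ); otherwise it polls at 10 Hz.
 * timer_multiplier converts ticks back to seconds, e.g. an offset of
 * HZ / 10 gives timer_multiplier = 10, so the once-per-second work in
 * tg3_timer() runs every tenth tick, and asf_multiplier stretches that
 * to the TG3_FW_UPDATE_FREQ_SEC heartbeat period.
 */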
9608 static void __devinit tg3_timer_init(struct tg3 *tp)
9609 {
9610         if (tg3_flag(tp, TAGGED_STATUS) &&
9611             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9612             !tg3_flag(tp, 57765_CLASS))
9613                 tp->timer_offset = HZ;
9614         else
9615                 tp->timer_offset = HZ / 10;
9616
9617         BUG_ON(tp->timer_offset > HZ);
9618
9619         tp->timer_multiplier = (HZ / tp->timer_offset);
9620         tp->asf_multiplier = (HZ / tp->timer_offset) *
9621                              TG3_FW_UPDATE_FREQ_SEC;
9622
9623         init_timer(&tp->timer);
9624         tp->timer.data = (unsigned long) tp;
9625         tp->timer.function = tg3_timer;
9626 }
9627
9628 static void tg3_timer_start(struct tg3 *tp)
9629 {
9630         tp->asf_counter   = tp->asf_multiplier;
9631         tp->timer_counter = tp->timer_multiplier;
9632
9633         tp->timer.expires = jiffies + tp->timer_offset;
9634         add_timer(&tp->timer);
9635 }
9636
9637 static void tg3_timer_stop(struct tg3 *tp)
9638 {
9639         del_timer_sync(&tp->timer);
9640 }
9641
9642 /* Restart hardware after configuration changes, self-test, etc.
9643  * Invoked with tp->lock held.
9644  */
9645 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9646         __releases(tp->lock)
9647         __acquires(tp->lock)
9648 {
9649         int err;
9650
9651         err = tg3_init_hw(tp, reset_phy);
9652         if (err) {
9653                 netdev_err(tp->dev,
9654                            "Failed to re-initialize device, aborting\n");
9655                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9656                 tg3_full_unlock(tp);
9657                 tg3_timer_stop(tp);
9658                 tp->irq_sync = 0;
9659                 tg3_napi_enable(tp);
9660                 dev_close(tp->dev);
9661                 tg3_full_lock(tp, 0);
9662         }
9663         return err;
9664 }
9665
9666 static void tg3_reset_task(struct work_struct *work)
9667 {
9668         struct tg3 *tp = container_of(work, struct tg3, reset_task);
9669         int err;
9670
9671         tg3_full_lock(tp, 0);
9672
9673         if (!netif_running(tp->dev)) {
9674                 tg3_flag_clear(tp, RESET_TASK_PENDING);
9675                 tg3_full_unlock(tp);
9676                 return;
9677         }
9678
9679         tg3_full_unlock(tp);
9680
9681         tg3_phy_stop(tp);
9682
9683         tg3_netif_stop(tp);
9684
9685         tg3_full_lock(tp, 1);
9686
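	/* A pending TX-timeout recovery suggests that posted mailbox
	 * writes were reordered on their way to the chip, so downgrade
	 * to the flushing mailbox accessors and remember the quirk
	 * before reinitializing the hardware.
	 */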
9687         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9688                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9689                 tp->write32_rx_mbox = tg3_write_flush_reg32;
9690                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9691                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9692         }
9693
9694         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9695         err = tg3_init_hw(tp, 1);
9696         if (err)
9697                 goto out;
9698
9699         tg3_netif_start(tp);
9700
9701 out:
9702         tg3_full_unlock(tp);
9703
9704         if (!err)
9705                 tg3_phy_start(tp);
9706
9707         tg3_flag_clear(tp, RESET_TASK_PENDING);
9708 }
9709
9710 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9711 {
9712         irq_handler_t fn;
9713         unsigned long flags;
9714         char *name;
9715         struct tg3_napi *tnapi = &tp->napi[irq_num];
9716
9717         if (tp->irq_cnt == 1)
9718                 name = tp->dev->name;
9719         else {
9720                 name = &tnapi->irq_lbl[0];
9721                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9722                 name[IFNAMSIZ-1] = 0;
9723         }
9724
9725         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9726                 fn = tg3_msi;
9727                 if (tg3_flag(tp, 1SHOT_MSI))
9728                         fn = tg3_msi_1shot;
9729                 flags = 0;
9730         } else {
9731                 fn = tg3_interrupt;
9732                 if (tg3_flag(tp, TAGGED_STATUS))
9733                         fn = tg3_interrupt_tagged;
9734                 flags = IRQF_SHARED;
9735         }
9736
9737         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9738 }
9739
9740 static int tg3_test_interrupt(struct tg3 *tp)
9741 {
9742         struct tg3_napi *tnapi = &tp->napi[0];
9743         struct net_device *dev = tp->dev;
9744         int err, i, intr_ok = 0;
9745         u32 val;
9746
9747         if (!netif_running(dev))
9748                 return -ENODEV;
9749
9750         tg3_disable_ints(tp);
9751
9752         free_irq(tnapi->irq_vec, tnapi);
9753
9754         /*
9755          * Turn off MSI one shot mode.  Otherwise this test would have no
9756          * observable way of knowing whether the interrupt was delivered.
9757          */
9758         if (tg3_flag(tp, 57765_PLUS)) {
9759                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9760                 tw32(MSGINT_MODE, val);
9761         }
9762
9763         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9764                           IRQF_SHARED, dev->name, tnapi);
9765         if (err)
9766                 return err;
9767
9768         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9769         tg3_enable_ints(tp);
9770
9771         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9772                tnapi->coal_now);
9773
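        /* Poll for up to ~50 ms (5 x 10 ms) for evidence that the test ISR
         * ran: either a nonzero interrupt mailbox, or the ISR having set the
         * mask-PCI-interrupt bit in misc host control.
         */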
9774         for (i = 0; i < 5; i++) {
9775                 u32 int_mbox, misc_host_ctrl;
9776
9777                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9778                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9779
9780                 if ((int_mbox != 0) ||
9781                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9782                         intr_ok = 1;
9783                         break;
9784                 }
9785
9786                 if (tg3_flag(tp, 57765_PLUS) &&
9787                     tnapi->hw_status->status_tag != tnapi->last_tag)
9788                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9789
9790                 msleep(10);
9791         }
9792
9793         tg3_disable_ints(tp);
9794
9795         free_irq(tnapi->irq_vec, tnapi);
9796
9797         err = tg3_request_irq(tp, 0);
9798
9799         if (err)
9800                 return err;
9801
9802         if (intr_ok) {
9803                 /* Reenable MSI one shot mode. */
9804                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9805                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9806                         tw32(MSGINT_MODE, val);
9807                 }
9808                 return 0;
9809         }
9810
9811         return -EIO;
9812 }
9813
9814 /* Returns 0 if the MSI test succeeds, or if the MSI test fails and
9815  * INTx mode is successfully restored.
9816  */
9817 static int tg3_test_msi(struct tg3 *tp)
9818 {
9819         int err;
9820         u16 pci_cmd;
9821
9822         if (!tg3_flag(tp, USING_MSI))
9823                 return 0;
9824
9825         /* Turn off SERR reporting in case MSI terminates with Master
9826          * Abort.
9827          */
9828         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9829         pci_write_config_word(tp->pdev, PCI_COMMAND,
9830                               pci_cmd & ~PCI_COMMAND_SERR);
9831
9832         err = tg3_test_interrupt(tp);
9833
9834         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9835
9836         if (!err)
9837                 return 0;
9838
9839         /* other failures */
9840         if (err != -EIO)
9841                 return err;
9842
9843         /* MSI test failed, go back to INTx mode */
9844         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9845                     "to INTx mode. Please report this failure to the PCI "
9846                     "maintainer and include system chipset information\n");
9847
9848         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9849
9850         pci_disable_msi(tp->pdev);
9851
9852         tg3_flag_clear(tp, USING_MSI);
9853         tp->napi[0].irq_vec = tp->pdev->irq;
9854
9855         err = tg3_request_irq(tp, 0);
9856         if (err)
9857                 return err;
9858
9859         /* Need to reset the chip because the MSI cycle may have terminated
9860          * with Master Abort.
9861          */
9862         tg3_full_lock(tp, 1);
9863
9864         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9865         err = tg3_init_hw(tp, 1);
9866
9867         tg3_full_unlock(tp);
9868
9869         if (err)
9870                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9871
9872         return err;
9873 }
9874
9875 static int tg3_request_firmware(struct tg3 *tp)
9876 {
9877         const __be32 *fw_data;
9878
9879         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9880                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9881                            tp->fw_needed);
9882                 return -ENOENT;
9883         }
9884
9885         fw_data = (void *)tp->fw->data;
9886
9887         /* Firmware blob starts with version numbers, followed by
9888          * start address and _full_ length including BSS sections
9889          * (which must be longer than the actual data, of course).
9890          */
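        /* A worked example of that layout (numbers hypothetical): a
         * 1036-byte blob carries three header words and hence 1024 payload
         * bytes; fw_data[0] is the version, fw_data[1] the load address,
         * and fw_data[2] the full image length including BSS, which must
         * be at least fw->size - 12 = 1024.
         */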
9891
9892         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9893         if (tp->fw_len < (tp->fw->size - 12)) {
9894                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9895                            tp->fw_len, tp->fw_needed);
9896                 release_firmware(tp->fw);
9897                 tp->fw = NULL;
9898                 return -EINVAL;
9899         }
9900
9901         /* We no longer need firmware; we have it. */
9902         tp->fw_needed = NULL;
9903         return 0;
9904 }
9905
9906 static bool tg3_enable_msix(struct tg3 *tp)
9907 {
9908         int i, rc;
9909         struct msix_entry msix_ent[tp->irq_max];
9910
9911         tp->irq_cnt = num_online_cpus();
9912         if (tp->irq_cnt > 1) {
9913                 /* We want as many rx rings enabled as there are cpus.
9914                  * In multiqueue MSI-X mode, the first MSI-X vector
9915                  * only deals with link interrupts, etc., so we add
9916                  * one to the number of vectors we are requesting.
9917                  */
9918                 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
9919         }
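        /* e.g. with four online CPUs and irq_max >= 5, this requests five
         * vectors: one for link/setup events plus one per RX ring.
         */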
9920
9921         for (i = 0; i < tp->irq_max; i++) {
9922                 msix_ent[i].entry  = i;
9923                 msix_ent[i].vector = 0;
9924         }
9925
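        /* pci_enable_msix() returns 0 on success, a negative errno on hard
         * failure, or a positive count of the vectors actually available;
         * in the last case, retry once with the reduced count.
         */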
9926         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9927         if (rc < 0) {
9928                 return false;
9929         } else if (rc != 0) {
9930                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9931                         return false;
9932                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9933                               tp->irq_cnt, rc);
9934                 tp->irq_cnt = rc;
9935         }
9936
9937         for (i = 0; i < tp->irq_max; i++)
9938                 tp->napi[i].irq_vec = msix_ent[i].vector;
9939
9940         netif_set_real_num_tx_queues(tp->dev, 1);
9941         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9942         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9943                 pci_disable_msix(tp->pdev);
9944                 return false;
9945         }
9946
9947         if (tp->irq_cnt > 1) {
9948                 tg3_flag_set(tp, ENABLE_RSS);
9949
9950                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9951                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9952                         tg3_flag_set(tp, ENABLE_TSS);
9953                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9954                 }
9955         }
9956
9957         return true;
9958 }
9959
9960 static void tg3_ints_init(struct tg3 *tp)
9961 {
9962         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9963             !tg3_flag(tp, TAGGED_STATUS)) {
9964                 /* All MSI supporting chips should support tagged
9965                  * status.  Warn and fall back to INTx otherwise.
9966                  */
9967                 netdev_warn(tp->dev,
9968                             "MSI without TAGGED_STATUS? Not using MSI\n");
9969                 goto defcfg;
9970         }
9971
9972         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9973                 tg3_flag_set(tp, USING_MSIX);
9974         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9975                 tg3_flag_set(tp, USING_MSI);
9976
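        /* Program the message-interrupt mode register: multi-vector routing
         * is wanted only when more than one MSI-X vector is in play, and
         * one-shot mode is explicitly disabled unless the 1SHOT_MSI flag
         * is set.
         */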
9977         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9978                 u32 msi_mode = tr32(MSGINT_MODE);
9979                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9980                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9981                 if (!tg3_flag(tp, 1SHOT_MSI))
9982                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9983                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9984         }
9985 defcfg:
9986         if (!tg3_flag(tp, USING_MSIX)) {
9987                 tp->irq_cnt = 1;
9988                 tp->napi[0].irq_vec = tp->pdev->irq;
9989                 netif_set_real_num_tx_queues(tp->dev, 1);
9990                 netif_set_real_num_rx_queues(tp->dev, 1);
9991         }
9992 }
9993
9994 static void tg3_ints_fini(struct tg3 *tp)
9995 {
9996         if (tg3_flag(tp, USING_MSIX))
9997                 pci_disable_msix(tp->pdev);
9998         else if (tg3_flag(tp, USING_MSI))
9999                 pci_disable_msi(tp->pdev);
10000         tg3_flag_clear(tp, USING_MSI);
10001         tg3_flag_clear(tp, USING_MSIX);
10002         tg3_flag_clear(tp, ENABLE_RSS);
10003         tg3_flag_clear(tp, ENABLE_TSS);
10004 }
10005
10006 static int tg3_open(struct net_device *dev)
10007 {
10008         struct tg3 *tp = netdev_priv(dev);
10009         int i, err;
10010
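        /* The 5701 A0 requires firmware unconditionally, so a load failure
         * is fatal there; on every other chip the firmware only provides
         * TSO, so just disable the TSO capability and carry on.
         */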
10011         if (tp->fw_needed) {
10012                 err = tg3_request_firmware(tp);
10013                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10014                         if (err)
10015                                 return err;
10016                 } else if (err) {
10017                         netdev_warn(tp->dev, "TSO capability disabled\n");
10018                         tg3_flag_clear(tp, TSO_CAPABLE);
10019                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10020                         netdev_notice(tp->dev, "TSO capability restored\n");
10021                         tg3_flag_set(tp, TSO_CAPABLE);
10022                 }
10023         }
10024
10025         netif_carrier_off(tp->dev);
10026
10027         err = tg3_power_up(tp);
10028         if (err)
10029                 return err;
10030
10031         tg3_full_lock(tp, 0);
10032
10033         tg3_disable_ints(tp);
10034         tg3_flag_clear(tp, INIT_COMPLETE);
10035
10036         tg3_full_unlock(tp);
10037
10038         /*
10039          * Setup interrupts first so we know how
10040          * many NAPI resources to allocate
10041          */
10042         tg3_ints_init(tp);
10043
10044         tg3_rss_check_indir_tbl(tp);
10045
10046         /* The placement of this call is tied
10047          * to the setup and use of Host TX descriptors.
10048          */
10049         err = tg3_alloc_consistent(tp);
10050         if (err)
10051                 goto err_out1;
10052
10053         tg3_napi_init(tp);
10054
10055         tg3_napi_enable(tp);
10056
10057         for (i = 0; i < tp->irq_cnt; i++) {
10058                 struct tg3_napi *tnapi = &tp->napi[i];
10059                 err = tg3_request_irq(tp, i);
10060                 if (err) {
10061                         for (i--; i >= 0; i--) {
10062                                 tnapi = &tp->napi[i];
10063                                 free_irq(tnapi->irq_vec, tnapi);
10064                         }
10065                         goto err_out2;
10066                 }
10067         }
10068
10069         tg3_full_lock(tp, 0);
10070
10071         err = tg3_init_hw(tp, 1);
10072         if (err) {
10073                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10074                 tg3_free_rings(tp);
10075         }
10076
10077         tg3_full_unlock(tp);
10078
10079         if (err)
10080                 goto err_out3;
10081
10082         if (tg3_flag(tp, USING_MSI)) {
10083                 err = tg3_test_msi(tp);
10084
10085                 if (err) {
10086                         tg3_full_lock(tp, 0);
10087                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10088                         tg3_free_rings(tp);
10089                         tg3_full_unlock(tp);
10090
10091                         goto err_out2;
10092                 }
10093
10094                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10095                         u32 val = tr32(PCIE_TRANSACTION_CFG);
10096
10097                         tw32(PCIE_TRANSACTION_CFG,
10098                              val | PCIE_TRANS_CFG_1SHOT_MSI);
10099                 }
10100         }
10101
10102         tg3_phy_start(tp);
10103
10104         tg3_full_lock(tp, 0);
10105
10106         tg3_timer_start(tp);
10107         tg3_flag_set(tp, INIT_COMPLETE);
10108         tg3_enable_ints(tp);
10109
10110         tg3_full_unlock(tp);
10111
10112         netif_tx_start_all_queues(dev);
10113
10114         /*
10115          * Reset the loopback feature if it was turned on while the device
10116          * was down; make sure it is configured properly now.
10117          */
10118         if (dev->features & NETIF_F_LOOPBACK)
10119                 tg3_set_loopback(dev, dev->features);
10120
10121         return 0;
10122
10123 err_out3:
10124         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10125                 struct tg3_napi *tnapi = &tp->napi[i];
10126                 free_irq(tnapi->irq_vec, tnapi);
10127         }
10128
10129 err_out2:
10130         tg3_napi_disable(tp);
10131         tg3_napi_fini(tp);
10132         tg3_free_consistent(tp);
10133
10134 err_out1:
10135         tg3_ints_fini(tp);
10136         tg3_frob_aux_power(tp, false);
10137         pci_set_power_state(tp->pdev, PCI_D3hot);
10138         return err;
10139 }
10140
10141 static int tg3_close(struct net_device *dev)
10142 {
10143         int i;
10144         struct tg3 *tp = netdev_priv(dev);
10145
10146         tg3_napi_disable(tp);
10147         tg3_reset_task_cancel(tp);
10148
10149         netif_tx_stop_all_queues(dev);
10150
10151         tg3_timer_stop(tp);
10152
10153         tg3_phy_stop(tp);
10154
10155         tg3_full_lock(tp, 1);
10156
10157         tg3_disable_ints(tp);
10158
10159         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10160         tg3_free_rings(tp);
10161         tg3_flag_clear(tp, INIT_COMPLETE);
10162
10163         tg3_full_unlock(tp);
10164
10165         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10166                 struct tg3_napi *tnapi = &tp->napi[i];
10167                 free_irq(tnapi->irq_vec, tnapi);
10168         }
10169
10170         tg3_ints_fini(tp);
10171
10172         /* Clear stats across close / open calls */
10173         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10174         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10175
10176         tg3_napi_fini(tp);
10177
10178         tg3_free_consistent(tp);
10179
10180         tg3_power_down(tp);
10181
10182         netif_carrier_off(tp->dev);
10183
10184         return 0;
10185 }
10186
10187 static inline u64 get_stat64(tg3_stat64_t *val)
10188 {
10189         return ((u64)val->high << 32) | ((u64)val->low);
10190 }
10191
10192 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10193 {
10194         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10195
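        /* On 5700/5701 with a copper PHY, the MAC's FCS-error counter is
         * apparently not trustworthy; instead, enable the PHY's own CRC
         * counter via MII_TG3_TEST1 and accumulate its (presumably
         * clear-on-read) value in software.
         */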
10196         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10197             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10198              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10199                 u32 val;
10200
10201                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10202                         tg3_writephy(tp, MII_TG3_TEST1,
10203                                      val | MII_TG3_TEST1_CRC_EN);
10204                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10205                 } else
10206                         val = 0;
10207
10208                 tp->phy_crc_errors += val;
10209
10210                 return tp->phy_crc_errors;
10211         }
10212
10213         return get_stat64(&hw_stats->rx_fcs_errors);
10214 }
10215
10216 #define ESTAT_ADD(member) \
10217         estats->member =        old_estats->member + \
10218                                 get_stat64(&hw_stats->member)
10219
10220 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10221 {
10222         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10223         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10224
10225         ESTAT_ADD(rx_octets);
10226         ESTAT_ADD(rx_fragments);
10227         ESTAT_ADD(rx_ucast_packets);
10228         ESTAT_ADD(rx_mcast_packets);
10229         ESTAT_ADD(rx_bcast_packets);
10230         ESTAT_ADD(rx_fcs_errors);
10231         ESTAT_ADD(rx_align_errors);
10232         ESTAT_ADD(rx_xon_pause_rcvd);
10233         ESTAT_ADD(rx_xoff_pause_rcvd);
10234         ESTAT_ADD(rx_mac_ctrl_rcvd);
10235         ESTAT_ADD(rx_xoff_entered);
10236         ESTAT_ADD(rx_frame_too_long_errors);
10237         ESTAT_ADD(rx_jabbers);
10238         ESTAT_ADD(rx_undersize_packets);
10239         ESTAT_ADD(rx_in_length_errors);
10240         ESTAT_ADD(rx_out_length_errors);
10241         ESTAT_ADD(rx_64_or_less_octet_packets);
10242         ESTAT_ADD(rx_65_to_127_octet_packets);
10243         ESTAT_ADD(rx_128_to_255_octet_packets);
10244         ESTAT_ADD(rx_256_to_511_octet_packets);
10245         ESTAT_ADD(rx_512_to_1023_octet_packets);
10246         ESTAT_ADD(rx_1024_to_1522_octet_packets);
10247         ESTAT_ADD(rx_1523_to_2047_octet_packets);
10248         ESTAT_ADD(rx_2048_to_4095_octet_packets);
10249         ESTAT_ADD(rx_4096_to_8191_octet_packets);
10250         ESTAT_ADD(rx_8192_to_9022_octet_packets);
10251
10252         ESTAT_ADD(tx_octets);
10253         ESTAT_ADD(tx_collisions);
10254         ESTAT_ADD(tx_xon_sent);
10255         ESTAT_ADD(tx_xoff_sent);
10256         ESTAT_ADD(tx_flow_control);
10257         ESTAT_ADD(tx_mac_errors);
10258         ESTAT_ADD(tx_single_collisions);
10259         ESTAT_ADD(tx_mult_collisions);
10260         ESTAT_ADD(tx_deferred);
10261         ESTAT_ADD(tx_excessive_collisions);
10262         ESTAT_ADD(tx_late_collisions);
10263         ESTAT_ADD(tx_collide_2times);
10264         ESTAT_ADD(tx_collide_3times);
10265         ESTAT_ADD(tx_collide_4times);
10266         ESTAT_ADD(tx_collide_5times);
10267         ESTAT_ADD(tx_collide_6times);
10268         ESTAT_ADD(tx_collide_7times);
10269         ESTAT_ADD(tx_collide_8times);
10270         ESTAT_ADD(tx_collide_9times);
10271         ESTAT_ADD(tx_collide_10times);
10272         ESTAT_ADD(tx_collide_11times);
10273         ESTAT_ADD(tx_collide_12times);
10274         ESTAT_ADD(tx_collide_13times);
10275         ESTAT_ADD(tx_collide_14times);
10276         ESTAT_ADD(tx_collide_15times);
10277         ESTAT_ADD(tx_ucast_packets);
10278         ESTAT_ADD(tx_mcast_packets);
10279         ESTAT_ADD(tx_bcast_packets);
10280         ESTAT_ADD(tx_carrier_sense_errors);
10281         ESTAT_ADD(tx_discards);
10282         ESTAT_ADD(tx_errors);
10283
10284         ESTAT_ADD(dma_writeq_full);
10285         ESTAT_ADD(dma_write_prioq_full);
10286         ESTAT_ADD(rxbds_empty);
10287         ESTAT_ADD(rx_discards);
10288         ESTAT_ADD(rx_errors);
10289         ESTAT_ADD(rx_threshold_hit);
10290
10291         ESTAT_ADD(dma_readq_full);
10292         ESTAT_ADD(dma_read_prioq_full);
10293         ESTAT_ADD(tx_comp_queue_full);
10294
10295         ESTAT_ADD(ring_set_send_prod_index);
10296         ESTAT_ADD(ring_status_update);
10297         ESTAT_ADD(nic_irqs);
10298         ESTAT_ADD(nic_avoided_irqs);
10299         ESTAT_ADD(nic_tx_threshold_hit);
10300
10301         ESTAT_ADD(mbuf_lwm_thresh_hit);
10302 }
10303
10304 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10305 {
10306         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10307         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10308
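        /* The hardware counters restart from zero whenever the chip is
         * reset, so each field is reported as the pre-reset total saved in
         * net_stats_prev plus the current hardware count.
         */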
10309         stats->rx_packets = old_stats->rx_packets +
10310                 get_stat64(&hw_stats->rx_ucast_packets) +
10311                 get_stat64(&hw_stats->rx_mcast_packets) +
10312                 get_stat64(&hw_stats->rx_bcast_packets);
10313
10314         stats->tx_packets = old_stats->tx_packets +
10315                 get_stat64(&hw_stats->tx_ucast_packets) +
10316                 get_stat64(&hw_stats->tx_mcast_packets) +
10317                 get_stat64(&hw_stats->tx_bcast_packets);
10318
10319         stats->rx_bytes = old_stats->rx_bytes +
10320                 get_stat64(&hw_stats->rx_octets);
10321         stats->tx_bytes = old_stats->tx_bytes +
10322                 get_stat64(&hw_stats->tx_octets);
10323
10324         stats->rx_errors = old_stats->rx_errors +
10325                 get_stat64(&hw_stats->rx_errors);
10326         stats->tx_errors = old_stats->tx_errors +
10327                 get_stat64(&hw_stats->tx_errors) +
10328                 get_stat64(&hw_stats->tx_mac_errors) +
10329                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10330                 get_stat64(&hw_stats->tx_discards);
10331
10332         stats->multicast = old_stats->multicast +
10333                 get_stat64(&hw_stats->rx_mcast_packets);
10334         stats->collisions = old_stats->collisions +
10335                 get_stat64(&hw_stats->tx_collisions);
10336
10337         stats->rx_length_errors = old_stats->rx_length_errors +
10338                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10339                 get_stat64(&hw_stats->rx_undersize_packets);
10340
10341         stats->rx_over_errors = old_stats->rx_over_errors +
10342                 get_stat64(&hw_stats->rxbds_empty);
10343         stats->rx_frame_errors = old_stats->rx_frame_errors +
10344                 get_stat64(&hw_stats->rx_align_errors);
10345         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10346                 get_stat64(&hw_stats->tx_discards);
10347         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10348                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10349
10350         stats->rx_crc_errors = old_stats->rx_crc_errors +
10351                 tg3_calc_crc_errors(tp);
10352
10353         stats->rx_missed_errors = old_stats->rx_missed_errors +
10354                 get_stat64(&hw_stats->rx_discards);
10355
10356         stats->rx_dropped = tp->rx_dropped;
10357         stats->tx_dropped = tp->tx_dropped;
10358 }
10359
10360 static int tg3_get_regs_len(struct net_device *dev)
10361 {
10362         return TG3_REG_BLK_SIZE;
10363 }
10364
10365 static void tg3_get_regs(struct net_device *dev,
10366                 struct ethtool_regs *regs, void *_p)
10367 {
10368         struct tg3 *tp = netdev_priv(dev);
10369
10370         regs->version = 0;
10371
10372         memset(_p, 0, TG3_REG_BLK_SIZE);
10373
10374         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10375                 return;
10376
10377         tg3_full_lock(tp, 0);
10378
10379         tg3_dump_legacy_regs(tp, (u32 *)_p);
10380
10381         tg3_full_unlock(tp);
10382 }
10383
10384 static int tg3_get_eeprom_len(struct net_device *dev)
10385 {
10386         struct tg3 *tp = netdev_priv(dev);
10387
10388         return tp->nvram_size;
10389 }
10390
10391 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10392 {
10393         struct tg3 *tp = netdev_priv(dev);
10394         int ret;
10395         u8  *pd;
10396         u32 i, offset, len, b_offset, b_count;
10397         __be32 val;
10398
10399         if (tg3_flag(tp, NO_NVRAM))
10400                 return -EINVAL;
10401
10402         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10403                 return -EAGAIN;
10404
10405         offset = eeprom->offset;
10406         len = eeprom->len;
10407         eeprom->len = 0;
10408
10409         eeprom->magic = TG3_EEPROM_MAGIC;
10410
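        /* NVRAM is read in 4-byte words, so an unaligned request is split
         * into up to three pieces.  For example, offset=5 len=10 becomes:
         * a partial word at 4 (bytes 5-7), one full word at 8 (bytes 8-11),
         * and a partial word at 12 (bytes 12-14).
         */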
10411         if (offset & 3) {
10412                 /* adjustments to start on required 4 byte boundary */
10413                 b_offset = offset & 3;
10414                 b_count = 4 - b_offset;
10415                 if (b_count > len) {
10416                         /* i.e. offset=1 len=2 */
10417                         b_count = len;
10418                 }
10419                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10420                 if (ret)
10421                         return ret;
10422                 memcpy(data, ((char *)&val) + b_offset, b_count);
10423                 len -= b_count;
10424                 offset += b_count;
10425                 eeprom->len += b_count;
10426         }
10427
10428         /* read bytes up to the last 4 byte boundary */
10429         pd = &data[eeprom->len];
10430         for (i = 0; i < (len - (len & 3)); i += 4) {
10431                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10432                 if (ret) {
10433                         eeprom->len += i;
10434                         return ret;
10435                 }
10436                 memcpy(pd + i, &val, 4);
10437         }
10438         eeprom->len += i;
10439
10440         if (len & 3) {
10441                 /* read last bytes not ending on 4 byte boundary */
10442                 pd = &data[eeprom->len];
10443                 b_count = len & 3;
10444                 b_offset = offset + len - b_count;
10445                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10446                 if (ret)
10447                         return ret;
10448                 memcpy(pd, &val, b_count);
10449                 eeprom->len += b_count;
10450         }
10451         return 0;
10452 }
10453
10454 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10455 {
10456         struct tg3 *tp = netdev_priv(dev);
10457         int ret;
10458         u32 offset, len, b_offset, odd_len;
10459         u8 *buf;
10460         __be32 start, end;
10461
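        /* NVRAM writes are word-based too, so an unaligned request is
         * widened to 4-byte boundaries: read the bordering words first,
         * splice the caller's bytes into a scratch buffer, then write the
         * whole aligned block back.
         */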
10462         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10463                 return -EAGAIN;
10464
10465         if (tg3_flag(tp, NO_NVRAM) ||
10466             eeprom->magic != TG3_EEPROM_MAGIC)
10467                 return -EINVAL;
10468
10469         offset = eeprom->offset;
10470         len = eeprom->len;
10471
10472         if ((b_offset = (offset & 3))) {
10473                 /* adjustments to start on required 4 byte boundary */
10474                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10475                 if (ret)
10476                         return ret;
10477                 len += b_offset;
10478                 offset &= ~3;
10479                 if (len < 4)
10480                         len = 4;
10481         }
10482
10483         odd_len = 0;
10484         if (len & 3) {
10485                 /* adjustments to end on required 4 byte boundary */
10486                 odd_len = 1;
10487                 len = (len + 3) & ~3;
10488                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10489                 if (ret)
10490                         return ret;
10491         }
10492
10493         buf = data;
10494         if (b_offset || odd_len) {
10495                 buf = kmalloc(len, GFP_KERNEL);
10496                 if (!buf)
10497                         return -ENOMEM;
10498                 if (b_offset)
10499                         memcpy(buf, &start, 4);
10500                 if (odd_len)
10501                         memcpy(buf+len-4, &end, 4);
10502                 memcpy(buf + b_offset, data, eeprom->len);
10503         }
10504
10505         ret = tg3_nvram_write_block(tp, offset, len, buf);
10506
10507         if (buf != data)
10508                 kfree(buf);
10509
10510         return ret;
10511 }
10512
10513 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10514 {
10515         struct tg3 *tp = netdev_priv(dev);
10516
10517         if (tg3_flag(tp, USE_PHYLIB)) {
10518                 struct phy_device *phydev;
10519                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10520                         return -EAGAIN;
10521                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10522                 return phy_ethtool_gset(phydev, cmd);
10523         }
10524
10525         cmd->supported = (SUPPORTED_Autoneg);
10526
10527         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10528                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10529                                    SUPPORTED_1000baseT_Full);
10530
10531         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10532                 cmd->supported |= (SUPPORTED_100baseT_Half |
10533                                    SUPPORTED_100baseT_Full |
10534                                    SUPPORTED_10baseT_Half |
10535                                    SUPPORTED_10baseT_Full |
10536                                    SUPPORTED_TP);
10537                 cmd->port = PORT_TP;
10538         } else {
10539                 cmd->supported |= SUPPORTED_FIBRE;
10540                 cmd->port = PORT_FIBRE;
10541         }
10542
10543         cmd->advertising = tp->link_config.advertising;
10544         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10545                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10546                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10547                                 cmd->advertising |= ADVERTISED_Pause;
10548                         } else {
10549                                 cmd->advertising |= ADVERTISED_Pause |
10550                                                     ADVERTISED_Asym_Pause;
10551                         }
10552                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10553                         cmd->advertising |= ADVERTISED_Asym_Pause;
10554                 }
10555         }
10556         if (netif_running(dev) && netif_carrier_ok(dev)) {
10557                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10558                 cmd->duplex = tp->link_config.active_duplex;
10559                 cmd->lp_advertising = tp->link_config.rmt_adv;
10560                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10561                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10562                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10563                         else
10564                                 cmd->eth_tp_mdix = ETH_TP_MDI;
10565                 }
10566         } else {
10567                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10568                 cmd->duplex = DUPLEX_UNKNOWN;
10569                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10570         }
10571         cmd->phy_address = tp->phy_addr;
10572         cmd->transceiver = XCVR_INTERNAL;
10573         cmd->autoneg = tp->link_config.autoneg;
10574         cmd->maxtxpkt = 0;
10575         cmd->maxrxpkt = 0;
10576         return 0;
10577 }
10578
10579 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10580 {
10581         struct tg3 *tp = netdev_priv(dev);
10582         u32 speed = ethtool_cmd_speed(cmd);
10583
10584         if (tg3_flag(tp, USE_PHYLIB)) {
10585                 struct phy_device *phydev;
10586                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10587                         return -EAGAIN;
10588                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10589                 return phy_ethtool_sset(phydev, cmd);
10590         }
10591
10592         if (cmd->autoneg != AUTONEG_ENABLE &&
10593             cmd->autoneg != AUTONEG_DISABLE)
10594                 return -EINVAL;
10595
10596         if (cmd->autoneg == AUTONEG_DISABLE &&
10597             cmd->duplex != DUPLEX_FULL &&
10598             cmd->duplex != DUPLEX_HALF)
10599                 return -EINVAL;
10600
10601         if (cmd->autoneg == AUTONEG_ENABLE) {
10602                 u32 mask = ADVERTISED_Autoneg |
10603                            ADVERTISED_Pause |
10604                            ADVERTISED_Asym_Pause;
10605
10606                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10607                         mask |= ADVERTISED_1000baseT_Half |
10608                                 ADVERTISED_1000baseT_Full;
10609
10610                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10611                         mask |= ADVERTISED_100baseT_Half |
10612                                 ADVERTISED_100baseT_Full |
10613                                 ADVERTISED_10baseT_Half |
10614                                 ADVERTISED_10baseT_Full |
10615                                 ADVERTISED_TP;
10616                 else
10617                         mask |= ADVERTISED_FIBRE;
10618
10619                 if (cmd->advertising & ~mask)
10620                         return -EINVAL;
10621
10622                 mask &= (ADVERTISED_1000baseT_Half |
10623                          ADVERTISED_1000baseT_Full |
10624                          ADVERTISED_100baseT_Half |
10625                          ADVERTISED_100baseT_Full |
10626                          ADVERTISED_10baseT_Half |
10627                          ADVERTISED_10baseT_Full);
10628
10629                 cmd->advertising &= mask;
10630         } else {
10631                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10632                         if (speed != SPEED_1000)
10633                                 return -EINVAL;
10634
10635                         if (cmd->duplex != DUPLEX_FULL)
10636                                 return -EINVAL;
10637                 } else {
10638                         if (speed != SPEED_100 &&
10639                             speed != SPEED_10)
10640                                 return -EINVAL;
10641                 }
10642         }
10643
10644         tg3_full_lock(tp, 0);
10645
10646         tp->link_config.autoneg = cmd->autoneg;
10647         if (cmd->autoneg == AUTONEG_ENABLE) {
10648                 tp->link_config.advertising = (cmd->advertising |
10649                                               ADVERTISED_Autoneg);
10650                 tp->link_config.speed = SPEED_UNKNOWN;
10651                 tp->link_config.duplex = DUPLEX_UNKNOWN;
10652         } else {
10653                 tp->link_config.advertising = 0;
10654                 tp->link_config.speed = speed;
10655                 tp->link_config.duplex = cmd->duplex;
10656         }
10657
10658         if (netif_running(dev))
10659                 tg3_setup_phy(tp, 1);
10660
10661         tg3_full_unlock(tp);
10662
10663         return 0;
10664 }
10665
10666 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10667 {
10668         struct tg3 *tp = netdev_priv(dev);
10669
10670         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10671         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10672         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10673         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10674 }
10675
10676 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10677 {
10678         struct tg3 *tp = netdev_priv(dev);
10679
10680         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10681                 wol->supported = WAKE_MAGIC;
10682         else
10683                 wol->supported = 0;
10684         wol->wolopts = 0;
10685         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10686                 wol->wolopts = WAKE_MAGIC;
10687         memset(&wol->sopass, 0, sizeof(wol->sopass));
10688 }
10689
10690 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10691 {
10692         struct tg3 *tp = netdev_priv(dev);
10693         struct device *dp = &tp->pdev->dev;
10694
10695         if (wol->wolopts & ~WAKE_MAGIC)
10696                 return -EINVAL;
10697         if ((wol->wolopts & WAKE_MAGIC) &&
10698             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10699                 return -EINVAL;
10700
10701         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10702
10703         spin_lock_bh(&tp->lock);
10704         if (device_may_wakeup(dp))
10705                 tg3_flag_set(tp, WOL_ENABLE);
10706         else
10707                 tg3_flag_clear(tp, WOL_ENABLE);
10708         spin_unlock_bh(&tp->lock);
10709
10710         return 0;
10711 }
10712
10713 static u32 tg3_get_msglevel(struct net_device *dev)
10714 {
10715         struct tg3 *tp = netdev_priv(dev);
10716         return tp->msg_enable;
10717 }
10718
10719 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10720 {
10721         struct tg3 *tp = netdev_priv(dev);
10722         tp->msg_enable = value;
10723 }
10724
10725 static int tg3_nway_reset(struct net_device *dev)
10726 {
10727         struct tg3 *tp = netdev_priv(dev);
10728         int r;
10729
10730         if (!netif_running(dev))
10731                 return -EAGAIN;
10732
10733         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10734                 return -EINVAL;
10735
10736         if (tg3_flag(tp, USE_PHYLIB)) {
10737                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10738                         return -EAGAIN;
10739                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10740         } else {
10741                 u32 bmcr;
10742
10743                 spin_lock_bh(&tp->lock);
10744                 r = -EINVAL;
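                /* BMCR is read twice on purpose, as far as can be told: the
                 * first read flushes any latched state so that the second
                 * reflects the current autoneg configuration.
                 */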
10745                 tg3_readphy(tp, MII_BMCR, &bmcr);
10746                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10747                     ((bmcr & BMCR_ANENABLE) ||
10748                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10749                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10750                                                    BMCR_ANENABLE);
10751                         r = 0;
10752                 }
10753                 spin_unlock_bh(&tp->lock);
10754         }
10755
10756         return r;
10757 }
10758
10759 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10760 {
10761         struct tg3 *tp = netdev_priv(dev);
10762
10763         ering->rx_max_pending = tp->rx_std_ring_mask;
10764         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10765                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10766         else
10767                 ering->rx_jumbo_max_pending = 0;
10768
10769         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10770
10771         ering->rx_pending = tp->rx_pending;
10772         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10773                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10774         else
10775                 ering->rx_jumbo_pending = 0;
10776
10777         ering->tx_pending = tp->napi[0].tx_pending;
10778 }
10779
10780 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10781 {
10782         struct tg3 *tp = netdev_priv(dev);
10783         int i, irq_sync = 0, err = 0;
10784
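        /* Bounds check: the TX ring must hold at least one maximally
         * fragmented frame, and (judging by the 3x factor below) the
         * TSO-bug workaround path is assumed to need up to three
         * descriptors per fragment.
         */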
10785         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10786             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10787             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10788             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10789             (tg3_flag(tp, TSO_BUG) &&
10790              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10791                 return -EINVAL;
10792
10793         if (netif_running(dev)) {
10794                 tg3_phy_stop(tp);
10795                 tg3_netif_stop(tp);
10796                 irq_sync = 1;
10797         }
10798
10799         tg3_full_lock(tp, irq_sync);
10800
10801         tp->rx_pending = ering->rx_pending;
10802
10803         if (tg3_flag(tp, MAX_RXPEND_64) &&
10804             tp->rx_pending > 63)
10805                 tp->rx_pending = 63;
10806         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10807
10808         for (i = 0; i < tp->irq_max; i++)
10809                 tp->napi[i].tx_pending = ering->tx_pending;
10810
10811         if (netif_running(dev)) {
10812                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10813                 err = tg3_restart_hw(tp, 1);
10814                 if (!err)
10815                         tg3_netif_start(tp);
10816         }
10817
10818         tg3_full_unlock(tp);
10819
10820         if (irq_sync && !err)
10821                 tg3_phy_start(tp);
10822
10823         return err;
10824 }
10825
10826 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10827 {
10828         struct tg3 *tp = netdev_priv(dev);
10829
10830         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10831
10832         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10833                 epause->rx_pause = 1;
10834         else
10835                 epause->rx_pause = 0;
10836
10837         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10838                 epause->tx_pause = 1;
10839         else
10840                 epause->tx_pause = 0;
10841 }
10842
10843 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10844 {
10845         struct tg3 *tp = netdev_priv(dev);
10846         int err = 0;
10847
10848         if (tg3_flag(tp, USE_PHYLIB)) {
10849                 u32 newadv;
10850                 struct phy_device *phydev;
10851
10852                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10853
10854                 if (!(phydev->supported & SUPPORTED_Pause) ||
10855                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10856                      (epause->rx_pause != epause->tx_pause)))
10857                         return -EINVAL;
10858
10859                 tp->link_config.flowctrl = 0;
10860                 if (epause->rx_pause) {
10861                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10862
10863                         if (epause->tx_pause) {
10864                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10865                                 newadv = ADVERTISED_Pause;
10866                         } else
10867                                 newadv = ADVERTISED_Pause |
10868                                          ADVERTISED_Asym_Pause;
10869                 } else if (epause->tx_pause) {
10870                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10871                         newadv = ADVERTISED_Asym_Pause;
10872                 } else
10873                         newadv = 0;
10874
10875                 if (epause->autoneg)
10876                         tg3_flag_set(tp, PAUSE_AUTONEG);
10877                 else
10878                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10879
10880                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10881                         u32 oldadv = phydev->advertising &
10882                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10883                         if (oldadv != newadv) {
10884                                 phydev->advertising &=
10885                                         ~(ADVERTISED_Pause |
10886                                           ADVERTISED_Asym_Pause);
10887                                 phydev->advertising |= newadv;
10888                                 if (phydev->autoneg) {
10889                                         /*
10890                                          * Always renegotiate the link to
10891                                          * inform our link partner of our
10892                                          * flow control settings, even if the
10893                                          * flow control is forced.  Let
10894                                          * tg3_adjust_link() do the final
10895                                          * flow control setup.
10896                                          */
10897                                         return phy_start_aneg(phydev);
10898                                 }
10899                         }
10900
10901                         if (!epause->autoneg)
10902                                 tg3_setup_flow_control(tp, 0, 0);
10903                 } else {
10904                         tp->link_config.advertising &=
10905                                         ~(ADVERTISED_Pause |
10906                                           ADVERTISED_Asym_Pause);
10907                         tp->link_config.advertising |= newadv;
10908                 }
10909         } else {
10910                 int irq_sync = 0;
10911
10912                 if (netif_running(dev)) {
10913                         tg3_netif_stop(tp);
10914                         irq_sync = 1;
10915                 }
10916
10917                 tg3_full_lock(tp, irq_sync);
10918
10919                 if (epause->autoneg)
10920                         tg3_flag_set(tp, PAUSE_AUTONEG);
10921                 else
10922                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10923                 if (epause->rx_pause)
10924                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10925                 else
10926                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10927                 if (epause->tx_pause)
10928                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10929                 else
10930                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10931
10932                 if (netif_running(dev)) {
10933                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10934                         err = tg3_restart_hw(tp, 1);
10935                         if (!err)
10936                                 tg3_netif_start(tp);
10937                 }
10938
10939                 tg3_full_unlock(tp);
10940         }
10941
10942         return err;
10943 }
10944
10945 static int tg3_get_sset_count(struct net_device *dev, int sset)
10946 {
10947         switch (sset) {
10948         case ETH_SS_TEST:
10949                 return TG3_NUM_TEST;
10950         case ETH_SS_STATS:
10951                 return TG3_NUM_STATS;
10952         default:
10953                 return -EOPNOTSUPP;
10954         }
10955 }
10956
10957 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10958                          u32 *rules __always_unused)
10959 {
10960         struct tg3 *tp = netdev_priv(dev);
10961
10962         if (!tg3_flag(tp, SUPPORT_MSIX))
10963                 return -EOPNOTSUPP;
10964
10965         switch (info->cmd) {
10966         case ETHTOOL_GRXRINGS:
10967                 if (netif_running(tp->dev))
10968                         info->data = tp->irq_cnt;
10969                 else {
10970                         info->data = num_online_cpus();
10971                         if (info->data > TG3_IRQ_MAX_VECS_RSS)
10972                                 info->data = TG3_IRQ_MAX_VECS_RSS;
10973                 }
10974
10975                 /* The first interrupt vector only
10976                  * handles link interrupts.
10977                  */
10978                 info->data -= 1;
10979                 return 0;
10980
10981         default:
10982                 return -EOPNOTSUPP;
10983         }
10984 }
10985
10986 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10987 {
10988         u32 size = 0;
10989         struct tg3 *tp = netdev_priv(dev);
10990
10991         if (tg3_flag(tp, SUPPORT_MSIX))
10992                 size = TG3_RSS_INDIR_TBL_SIZE;
10993
10994         return size;
10995 }
10996
10997 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10998 {
10999         struct tg3 *tp = netdev_priv(dev);
11000         int i;
11001
11002         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11003                 indir[i] = tp->rss_ind_tbl[i];
11004
11005         return 0;
11006 }
11007
11008 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11009 {
11010         struct tg3 *tp = netdev_priv(dev);
11011         size_t i;
11012
11013         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11014                 tp->rss_ind_tbl[i] = indir[i];
11015
11016         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11017                 return 0;
11018
11019         /* It is legal to write the indirection
11020          * table while the device is running.
11021          */
11022         tg3_full_lock(tp, 0);
11023         tg3_rss_write_indir_tbl(tp);
11024         tg3_full_unlock(tp);
11025
11026         return 0;
11027 }
11028
11029 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11030 {
11031         switch (stringset) {
11032         case ETH_SS_STATS:
11033                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11034                 break;
11035         case ETH_SS_TEST:
11036                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11037                 break;
11038         default:
11039                 WARN_ON(1);     /* we need a WARN() */
11040                 break;
11041         }
11042 }
11043
11044 static int tg3_set_phys_id(struct net_device *dev,
11045                             enum ethtool_phys_id_state state)
11046 {
11047         struct tg3 *tp = netdev_priv(dev);
11048
11049         if (!netif_running(tp->dev))
11050                 return -EAGAIN;
11051
11052         switch (state) {
11053         case ETHTOOL_ID_ACTIVE:
11054                 return 1;       /* cycle on/off once per second */
11055
11056         case ETHTOOL_ID_ON:
11057                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11058                      LED_CTRL_1000MBPS_ON |
11059                      LED_CTRL_100MBPS_ON |
11060                      LED_CTRL_10MBPS_ON |
11061                      LED_CTRL_TRAFFIC_OVERRIDE |
11062                      LED_CTRL_TRAFFIC_BLINK |
11063                      LED_CTRL_TRAFFIC_LED);
11064                 break;
11065
11066         case ETHTOOL_ID_OFF:
11067                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11068                      LED_CTRL_TRAFFIC_OVERRIDE);
11069                 break;
11070
11071         case ETHTOOL_ID_INACTIVE:
11072                 tw32(MAC_LED_CTRL, tp->led_ctrl);
11073                 break;
11074         }
11075
11076         return 0;
11077 }
11078
11079 static void tg3_get_ethtool_stats(struct net_device *dev,
11080                                    struct ethtool_stats *estats, u64 *tmp_stats)
11081 {
11082         struct tg3 *tp = netdev_priv(dev);
11083
11084         if (tp->hw_stats)
11085                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11086         else
11087                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11088 }
11089
11090 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11091 {
11092         int i;
11093         __be32 *buf;
11094         u32 offset = 0, len = 0;
11095         u32 magic, val;
11096
11097         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11098                 return NULL;
11099
11100         if (magic == TG3_EEPROM_MAGIC) {
11101                 for (offset = TG3_NVM_DIR_START;
11102                      offset < TG3_NVM_DIR_END;
11103                      offset += TG3_NVM_DIRENT_SIZE) {
11104                         if (tg3_nvram_read(tp, offset, &val))
11105                                 return NULL;
11106
11107                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11108                             TG3_NVM_DIRTYPE_EXTVPD)
11109                                 break;
11110                 }
11111
11112                 if (offset != TG3_NVM_DIR_END) {
11113                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11114                         if (tg3_nvram_read(tp, offset + 4, &offset))
11115                                 return NULL;
11116
11117                         offset = tg3_nvram_logical_addr(tp, offset);
11118                 }
11119         }
11120
11121         if (!offset || !len) {
11122                 offset = TG3_NVM_VPD_OFF;
11123                 len = TG3_NVM_VPD_LEN;
11124         }
11125
11126         buf = kmalloc(len, GFP_KERNEL);
11127         if (buf == NULL)
11128                 return NULL;
11129
11130         if (magic == TG3_EEPROM_MAGIC) {
11131                 for (i = 0; i < len; i += 4) {
11132                         /* The data is in little-endian format in NVRAM.
11133                          * Use the big-endian read routines to preserve
11134                          * the byte order as it exists in NVRAM.
11135                          */
11136                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11137                                 goto error;
11138                 }
11139         } else {
11140                 u8 *ptr;
11141                 ssize_t cnt;
11142                 unsigned int pos = 0;
11143
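                /* No usable NVRAM image: fall back to the PCI VPD
                 * capability, reading in at most three chunks and treating
                 * -ETIMEDOUT/-EINTR as a zero-byte read so that attempt is
                 * simply retried.
                 */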
11144                 ptr = (u8 *)&buf[0];
11145                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11146                         cnt = pci_read_vpd(tp->pdev, pos,
11147                                            len - pos, ptr);
11148                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
11149                                 cnt = 0;
11150                         else if (cnt < 0)
11151                                 goto error;
11152                 }
11153                 if (pos != len)
11154                         goto error;
11155         }
11156
11157         *vpdlen = len;
11158
11159         return buf;
11160
11161 error:
11162         kfree(buf);
11163         return NULL;
11164 }
11165
11166 #define NVRAM_TEST_SIZE 0x100
11167 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
11168 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
11169 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
11170 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
11171 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
11172 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
11173 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11174 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11175
11176 static int tg3_test_nvram(struct tg3 *tp)
11177 {
11178         u32 csum, magic, len;
11179         __be32 *buf;
11180         int i, j, k, err = 0, size;
11181
11182         if (tg3_flag(tp, NO_NVRAM))
11183                 return 0;
11184
11185         if (tg3_nvram_read(tp, 0, &magic) != 0)
11186                 return -EIO;
11187
11188         if (magic == TG3_EEPROM_MAGIC)
11189                 size = NVRAM_TEST_SIZE;
11190         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11191                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11192                     TG3_EEPROM_SB_FORMAT_1) {
11193                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11194                         case TG3_EEPROM_SB_REVISION_0:
11195                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11196                                 break;
11197                         case TG3_EEPROM_SB_REVISION_2:
11198                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11199                                 break;
11200                         case TG3_EEPROM_SB_REVISION_3:
11201                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11202                                 break;
11203                         case TG3_EEPROM_SB_REVISION_4:
11204                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11205                                 break;
11206                         case TG3_EEPROM_SB_REVISION_5:
11207                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11208                                 break;
11209                         case TG3_EEPROM_SB_REVISION_6:
11210                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11211                                 break;
11212                         default:
11213                                 return -EIO;
11214                         }
11215                 } else
11216                         return 0;
11217         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11218                 size = NVRAM_SELFBOOT_HW_SIZE;
11219         else
11220                 return -EIO;
11221
11222         buf = kmalloc(size, GFP_KERNEL);
11223         if (buf == NULL)
11224                 return -ENOMEM;
11225
11226         err = -EIO;
11227         for (i = 0, j = 0; i < size; i += 4, j++) {
11228                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11229                 if (err)
11230                         break;
11231         }
11232         if (i < size)
11233                 goto out;
11234
11235         /* Selfboot format */
11236         magic = be32_to_cpu(buf[0]);
11237         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11238             TG3_EEPROM_MAGIC_FW) {
11239                 u8 *buf8 = (u8 *) buf, csum8 = 0;
11240
11241                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11242                     TG3_EEPROM_SB_REVISION_2) {
11243                         /* For rev 2, the csum doesn't include the MBA. */
11244                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11245                                 csum8 += buf8[i];
11246                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11247                                 csum8 += buf8[i];
11248                 } else {
11249                         for (i = 0; i < size; i++)
11250                                 csum8 += buf8[i];
11251                 }
11252
11253                 if (csum8 == 0) {
11254                         err = 0;
11255                         goto out;
11256                 }
11257
11258                 err = -EIO;
11259                 goto out;
11260         }
11261
11262         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11263             TG3_EEPROM_MAGIC_HW) {
11264                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11265                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11266                 u8 *buf8 = (u8 *) buf;
11267
11268                 /* Separate the parity bits and the data bytes.  */
11269                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11270                         if ((i == 0) || (i == 8)) {
11271                                 int l;
11272                                 u8 msk;
11273
11274                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11275                                         parity[k++] = buf8[i] & msk;
11276                                 i++;
11277                         } else if (i == 16) {
11278                                 int l;
11279                                 u8 msk;
11280
11281                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11282                                         parity[k++] = buf8[i] & msk;
11283                                 i++;
11284
11285                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11286                                         parity[k++] = buf8[i] & msk;
11287                                 i++;
11288                         }
11289                         data[j++] = buf8[i];
11290                 }
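                /* Odd-parity check below, with an illustrative value:
                 * each parity bit recovered above must make the
                 * combined population count of its data byte plus the
                 * parity bit odd.  E.g. data 0xa5 has hweight8() == 4
                 * (even), so its parity bit must be set; data 0x01 has
                 * weight 1 (odd), so its parity bit must be clear.
                 */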
11291
11292                 err = -EIO;
11293                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11294                         u8 hw8 = hweight8(data[i]);
11295
11296                         if ((hw8 & 0x1) && parity[i])
11297                                 goto out;
11298                         else if (!(hw8 & 0x1) && !parity[i])
11299                                 goto out;
11300                 }
11301                 err = 0;
11302                 goto out;
11303         }
11304
11305         err = -EIO;
11306
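        /* The buffer was filled with tg3_nvram_read_be32() to preserve
         * NVRAM byte order, and NVRAM is little-endian, so the stored
         * CRCs below are recovered with le32_to_cpu() even though buf
         * is declared __be32.
         */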
11307         /* Bootstrap checksum at offset 0x10 */
11308         csum = calc_crc((unsigned char *) buf, 0x10);
11309         if (csum != le32_to_cpu(buf[0x10/4]))
11310                 goto out;
11311
11312         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11313         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11314         if (csum != le32_to_cpu(buf[0xfc/4]))
11315                 goto out;
11316
11317         kfree(buf);
11318
11319         buf = tg3_vpd_readblock(tp, &len);
11320         if (!buf)
11321                 return -ENOMEM;
11322
11323         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11324         if (i > 0) {
11325                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11326                 if (j < 0)
11327                         goto out;
11328
11329                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11330                         goto out;
11331
11332                 i += PCI_VPD_LRDT_TAG_SIZE;
11333                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11334                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11335                 if (j > 0) {
11336                         u8 csum8 = 0;
11337
11338                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11339
11340                         for (i = 0; i <= j; i++)
11341                                 csum8 += ((u8 *)buf)[i];
11342
11343                         if (csum8)
11344                                 goto out;
11345                 }
11346         }
11347
11348         err = 0;
11349
11350 out:
11351         kfree(buf);
11352         return err;
11353 }
11354
11355 #define TG3_SERDES_TIMEOUT_SEC  2
11356 #define TG3_COPPER_TIMEOUT_SEC  6
11357
11358 static int tg3_test_link(struct tg3 *tp)
11359 {
11360         int i, max;
11361
11362         if (!netif_running(tp->dev))
11363                 return -ENODEV;
11364
11365         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11366                 max = TG3_SERDES_TIMEOUT_SEC;
11367         else
11368                 max = TG3_COPPER_TIMEOUT_SEC;
11369
11370         for (i = 0; i < max; i++) {
11371                 if (netif_carrier_ok(tp->dev))
11372                         return 0;
11373
11374                 if (msleep_interruptible(1000))
11375                         break;
11376         }
11377
11378         return -EIO;
11379 }
11380
11381 /* Only test the commonly used registers */
11382 static int tg3_test_registers(struct tg3 *tp)
11383 {
11384         int i, is_5705, is_5750;
11385         u32 offset, read_mask, write_mask, val, save_val, read_val;
11386         static struct {
11387                 u16 offset;
11388                 u16 flags;
11389 #define TG3_FL_5705     0x1
11390 #define TG3_FL_NOT_5705 0x2
11391 #define TG3_FL_NOT_5788 0x4
11392 #define TG3_FL_NOT_5750 0x8
11393                 u32 read_mask;
11394                 u32 write_mask;
11395         } reg_tbl[] = {
11396                 /* MAC Control Registers */
11397                 { MAC_MODE, TG3_FL_NOT_5705,
11398                         0x00000000, 0x00ef6f8c },
11399                 { MAC_MODE, TG3_FL_5705,
11400                         0x00000000, 0x01ef6b8c },
11401                 { MAC_STATUS, TG3_FL_NOT_5705,
11402                         0x03800107, 0x00000000 },
11403                 { MAC_STATUS, TG3_FL_5705,
11404                         0x03800100, 0x00000000 },
11405                 { MAC_ADDR_0_HIGH, 0x0000,
11406                         0x00000000, 0x0000ffff },
11407                 { MAC_ADDR_0_LOW, 0x0000,
11408                         0x00000000, 0xffffffff },
11409                 { MAC_RX_MTU_SIZE, 0x0000,
11410                         0x00000000, 0x0000ffff },
11411                 { MAC_TX_MODE, 0x0000,
11412                         0x00000000, 0x00000070 },
11413                 { MAC_TX_LENGTHS, 0x0000,
11414                         0x00000000, 0x00003fff },
11415                 { MAC_RX_MODE, TG3_FL_NOT_5705,
11416                         0x00000000, 0x000007fc },
11417                 { MAC_RX_MODE, TG3_FL_5705,
11418                         0x00000000, 0x000007dc },
11419                 { MAC_HASH_REG_0, 0x0000,
11420                         0x00000000, 0xffffffff },
11421                 { MAC_HASH_REG_1, 0x0000,
11422                         0x00000000, 0xffffffff },
11423                 { MAC_HASH_REG_2, 0x0000,
11424                         0x00000000, 0xffffffff },
11425                 { MAC_HASH_REG_3, 0x0000,
11426                         0x00000000, 0xffffffff },
11427
11428                 /* Receive Data and Receive BD Initiator Control Registers. */
11429                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11430                         0x00000000, 0xffffffff },
11431                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11432                         0x00000000, 0xffffffff },
11433                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11434                         0x00000000, 0x00000003 },
11435                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11436                         0x00000000, 0xffffffff },
11437                 { RCVDBDI_STD_BD+0, 0x0000,
11438                         0x00000000, 0xffffffff },
11439                 { RCVDBDI_STD_BD+4, 0x0000,
11440                         0x00000000, 0xffffffff },
11441                 { RCVDBDI_STD_BD+8, 0x0000,
11442                         0x00000000, 0xffff0002 },
11443                 { RCVDBDI_STD_BD+0xc, 0x0000,
11444                         0x00000000, 0xffffffff },
11445
11446                 /* Receive BD Initiator Control Registers. */
11447                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11448                         0x00000000, 0xffffffff },
11449                 { RCVBDI_STD_THRESH, TG3_FL_5705,
11450                         0x00000000, 0x000003ff },
11451                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11452                         0x00000000, 0xffffffff },
11453
11454                 /* Host Coalescing Control Registers. */
11455                 { HOSTCC_MODE, TG3_FL_NOT_5705,
11456                         0x00000000, 0x00000004 },
11457                 { HOSTCC_MODE, TG3_FL_5705,
11458                         0x00000000, 0x000000f6 },
11459                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11460                         0x00000000, 0xffffffff },
11461                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11462                         0x00000000, 0x000003ff },
11463                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11464                         0x00000000, 0xffffffff },
11465                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11466                         0x00000000, 0x000003ff },
11467                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11468                         0x00000000, 0xffffffff },
11469                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11470                         0x00000000, 0x000000ff },
11471                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11472                         0x00000000, 0xffffffff },
11473                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11474                         0x00000000, 0x000000ff },
11475                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11476                         0x00000000, 0xffffffff },
11477                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11478                         0x00000000, 0xffffffff },
11479                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11480                         0x00000000, 0xffffffff },
11481                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11482                         0x00000000, 0x000000ff },
11483                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11484                         0x00000000, 0xffffffff },
11485                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11486                         0x00000000, 0x000000ff },
11487                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11488                         0x00000000, 0xffffffff },
11489                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11490                         0x00000000, 0xffffffff },
11491                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11492                         0x00000000, 0xffffffff },
11493                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11494                         0x00000000, 0xffffffff },
11495                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11496                         0x00000000, 0xffffffff },
11497                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11498                         0xffffffff, 0x00000000 },
11499                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11500                         0xffffffff, 0x00000000 },
11501
11502                 /* Buffer Manager Control Registers. */
11503                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11504                         0x00000000, 0x007fff80 },
11505                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11506                         0x00000000, 0x007fffff },
11507                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11508                         0x00000000, 0x0000003f },
11509                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11510                         0x00000000, 0x000001ff },
11511                 { BUFMGR_MB_HIGH_WATER, 0x0000,
11512                         0x00000000, 0x000001ff },
11513                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11514                         0xffffffff, 0x00000000 },
11515                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11516                         0xffffffff, 0x00000000 },
11517
11518                 /* Mailbox Registers */
11519                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11520                         0x00000000, 0x000001ff },
11521                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11522                         0x00000000, 0x000001ff },
11523                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11524                         0x00000000, 0x000007ff },
11525                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11526                         0x00000000, 0x000001ff },
11527
11528                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11529         };
11530
11531         is_5705 = is_5750 = 0;
11532         if (tg3_flag(tp, 5705_PLUS)) {
11533                 is_5705 = 1;
11534                 if (tg3_flag(tp, 5750_PLUS))
11535                         is_5750 = 1;
11536         }
11537
11538         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11539                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11540                         continue;
11541
11542                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11543                         continue;
11544
11545                 if (tg3_flag(tp, IS_5788) &&
11546                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11547                         continue;
11548
11549                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11550                         continue;
11551
11552                 offset = (u32) reg_tbl[i].offset;
11553                 read_mask = reg_tbl[i].read_mask;
11554                 write_mask = reg_tbl[i].write_mask;
11555
11556                 /* Save the original register content */
11557                 save_val = tr32(offset);
11558
11559                 /* Determine the read-only value. */
11560                 read_val = save_val & read_mask;
11561
11562                 /* Write zero to the register, then make sure the read-only bits
11563                  * are not changed and the read/write bits are all zeros.
11564                  */
11565                 tw32(offset, 0);
11566
11567                 val = tr32(offset);
11568
11569                 /* Test the read-only and read/write bits. */
11570                 if (((val & read_mask) != read_val) || (val & write_mask))
11571                         goto out;
11572
11573                 /* Write ones to all the bits defined by read_mask and write_mask,
11574                  * make sure the read-only bits are not changed and the
11575                  * read/write bits are all ones.
11576                  */
11577                 tw32(offset, read_mask | write_mask);
11578
11579                 val = tr32(offset);
11580
11581                 /* Test the read-only bits. */
11582                 if ((val & read_mask) != read_val)
11583                         goto out;
11584
11585                 /* Test the read/write bits. */
11586                 if ((val & write_mask) != write_mask)
11587                         goto out;
11588
11589                 tw32(offset, save_val);
11590         }
11591
11592         return 0;
11593
11594 out:
11595         if (netif_msg_hw(tp))
11596                 netdev_err(tp->dev,
11597                            "Register test failed at offset %x\n", offset);
11598         tw32(offset, save_val);
11599         return -EIO;
11600 }
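/* A minimal model of the masked read/write check above (illustrative
 * only, not used by the driver): val0 is the readback after writing
 * zero, val1 the readback after writing read_mask | write_mask, and
 * saved the original register content.
 */
static inline bool tg3_regtest_model(u32 val0, u32 val1, u32 saved,
                                     u32 read_mask, u32 write_mask)
{
        u32 read_val = saved & read_mask;       /* expected RO bits */

        return (val0 & read_mask) == read_val &&  /* RO bits survive 0 */
               !(val0 & write_mask) &&            /* RW bits all clear */
               (val1 & read_mask) == read_val &&  /* RO bits survive 1 */
               (val1 & write_mask) == write_mask; /* RW bits all set   */
}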
11601
11602 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11603 {
11604         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11605         int i;
11606         u32 j;
11607
11608         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11609                 for (j = 0; j < len; j += 4) {
11610                         u32 val;
11611
11612                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11613                         tg3_read_mem(tp, offset + j, &val);
11614                         if (val != test_pattern[i])
11615                                 return -EIO;
11616                 }
11617         }
11618         return 0;
11619 }
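/* tg3_do_mem_test() walks [offset, offset + len) in 32-bit steps and
 * reads back each pattern after writing it: the all-zeros and all-ones
 * patterns catch stuck bits, while 0xaa55a55a alternates neighbouring
 * bits to help expose coupling between adjacent lines.
 */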
11620
11621 static int tg3_test_memory(struct tg3 *tp)
11622 {
11623         static struct mem_entry {
11624                 u32 offset;
11625                 u32 len;
11626         } mem_tbl_570x[] = {
11627                 { 0x00000000, 0x00b50},
11628                 { 0x00002000, 0x1c000},
11629                 { 0xffffffff, 0x00000}
11630         }, mem_tbl_5705[] = {
11631                 { 0x00000100, 0x0000c},
11632                 { 0x00000200, 0x00008},
11633                 { 0x00004000, 0x00800},
11634                 { 0x00006000, 0x01000},
11635                 { 0x00008000, 0x02000},
11636                 { 0x00010000, 0x0e000},
11637                 { 0xffffffff, 0x00000}
11638         }, mem_tbl_5755[] = {
11639                 { 0x00000200, 0x00008},
11640                 { 0x00004000, 0x00800},
11641                 { 0x00006000, 0x00800},
11642                 { 0x00008000, 0x02000},
11643                 { 0x00010000, 0x0c000},
11644                 { 0xffffffff, 0x00000}
11645         }, mem_tbl_5906[] = {
11646                 { 0x00000200, 0x00008},
11647                 { 0x00004000, 0x00400},
11648                 { 0x00006000, 0x00400},
11649                 { 0x00008000, 0x01000},
11650                 { 0x00010000, 0x01000},
11651                 { 0xffffffff, 0x00000}
11652         }, mem_tbl_5717[] = {
11653                 { 0x00000200, 0x00008},
11654                 { 0x00010000, 0x0a000},
11655                 { 0x00020000, 0x13c00},
11656                 { 0xffffffff, 0x00000}
11657         }, mem_tbl_57765[] = {
11658                 { 0x00000200, 0x00008},
11659                 { 0x00004000, 0x00800},
11660                 { 0x00006000, 0x09800},
11661                 { 0x00010000, 0x0a000},
11662                 { 0xffffffff, 0x00000}
11663         };
11664         struct mem_entry *mem_tbl;
11665         int err = 0;
11666         int i;
11667
11668         if (tg3_flag(tp, 5717_PLUS))
11669                 mem_tbl = mem_tbl_5717;
11670         else if (tg3_flag(tp, 57765_CLASS))
11671                 mem_tbl = mem_tbl_57765;
11672         else if (tg3_flag(tp, 5755_PLUS))
11673                 mem_tbl = mem_tbl_5755;
11674         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11675                 mem_tbl = mem_tbl_5906;
11676         else if (tg3_flag(tp, 5705_PLUS))
11677                 mem_tbl = mem_tbl_5705;
11678         else
11679                 mem_tbl = mem_tbl_570x;
11680
11681         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11682                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11683                 if (err)
11684                         break;
11685         }
11686
11687         return err;
11688 }
11689
11690 #define TG3_TSO_MSS             500
11691
11692 #define TG3_TSO_IP_HDR_LEN      20
11693 #define TG3_TSO_TCP_HDR_LEN     20
11694 #define TG3_TSO_TCP_OPT_LEN     12
11695
11696 static const u8 tg3_tso_header[] = {
11697 0x08, 0x00,
11698 0x45, 0x00, 0x00, 0x00,
11699 0x00, 0x00, 0x40, 0x00,
11700 0x40, 0x06, 0x00, 0x00,
11701 0x0a, 0x00, 0x00, 0x01,
11702 0x0a, 0x00, 0x00, 0x02,
11703 0x0d, 0x00, 0xe0, 0x00,
11704 0x00, 0x00, 0x01, 0x00,
11705 0x00, 0x00, 0x02, 0x00,
11706 0x80, 0x10, 0x10, 0x00,
11707 0x14, 0x09, 0x00, 0x00,
11708 0x01, 0x01, 0x08, 0x0a,
11709 0x11, 0x11, 0x11, 0x11,
11710 0x11, 0x11, 0x11, 0x11,
11711 };
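/* Byte-by-byte breakdown of tg3_tso_header (for reference; it is
 * copied in just past the two 6-byte MAC addresses):
 *
 *      0x08 0x00                Ethertype IPv4
 *      0x45 ... 0x00 0x02       20-byte IPv4 header: DF set, TTL 64,
 *                               protocol TCP, 10.0.0.1 -> 10.0.0.2;
 *                               tot_len and checksum left 0 for the
 *                               loopback code to fill in/offload
 *      0x0d 0x00 ... 0x00 0x00  20-byte TCP header: data offset 8
 *                               (32 bytes incl. options), ACK set
 *      0x01 0x01 0x08 0x0a ...  NOP, NOP and a 10-byte timestamp
 *                               option (12 option bytes total) with
 *                               dummy 0x11... TSval/TSecr
 *
 * Total: 2 + 20 + 32 == 54 bytes, i.e. the ethertype plus
 * TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + TG3_TSO_TCP_OPT_LEN.
 */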
11712
11713 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11714 {
11715         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11716         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11717         u32 budget;
11718         struct sk_buff *skb;
11719         u8 *tx_data, *rx_data;
11720         dma_addr_t map;
11721         int num_pkts, tx_len, rx_len, i, err;
11722         struct tg3_rx_buffer_desc *desc;
11723         struct tg3_napi *tnapi, *rnapi;
11724         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11725
11726         tnapi = &tp->napi[0];
11727         rnapi = &tp->napi[0];
11728         if (tp->irq_cnt > 1) {
11729                 if (tg3_flag(tp, ENABLE_RSS))
11730                         rnapi = &tp->napi[1];
11731                 if (tg3_flag(tp, ENABLE_TSS))
11732                         tnapi = &tp->napi[1];
11733         }
11734         coal_now = tnapi->coal_now | rnapi->coal_now;
11735
11736         err = -EIO;
11737
11738         tx_len = pktsz;
11739         skb = netdev_alloc_skb(tp->dev, tx_len);
11740         if (!skb)
11741                 return -ENOMEM;
11742
11743         tx_data = skb_put(skb, tx_len);
11744         memcpy(tx_data, tp->dev->dev_addr, 6);
11745         memset(tx_data + 6, 0x0, 8);
11746
11747         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11748
11749         if (tso_loopback) {
11750                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11751
11752                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11753                               TG3_TSO_TCP_OPT_LEN;
11754
11755                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11756                        sizeof(tg3_tso_header));
11757                 mss = TG3_TSO_MSS;
11758
11759                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11760                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11761
11762                 /* Set the total length field in the IP header */
11763                 iph->tot_len = htons((u16)(mss + hdr_len));
11764
11765                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11766                               TXD_FLAG_CPU_POST_DMA);
11767
11768                 if (tg3_flag(tp, HW_TSO_1) ||
11769                     tg3_flag(tp, HW_TSO_2) ||
11770                     tg3_flag(tp, HW_TSO_3)) {
11771                         struct tcphdr *th;
11772                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11773                         th = (struct tcphdr *)&tx_data[val];
11774                         th->check = 0;
11775                 } else
11776                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11777
11778                 if (tg3_flag(tp, HW_TSO_3)) {
11779                         mss |= (hdr_len & 0xc) << 12;
11780                         if (hdr_len & 0x10)
11781                                 base_flags |= 0x00000010;
11782                         base_flags |= (hdr_len & 0x3e0) << 5;
11783                 } else if (tg3_flag(tp, HW_TSO_2))
11784                         mss |= hdr_len << 9;
11785                 else if (tg3_flag(tp, HW_TSO_1) ||
11786                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11787                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11788                 } else {
11789                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11790                 }
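                /* Worked example of the hdr_len encodings above, using
                 * hdr_len == 52 (0x34): HW_TSO_2 packs the whole value
                 * into the mss field at bit 9 (0x34 << 9 == 0x6800);
                 * HW_TSO_3 scatters it instead - bits 2-3 land in mss
                 * bits 14-15 ((0x34 & 0xc) << 12), bit 4 in base_flags
                 * bit 4, and bits 5-9 in base_flags bits 10-14
                 * ((0x34 & 0x3e0) << 5).
                 */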
11791
11792                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11793         } else {
11794                 num_pkts = 1;
11795                 data_off = ETH_HLEN;
11796
11797                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
11798                     tx_len > VLAN_ETH_FRAME_LEN)
11799                         base_flags |= TXD_FLAG_JMB_PKT;
11800         }
11801
11802         for (i = data_off; i < tx_len; i++)
11803                 tx_data[i] = (u8) (i & 0xff);
11804
11805         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11806         if (pci_dma_mapping_error(tp->pdev, map)) {
11807                 dev_kfree_skb(skb);
11808                 return -EIO;
11809         }
11810
11811         val = tnapi->tx_prod;
11812         tnapi->tx_buffers[val].skb = skb;
11813         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11814
11815         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11816                rnapi->coal_now);
11817
11818         udelay(10);
11819
11820         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11821
11822         budget = tg3_tx_avail(tnapi);
11823         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11824                             base_flags | TXD_FLAG_END, mss, 0)) {
11825                 tnapi->tx_buffers[val].skb = NULL;
11826                 dev_kfree_skb(skb);
11827                 return -EIO;
11828         }
11829
11830         tnapi->tx_prod++;
11831
11832         /* Sync BD data before updating mailbox */
11833         wmb();
11834
11835         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11836         tr32_mailbox(tnapi->prodmbox);
11837
11838         udelay(10);
11839
11840         /* Poll for up to 350 usec (35 x 10 usec) to allow enough time on some 10/100 Mbps devices.  */
11841         for (i = 0; i < 35; i++) {
11842                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11843                        coal_now);
11844
11845                 udelay(10);
11846
11847                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11848                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11849                 if ((tx_idx == tnapi->tx_prod) &&
11850                     (rx_idx == (rx_start_idx + num_pkts)))
11851                         break;
11852         }
11853
11854         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11855         dev_kfree_skb(skb);
11856
11857         if (tx_idx != tnapi->tx_prod)
11858                 goto out;
11859
11860         if (rx_idx != rx_start_idx + num_pkts)
11861                 goto out;
11862
11863         val = data_off;
11864         while (rx_idx != rx_start_idx) {
11865                 desc = &rnapi->rx_rcb[rx_start_idx++];
11866                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11867                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11868
11869                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11870                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11871                         goto out;
11872
11873                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11874                          - ETH_FCS_LEN;
11875
11876                 if (!tso_loopback) {
11877                         if (rx_len != tx_len)
11878                                 goto out;
11879
11880                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11881                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11882                                         goto out;
11883                         } else {
11884                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11885                                         goto out;
11886                         }
11887                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11888                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11889                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11890                         goto out;
11891                 }
11892
11893                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11894                         rx_data = tpr->rx_std_buffers[desc_idx].data;
11895                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11896                                              mapping);
11897                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11898                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11899                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11900                                              mapping);
11901                 } else
11902                         goto out;
11903
11904                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11905                                             PCI_DMA_FROMDEVICE);
11906
11907                 rx_data += TG3_RX_OFFSET(tp);
11908                 for (i = data_off; i < rx_len; i++, val++) {
11909                         if (*(rx_data + i) != (u8) (val & 0xff))
11910                                 goto out;
11911                 }
11912         }
11913
11914         err = 0;
11915
11916         /* tg3_free_rings will unmap and free the rx_data */
11917 out:
11918         return err;
11919 }
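/* Flow of tg3_run_loopback(), for reference: build a payload where
 * byte i carries (i & 0xff), post a single TX descriptor, force a
 * coalescence interrupt, poll up to ~350 usec for the TX consumer and
 * RX producer indices to advance, then verify ring placement (std vs
 * jumbo), error bits, length and payload of what came back.  The RX
 * buffers themselves are unmapped and freed later by tg3_free_rings().
 */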
11920
11921 #define TG3_STD_LOOPBACK_FAILED         1
11922 #define TG3_JMB_LOOPBACK_FAILED         2
11923 #define TG3_TSO_LOOPBACK_FAILED         4
11924 #define TG3_LOOPBACK_FAILED \
11925         (TG3_STD_LOOPBACK_FAILED | \
11926          TG3_JMB_LOOPBACK_FAILED | \
11927          TG3_TSO_LOOPBACK_FAILED)
11928
11929 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11930 {
11931         int err = -EIO;
11932         u32 eee_cap;
11933         u32 jmb_pkt_sz = 9000;
11934
11935         if (tp->dma_limit)
11936                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
11937
11938         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11939         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11940
11941         if (!netif_running(tp->dev)) {
11942                 data[0] = TG3_LOOPBACK_FAILED;
11943                 data[1] = TG3_LOOPBACK_FAILED;
11944                 if (do_extlpbk)
11945                         data[2] = TG3_LOOPBACK_FAILED;
11946                 goto done;
11947         }
11948
11949         err = tg3_reset_hw(tp, 1);
11950         if (err) {
11951                 data[0] = TG3_LOOPBACK_FAILED;
11952                 data[1] = TG3_LOOPBACK_FAILED;
11953                 if (do_extlpbk)
11954                         data[2] = TG3_LOOPBACK_FAILED;
11955                 goto done;
11956         }
11957
11958         if (tg3_flag(tp, ENABLE_RSS)) {
11959                 int i;
11960
11961                 /* Reroute all rx packets to the 1st queue */
11962                 for (i = MAC_RSS_INDIR_TBL_0;
11963                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11964                         tw32(i, 0x0);
11965         }
11966
11967         /* HW erratum: MAC loopback fails in some cases on the 5780.
11968          * Normal traffic and PHY loopback are not affected by this
11969          * erratum.  Also, the MAC loopback test is deprecated for
11970          * all newer ASIC revisions.
11971          */
11972         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11973             !tg3_flag(tp, CPMU_PRESENT)) {
11974                 tg3_mac_loopback(tp, true);
11975
11976                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11977                         data[0] |= TG3_STD_LOOPBACK_FAILED;
11978
11979                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11980                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11981                         data[0] |= TG3_JMB_LOOPBACK_FAILED;
11982
11983                 tg3_mac_loopback(tp, false);
11984         }
11985
11986         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11987             !tg3_flag(tp, USE_PHYLIB)) {
11988                 int i;
11989
11990                 tg3_phy_lpbk_set(tp, 0, false);
11991
11992                 /* Wait for link */
11993                 for (i = 0; i < 100; i++) {
11994                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11995                                 break;
11996                         mdelay(1);
11997                 }
11998
11999                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12000                         data[1] |= TG3_STD_LOOPBACK_FAILED;
12001                 if (tg3_flag(tp, TSO_CAPABLE) &&
12002                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12003                         data[1] |= TG3_TSO_LOOPBACK_FAILED;
12004                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12005                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12006                         data[1] |= TG3_JMB_LOOPBACK_FAILED;
12007
12008                 if (do_extlpbk) {
12009                         tg3_phy_lpbk_set(tp, 0, true);
12010
12011                         /* All link indications report up, but the hardware
12012                          * isn't really ready for about 20 msec.  Double it
12013                          * to be sure.
12014                          */
12015                         mdelay(40);
12016
12017                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12018                                 data[2] |= TG3_STD_LOOPBACK_FAILED;
12019                         if (tg3_flag(tp, TSO_CAPABLE) &&
12020                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12021                                 data[2] |= TG3_TSO_LOOPBACK_FAILED;
12022                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12023                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12024                                 data[2] |= TG3_JMB_LOOPBACK_FAILED;
12025                 }
12026
12027                 /* Re-enable gphy autopowerdown. */
12028                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12029                         tg3_phy_toggle_apd(tp, true);
12030         }
12031
12032         err = (data[0] | data[1] | data[2]) ? -EIO : 0;
12033
12034 done:
12035         tp->phy_flags |= eee_cap;
12036
12037         return err;
12038 }
12039
12040 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12041                           u64 *data)
12042 {
12043         struct tg3 *tp = netdev_priv(dev);
12044         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12045
12046         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12047             tg3_power_up(tp)) {
12048                 etest->flags |= ETH_TEST_FL_FAILED;
12049                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12050                 return;
12051         }
12052
12053         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12054
12055         if (tg3_test_nvram(tp) != 0) {
12056                 etest->flags |= ETH_TEST_FL_FAILED;
12057                 data[0] = 1;
12058         }
12059         if (!doextlpbk && tg3_test_link(tp)) {
12060                 etest->flags |= ETH_TEST_FL_FAILED;
12061                 data[1] = 1;
12062         }
12063         if (etest->flags & ETH_TEST_FL_OFFLINE) {
12064                 int err, err2 = 0, irq_sync = 0;
12065
12066                 if (netif_running(dev)) {
12067                         tg3_phy_stop(tp);
12068                         tg3_netif_stop(tp);
12069                         irq_sync = 1;
12070                 }
12071
12072                 tg3_full_lock(tp, irq_sync);
12073
12074                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12075                 err = tg3_nvram_lock(tp);
12076                 tg3_halt_cpu(tp, RX_CPU_BASE);
12077                 if (!tg3_flag(tp, 5705_PLUS))
12078                         tg3_halt_cpu(tp, TX_CPU_BASE);
12079                 if (!err)
12080                         tg3_nvram_unlock(tp);
12081
12082                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12083                         tg3_phy_reset(tp);
12084
12085                 if (tg3_test_registers(tp) != 0) {
12086                         etest->flags |= ETH_TEST_FL_FAILED;
12087                         data[2] = 1;
12088                 }
12089
12090                 if (tg3_test_memory(tp) != 0) {
12091                         etest->flags |= ETH_TEST_FL_FAILED;
12092                         data[3] = 1;
12093                 }
12094
12095                 if (doextlpbk)
12096                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12097
12098                 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12099                         etest->flags |= ETH_TEST_FL_FAILED;
12100
12101                 tg3_full_unlock(tp);
12102
12103                 if (tg3_test_interrupt(tp) != 0) {
12104                         etest->flags |= ETH_TEST_FL_FAILED;
12105                         data[7] = 1;
12106                 }
12107
12108                 tg3_full_lock(tp, 0);
12109
12110                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12111                 if (netif_running(dev)) {
12112                         tg3_flag_set(tp, INIT_COMPLETE);
12113                         err2 = tg3_restart_hw(tp, 1);
12114                         if (!err2)
12115                                 tg3_netif_start(tp);
12116                 }
12117
12118                 tg3_full_unlock(tp);
12119
12120                 if (irq_sync && !err2)
12121                         tg3_phy_start(tp);
12122         }
12123         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12124                 tg3_power_down(tp);
12125
12126 }
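/* tg3_self_test() backs the ETHTOOL_TEST ioctl; from userspace it is
 * typically driven with e.g. "ethtool -t eth0 offline" (interface name
 * illustrative).  Result slots: data[0] nvram, data[1] link, data[2]
 * registers, data[3] memory, data[4]-data[6] the three loopback
 * variants, data[7] interrupt.
 */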
12127
12128 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12129 {
12130         struct mii_ioctl_data *data = if_mii(ifr);
12131         struct tg3 *tp = netdev_priv(dev);
12132         int err;
12133
12134         if (tg3_flag(tp, USE_PHYLIB)) {
12135                 struct phy_device *phydev;
12136                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12137                         return -EAGAIN;
12138                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12139                 return phy_mii_ioctl(phydev, ifr, cmd);
12140         }
12141
12142         switch (cmd) {
12143         case SIOCGMIIPHY:
12144                 data->phy_id = tp->phy_addr;
12145
12146                 /* fallthru */
12147         case SIOCGMIIREG: {
12148                 u32 mii_regval;
12149
12150                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12151                         break;                  /* We have no PHY */
12152
12153                 if (!netif_running(dev))
12154                         return -EAGAIN;
12155
12156                 spin_lock_bh(&tp->lock);
12157                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12158                 spin_unlock_bh(&tp->lock);
12159
12160                 data->val_out = mii_regval;
12161
12162                 return err;
12163         }
12164
12165         case SIOCSMIIREG:
12166                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12167                         break;                  /* We have no PHY */
12168
12169                 if (!netif_running(dev))
12170                         return -EAGAIN;
12171
12172                 spin_lock_bh(&tp->lock);
12173                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12174                 spin_unlock_bh(&tp->lock);
12175
12176                 return err;
12177
12178         default:
12179                 /* do nothing */
12180                 break;
12181         }
12182         return -EOPNOTSUPP;
12183 }
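/* MII ioctl sketch (illustrative userspace usage; "eth0" and the open
 * socket fd are assumptions): SIOCGMIIPHY fills in the PHY address,
 * after which SIOCGMIIREG/SIOCSMIIREG read and write the register
 * named by reg_num (masked to 0x1f above), e.g.
 *
 *      struct ifreq ifr;
 *      struct mii_ioctl_data *mii = (void *)&ifr.ifr_data;
 *
 *      strcpy(ifr.ifr_name, "eth0");
 *      ioctl(fd, SIOCGMIIPHY, &ifr);   // PHY address in mii->phy_id
 *      mii->reg_num = 1;               // MII_BMSR
 *      ioctl(fd, SIOCGMIIREG, &ifr);   // status in mii->val_out
 */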
12184
12185 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12186 {
12187         struct tg3 *tp = netdev_priv(dev);
12188
12189         memcpy(ec, &tp->coal, sizeof(*ec));
12190         return 0;
12191 }
12192
12193 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12194 {
12195         struct tg3 *tp = netdev_priv(dev);
12196         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12197         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12198
12199         if (!tg3_flag(tp, 5705_PLUS)) {
12200                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12201                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12202                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12203                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12204         }
12205
12206         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12207             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12208             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12209             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12210             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12211             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12212             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12213             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12214             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12215             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12216                 return -EINVAL;
12217
12218         /* No rx interrupts will be generated if both are zero */
12219         if ((ec->rx_coalesce_usecs == 0) &&
12220             (ec->rx_max_coalesced_frames == 0))
12221                 return -EINVAL;
12222
12223         /* No tx interrupts will be generated if both are zero */
12224         if ((ec->tx_coalesce_usecs == 0) &&
12225             (ec->tx_max_coalesced_frames == 0))
12226                 return -EINVAL;
12227
12228         /* Only copy relevant parameters, ignore all others. */
12229         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12230         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12231         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12232         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12233         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12234         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12235         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12236         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12237         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12238
12239         if (netif_running(dev)) {
12240                 tg3_full_lock(tp, 0);
12241                 __tg3_set_coalesce(tp, &tp->coal);
12242                 tg3_full_unlock(tp);
12243         }
12244         return 0;
12245 }
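/* Example of the validation above (illustrative command): a request
 * such as "ethtool -C eth0 rx-usecs 0 rx-frames 0" is rejected with
 * -EINVAL, since a zero usec/frame pair would never generate an rx
 * interrupt; the same rule applies to the tx pair.
 */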
12246
12247 static const struct ethtool_ops tg3_ethtool_ops = {
12248         .get_settings           = tg3_get_settings,
12249         .set_settings           = tg3_set_settings,
12250         .get_drvinfo            = tg3_get_drvinfo,
12251         .get_regs_len           = tg3_get_regs_len,
12252         .get_regs               = tg3_get_regs,
12253         .get_wol                = tg3_get_wol,
12254         .set_wol                = tg3_set_wol,
12255         .get_msglevel           = tg3_get_msglevel,
12256         .set_msglevel           = tg3_set_msglevel,
12257         .nway_reset             = tg3_nway_reset,
12258         .get_link               = ethtool_op_get_link,
12259         .get_eeprom_len         = tg3_get_eeprom_len,
12260         .get_eeprom             = tg3_get_eeprom,
12261         .set_eeprom             = tg3_set_eeprom,
12262         .get_ringparam          = tg3_get_ringparam,
12263         .set_ringparam          = tg3_set_ringparam,
12264         .get_pauseparam         = tg3_get_pauseparam,
12265         .set_pauseparam         = tg3_set_pauseparam,
12266         .self_test              = tg3_self_test,
12267         .get_strings            = tg3_get_strings,
12268         .set_phys_id            = tg3_set_phys_id,
12269         .get_ethtool_stats      = tg3_get_ethtool_stats,
12270         .get_coalesce           = tg3_get_coalesce,
12271         .set_coalesce           = tg3_set_coalesce,
12272         .get_sset_count         = tg3_get_sset_count,
12273         .get_rxnfc              = tg3_get_rxnfc,
12274         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
12275         .get_rxfh_indir         = tg3_get_rxfh_indir,
12276         .set_rxfh_indir         = tg3_set_rxfh_indir,
12277         .get_ts_info            = ethtool_op_get_ts_info,
12278 };
12279
12280 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12281                                                 struct rtnl_link_stats64 *stats)
12282 {
12283         struct tg3 *tp = netdev_priv(dev);
12284
12285         if (!tp->hw_stats)
12286                 return &tp->net_stats_prev;
12287
12288         spin_lock_bh(&tp->lock);
12289         tg3_get_nstats(tp, stats);
12290         spin_unlock_bh(&tp->lock);
12291
12292         return stats;
12293 }
12294
12295 static void tg3_set_rx_mode(struct net_device *dev)
12296 {
12297         struct tg3 *tp = netdev_priv(dev);
12298
12299         if (!netif_running(dev))
12300                 return;
12301
12302         tg3_full_lock(tp, 0);
12303         __tg3_set_rx_mode(dev);
12304         tg3_full_unlock(tp);
12305 }
12306
12307 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12308                                int new_mtu)
12309 {
12310         dev->mtu = new_mtu;
12311
12312         if (new_mtu > ETH_DATA_LEN) {
12313                 if (tg3_flag(tp, 5780_CLASS)) {
12314                         netdev_update_features(dev);
12315                         tg3_flag_clear(tp, TSO_CAPABLE);
12316                 } else {
12317                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
12318                 }
12319         } else {
12320                 if (tg3_flag(tp, 5780_CLASS)) {
12321                         tg3_flag_set(tp, TSO_CAPABLE);
12322                         netdev_update_features(dev);
12323                 }
12324                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12325         }
12326 }
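/* Note: on 5780-class devices jumbo MTUs and TSO are mutually
 * exclusive in this driver, so tg3_set_mtu() trades TSO_CAPABLE off
 * against the larger MTU (netdev_update_features() then withdraws the
 * TSO features); all other chips just toggle the jumbo producer ring.
 */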
12327
12328 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12329 {
12330         struct tg3 *tp = netdev_priv(dev);
12331         int err, reset_phy = 0;
12332
12333         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12334                 return -EINVAL;
12335
12336         if (!netif_running(dev)) {
12337                 /* We'll just catch it later when the
12338                  * device is brought up.
12339                  */
12340                 tg3_set_mtu(dev, tp, new_mtu);
12341                 return 0;
12342         }
12343
12344         tg3_phy_stop(tp);
12345
12346         tg3_netif_stop(tp);
12347
12348         tg3_full_lock(tp, 1);
12349
12350         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12351
12352         tg3_set_mtu(dev, tp, new_mtu);
12353
12354         /* Reset PHY, otherwise the read DMA engine will be in a mode that
12355          * breaks all requests down to 256 bytes.
12356          */
12357         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12358                 reset_phy = 1;
12359
12360         err = tg3_restart_hw(tp, reset_phy);
12361
12362         if (!err)
12363                 tg3_netif_start(tp);
12364
12365         tg3_full_unlock(tp);
12366
12367         if (!err)
12368                 tg3_phy_start(tp);
12369
12370         return err;
12371 }
12372
12373 static const struct net_device_ops tg3_netdev_ops = {
12374         .ndo_open               = tg3_open,
12375         .ndo_stop               = tg3_close,
12376         .ndo_start_xmit         = tg3_start_xmit,
12377         .ndo_get_stats64        = tg3_get_stats64,
12378         .ndo_validate_addr      = eth_validate_addr,
12379         .ndo_set_rx_mode        = tg3_set_rx_mode,
12380         .ndo_set_mac_address    = tg3_set_mac_addr,
12381         .ndo_do_ioctl           = tg3_ioctl,
12382         .ndo_tx_timeout         = tg3_tx_timeout,
12383         .ndo_change_mtu         = tg3_change_mtu,
12384         .ndo_fix_features       = tg3_fix_features,
12385         .ndo_set_features       = tg3_set_features,
12386 #ifdef CONFIG_NET_POLL_CONTROLLER
12387         .ndo_poll_controller    = tg3_poll_controller,
12388 #endif
12389 };
12390
12391 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12392 {
12393         u32 cursize, val, magic;
12394
12395         tp->nvram_size = EEPROM_CHIP_SIZE;
12396
12397         if (tg3_nvram_read(tp, 0, &magic) != 0)
12398                 return;
12399
12400         if ((magic != TG3_EEPROM_MAGIC) &&
12401             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12402             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12403                 return;
12404
12405         /*
12406          * Size the chip by reading offsets at increasing powers of two.
12407          * When we encounter our validation signature, we know the addressing
12408          * has wrapped around, and thus have our chip size.
12409          */
12410         cursize = 0x10;
12411
12412         while (cursize < tp->nvram_size) {
12413                 if (tg3_nvram_read(tp, cursize, &val) != 0)
12414                         return;
12415
12416                 if (val == magic)
12417                         break;
12418
12419                 cursize <<= 1;
12420         }
12421
12422         tp->nvram_size = cursize;
12423 }
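/* Sizing example (illustrative part size): on a 128-byte EEPROM the
 * reads at 0x10, 0x20 and 0x40 return ordinary data, but the read at
 * 0x80 wraps around to offset 0 and returns the magic signature again,
 * so the loop exits with cursize == 0x80 and tp->nvram_size == 128.
 */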
12424
12425 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12426 {
12427         u32 val;
12428
12429         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12430                 return;
12431
12432         /* Selfboot format */
12433         if (val != TG3_EEPROM_MAGIC) {
12434                 tg3_get_eeprom_size(tp);
12435                 return;
12436         }
12437
12438         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12439                 if (val != 0) {
12440                         /* We want to operate on the 16-bit value at
12441                          * offset 0xf2.  tg3_nvram_read() byteswaps
12442                          * the data it reads according to the settings
12443                          * used for all other register accesses, which
12444                          * guarantees the halfword we want always lands
12445                          * in the lower 16 bits.  NVRAM itself is in LE
12446                          * format, though, so that data always arrives
12447                          * opposite the endianness of the CPU.  The
12448                          * 16-bit byteswap below brings the value back
12449                          * to CPU endianness.
12450                          */
12451                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12452                         return;
12453                 }
12454         }
12455         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12456 }
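/* Worked example for the swab16() above (illustrative value): if the
 * halfword at 0xf2 arrives as 0x0002 after the register-level
 * byteswap, swab16() restores the little-endian value 0x0200 == 512,
 * and the reported size becomes 512 * 1024 bytes.
 */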
12457
12458 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12459 {
12460         u32 nvcfg1;
12461
12462         nvcfg1 = tr32(NVRAM_CFG1);
12463         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12464                 tg3_flag_set(tp, FLASH);
12465         } else {
12466                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12467                 tw32(NVRAM_CFG1, nvcfg1);
12468         }
12469
12470         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12471             tg3_flag(tp, 5780_CLASS)) {
12472                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12473                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12474                         tp->nvram_jedecnum = JEDEC_ATMEL;
12475                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12476                         tg3_flag_set(tp, NVRAM_BUFFERED);
12477                         break;
12478                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12479                         tp->nvram_jedecnum = JEDEC_ATMEL;
12480                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12481                         break;
12482                 case FLASH_VENDOR_ATMEL_EEPROM:
12483                         tp->nvram_jedecnum = JEDEC_ATMEL;
12484                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12485                         tg3_flag_set(tp, NVRAM_BUFFERED);
12486                         break;
12487                 case FLASH_VENDOR_ST:
12488                         tp->nvram_jedecnum = JEDEC_ST;
12489                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12490                         tg3_flag_set(tp, NVRAM_BUFFERED);
12491                         break;
12492                 case FLASH_VENDOR_SAIFUN:
12493                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12494                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12495                         break;
12496                 case FLASH_VENDOR_SST_SMALL:
12497                 case FLASH_VENDOR_SST_LARGE:
12498                         tp->nvram_jedecnum = JEDEC_SST;
12499                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12500                         break;
12501                 }
12502         } else {
12503                 tp->nvram_jedecnum = JEDEC_ATMEL;
12504                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12505                 tg3_flag_set(tp, NVRAM_BUFFERED);
12506         }
12507 }
12508
12509 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12510 {
12511         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12512         case FLASH_5752PAGE_SIZE_256:
12513                 tp->nvram_pagesize = 256;
12514                 break;
12515         case FLASH_5752PAGE_SIZE_512:
12516                 tp->nvram_pagesize = 512;
12517                 break;
12518         case FLASH_5752PAGE_SIZE_1K:
12519                 tp->nvram_pagesize = 1024;
12520                 break;
12521         case FLASH_5752PAGE_SIZE_2K:
12522                 tp->nvram_pagesize = 2048;
12523                 break;
12524         case FLASH_5752PAGE_SIZE_4K:
12525                 tp->nvram_pagesize = 4096;
12526                 break;
12527         case FLASH_5752PAGE_SIZE_264:
12528                 tp->nvram_pagesize = 264;
12529                 break;
12530         case FLASH_5752PAGE_SIZE_528:
12531                 tp->nvram_pagesize = 528;
12532                 break;
12533         }
12534 }
12535
12536 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12537 {
12538         u32 nvcfg1;
12539
12540         nvcfg1 = tr32(NVRAM_CFG1);
12541
12542         /* NVRAM protection for TPM */
12543         if (nvcfg1 & (1 << 27))
12544                 tg3_flag_set(tp, PROTECTED_NVRAM);
12545
12546         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12547         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12548         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12549                 tp->nvram_jedecnum = JEDEC_ATMEL;
12550                 tg3_flag_set(tp, NVRAM_BUFFERED);
12551                 break;
12552         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12553                 tp->nvram_jedecnum = JEDEC_ATMEL;
12554                 tg3_flag_set(tp, NVRAM_BUFFERED);
12555                 tg3_flag_set(tp, FLASH);
12556                 break;
12557         case FLASH_5752VENDOR_ST_M45PE10:
12558         case FLASH_5752VENDOR_ST_M45PE20:
12559         case FLASH_5752VENDOR_ST_M45PE40:
12560                 tp->nvram_jedecnum = JEDEC_ST;
12561                 tg3_flag_set(tp, NVRAM_BUFFERED);
12562                 tg3_flag_set(tp, FLASH);
12563                 break;
12564         }
12565
12566         if (tg3_flag(tp, FLASH)) {
12567                 tg3_nvram_get_pagesize(tp, nvcfg1);
12568         } else {
12569                 /* For EEPROMs, set the pagesize to the maximum EEPROM size. */
12570                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12571
12572                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12573                 tw32(NVRAM_CFG1, nvcfg1);
12574         }
12575 }
12576
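      /* Determine NVRAM geometry for 5755 devices.  TPM-protected parts
       * report a reduced usable size.
       */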
12577 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12578 {
12579         u32 nvcfg1, protect = 0;
12580
12581         nvcfg1 = tr32(NVRAM_CFG1);
12582
12583         /* NVRAM protection for TPM */
12584         if (nvcfg1 & (1 << 27)) {
12585                 tg3_flag_set(tp, PROTECTED_NVRAM);
12586                 protect = 1;
12587         }
12588
12589         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12590         switch (nvcfg1) {
12591         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12592         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12593         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12594         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12595                 tp->nvram_jedecnum = JEDEC_ATMEL;
12596                 tg3_flag_set(tp, NVRAM_BUFFERED);
12597                 tg3_flag_set(tp, FLASH);
12598                 tp->nvram_pagesize = 264;
12599                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12600                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12601                         tp->nvram_size = (protect ? 0x3e200 :
12602                                           TG3_NVRAM_SIZE_512KB);
12603                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12604                         tp->nvram_size = (protect ? 0x1f200 :
12605                                           TG3_NVRAM_SIZE_256KB);
12606                 else
12607                         tp->nvram_size = (protect ? 0x1f200 :
12608                                           TG3_NVRAM_SIZE_128KB);
12609                 break;
12610         case FLASH_5752VENDOR_ST_M45PE10:
12611         case FLASH_5752VENDOR_ST_M45PE20:
12612         case FLASH_5752VENDOR_ST_M45PE40:
12613                 tp->nvram_jedecnum = JEDEC_ST;
12614                 tg3_flag_set(tp, NVRAM_BUFFERED);
12615                 tg3_flag_set(tp, FLASH);
12616                 tp->nvram_pagesize = 256;
12617                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12618                         tp->nvram_size = (protect ?
12619                                           TG3_NVRAM_SIZE_64KB :
12620                                           TG3_NVRAM_SIZE_128KB);
12621                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12622                         tp->nvram_size = (protect ?
12623                                           TG3_NVRAM_SIZE_64KB :
12624                                           TG3_NVRAM_SIZE_256KB);
12625                 else
12626                         tp->nvram_size = (protect ?
12627                                           TG3_NVRAM_SIZE_128KB :
12628                                           TG3_NVRAM_SIZE_512KB);
12629                 break;
12630         }
12631 }
12632
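      /* Determine NVRAM geometry for 5787-class devices (also used for
       * the 5784 and 5785).
       */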
12633 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12634 {
12635         u32 nvcfg1;
12636
12637         nvcfg1 = tr32(NVRAM_CFG1);
12638
12639         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12640         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12641         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12642         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12643         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12644                 tp->nvram_jedecnum = JEDEC_ATMEL;
12645                 tg3_flag_set(tp, NVRAM_BUFFERED);
12646                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12647
12648                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12649                 tw32(NVRAM_CFG1, nvcfg1);
12650                 break;
12651         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12652         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12653         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12654         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12655                 tp->nvram_jedecnum = JEDEC_ATMEL;
12656                 tg3_flag_set(tp, NVRAM_BUFFERED);
12657                 tg3_flag_set(tp, FLASH);
12658                 tp->nvram_pagesize = 264;
12659                 break;
12660         case FLASH_5752VENDOR_ST_M45PE10:
12661         case FLASH_5752VENDOR_ST_M45PE20:
12662         case FLASH_5752VENDOR_ST_M45PE40:
12663                 tp->nvram_jedecnum = JEDEC_ST;
12664                 tg3_flag_set(tp, NVRAM_BUFFERED);
12665                 tg3_flag_set(tp, FLASH);
12666                 tp->nvram_pagesize = 256;
12667                 break;
12668         }
12669 }
12670
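      /* Determine NVRAM geometry for 5761 devices.  With TPM protection
       * on, the usable size is read from the NVRAM_ADDR_LOCKOUT register.
       */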
12671 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12672 {
12673         u32 nvcfg1, protect = 0;
12674
12675         nvcfg1 = tr32(NVRAM_CFG1);
12676
12677         /* NVRAM protection for TPM */
12678         if (nvcfg1 & (1 << 27)) {
12679                 tg3_flag_set(tp, PROTECTED_NVRAM);
12680                 protect = 1;
12681         }
12682
12683         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12684         switch (nvcfg1) {
12685         case FLASH_5761VENDOR_ATMEL_ADB021D:
12686         case FLASH_5761VENDOR_ATMEL_ADB041D:
12687         case FLASH_5761VENDOR_ATMEL_ADB081D:
12688         case FLASH_5761VENDOR_ATMEL_ADB161D:
12689         case FLASH_5761VENDOR_ATMEL_MDB021D:
12690         case FLASH_5761VENDOR_ATMEL_MDB041D:
12691         case FLASH_5761VENDOR_ATMEL_MDB081D:
12692         case FLASH_5761VENDOR_ATMEL_MDB161D:
12693                 tp->nvram_jedecnum = JEDEC_ATMEL;
12694                 tg3_flag_set(tp, NVRAM_BUFFERED);
12695                 tg3_flag_set(tp, FLASH);
12696                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12697                 tp->nvram_pagesize = 256;
12698                 break;
12699         case FLASH_5761VENDOR_ST_A_M45PE20:
12700         case FLASH_5761VENDOR_ST_A_M45PE40:
12701         case FLASH_5761VENDOR_ST_A_M45PE80:
12702         case FLASH_5761VENDOR_ST_A_M45PE16:
12703         case FLASH_5761VENDOR_ST_M_M45PE20:
12704         case FLASH_5761VENDOR_ST_M_M45PE40:
12705         case FLASH_5761VENDOR_ST_M_M45PE80:
12706         case FLASH_5761VENDOR_ST_M_M45PE16:
12707                 tp->nvram_jedecnum = JEDEC_ST;
12708                 tg3_flag_set(tp, NVRAM_BUFFERED);
12709                 tg3_flag_set(tp, FLASH);
12710                 tp->nvram_pagesize = 256;
12711                 break;
12712         }
12713
12714         if (protect) {
12715                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12716         } else {
12717                 switch (nvcfg1) {
12718                 case FLASH_5761VENDOR_ATMEL_ADB161D:
12719                 case FLASH_5761VENDOR_ATMEL_MDB161D:
12720                 case FLASH_5761VENDOR_ST_A_M45PE16:
12721                 case FLASH_5761VENDOR_ST_M_M45PE16:
12722                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12723                         break;
12724                 case FLASH_5761VENDOR_ATMEL_ADB081D:
12725                 case FLASH_5761VENDOR_ATMEL_MDB081D:
12726                 case FLASH_5761VENDOR_ST_A_M45PE80:
12727                 case FLASH_5761VENDOR_ST_M_M45PE80:
12728                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12729                         break;
12730                 case FLASH_5761VENDOR_ATMEL_ADB041D:
12731                 case FLASH_5761VENDOR_ATMEL_MDB041D:
12732                 case FLASH_5761VENDOR_ST_A_M45PE40:
12733                 case FLASH_5761VENDOR_ST_M_M45PE40:
12734                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12735                         break;
12736                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12737                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12738                 case FLASH_5761VENDOR_ST_A_M45PE20:
12739                 case FLASH_5761VENDOR_ST_M_M45PE20:
12740                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12741                         break;
12742                 }
12743         }
12744 }
12745
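      /* The 5906 always uses a buffered Atmel EEPROM with AT24C512-style
       * addressing.
       */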
12746 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12747 {
12748         tp->nvram_jedecnum = JEDEC_ATMEL;
12749         tg3_flag_set(tp, NVRAM_BUFFERED);
12750         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12751 }
12752
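      /* Determine NVRAM geometry for 57780 and 57765-class devices. */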
12753 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12754 {
12755         u32 nvcfg1;
12756
12757         nvcfg1 = tr32(NVRAM_CFG1);
12758
12759         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12760         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12761         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12762                 tp->nvram_jedecnum = JEDEC_ATMEL;
12763                 tg3_flag_set(tp, NVRAM_BUFFERED);
12764                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12765
12766                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12767                 tw32(NVRAM_CFG1, nvcfg1);
12768                 return;
12769         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12770         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12771         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12772         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12773         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12774         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12775         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12776                 tp->nvram_jedecnum = JEDEC_ATMEL;
12777                 tg3_flag_set(tp, NVRAM_BUFFERED);
12778                 tg3_flag_set(tp, FLASH);
12779
12780                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12781                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12782                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12783                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12784                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12785                         break;
12786                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12787                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12788                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12789                         break;
12790                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12791                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12792                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12793                         break;
12794                 }
12795                 break;
12796         case FLASH_5752VENDOR_ST_M45PE10:
12797         case FLASH_5752VENDOR_ST_M45PE20:
12798         case FLASH_5752VENDOR_ST_M45PE40:
12799                 tp->nvram_jedecnum = JEDEC_ST;
12800                 tg3_flag_set(tp, NVRAM_BUFFERED);
12801                 tg3_flag_set(tp, FLASH);
12802
12803                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12804                 case FLASH_5752VENDOR_ST_M45PE10:
12805                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12806                         break;
12807                 case FLASH_5752VENDOR_ST_M45PE20:
12808                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12809                         break;
12810                 case FLASH_5752VENDOR_ST_M45PE40:
12811                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12812                         break;
12813                 }
12814                 break;
12815         default:
12816                 tg3_flag_set(tp, NO_NVRAM);
12817                 return;
12818         }
12819
12820         tg3_nvram_get_pagesize(tp, nvcfg1);
12821         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12822                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12823 }
12824
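      /* Determine NVRAM geometry for 5717 and 5719 devices. */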
12826 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12827 {
12828         u32 nvcfg1;
12829
12830         nvcfg1 = tr32(NVRAM_CFG1);
12831
12832         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12833         case FLASH_5717VENDOR_ATMEL_EEPROM:
12834         case FLASH_5717VENDOR_MICRO_EEPROM:
12835                 tp->nvram_jedecnum = JEDEC_ATMEL;
12836                 tg3_flag_set(tp, NVRAM_BUFFERED);
12837                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12838
12839                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12840                 tw32(NVRAM_CFG1, nvcfg1);
12841                 return;
12842         case FLASH_5717VENDOR_ATMEL_MDB011D:
12843         case FLASH_5717VENDOR_ATMEL_ADB011B:
12844         case FLASH_5717VENDOR_ATMEL_ADB011D:
12845         case FLASH_5717VENDOR_ATMEL_MDB021D:
12846         case FLASH_5717VENDOR_ATMEL_ADB021B:
12847         case FLASH_5717VENDOR_ATMEL_ADB021D:
12848         case FLASH_5717VENDOR_ATMEL_45USPT:
12849                 tp->nvram_jedecnum = JEDEC_ATMEL;
12850                 tg3_flag_set(tp, NVRAM_BUFFERED);
12851                 tg3_flag_set(tp, FLASH);
12852
12853                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12854                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12855                         /* Detect size with tg3_nvram_get_size() */
12856                         break;
12857                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12858                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12859                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12860                         break;
12861                 default:
12862                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12863                         break;
12864                 }
12865                 break;
12866         case FLASH_5717VENDOR_ST_M_M25PE10:
12867         case FLASH_5717VENDOR_ST_A_M25PE10:
12868         case FLASH_5717VENDOR_ST_M_M45PE10:
12869         case FLASH_5717VENDOR_ST_A_M45PE10:
12870         case FLASH_5717VENDOR_ST_M_M25PE20:
12871         case FLASH_5717VENDOR_ST_A_M25PE20:
12872         case FLASH_5717VENDOR_ST_M_M45PE20:
12873         case FLASH_5717VENDOR_ST_A_M45PE20:
12874         case FLASH_5717VENDOR_ST_25USPT:
12875         case FLASH_5717VENDOR_ST_45USPT:
12876                 tp->nvram_jedecnum = JEDEC_ST;
12877                 tg3_flag_set(tp, NVRAM_BUFFERED);
12878                 tg3_flag_set(tp, FLASH);
12879
12880                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12881                 case FLASH_5717VENDOR_ST_M_M25PE20:
12882                 case FLASH_5717VENDOR_ST_M_M45PE20:
12883                         /* Detect size with tg3_nvram_get_size() */
12884                         break;
12885                 case FLASH_5717VENDOR_ST_A_M25PE20:
12886                 case FLASH_5717VENDOR_ST_A_M45PE20:
12887                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12888                         break;
12889                 default:
12890                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12891                         break;
12892                 }
12893                 break;
12894         default:
12895                 tg3_flag_set(tp, NO_NVRAM);
12896                 return;
12897         }
12898
12899         tg3_nvram_get_pagesize(tp, nvcfg1);
12900         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12901                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12902 }
12903
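      /* Determine NVRAM geometry for 5720 devices from the NVRAM pin
       * strapping field.
       */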
12904 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12905 {
12906         u32 nvcfg1, nvmpinstrp;
12907
12908         nvcfg1 = tr32(NVRAM_CFG1);
12909         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12910
12911         switch (nvmpinstrp) {
12912         case FLASH_5720_EEPROM_HD:
12913         case FLASH_5720_EEPROM_LD:
12914                 tp->nvram_jedecnum = JEDEC_ATMEL;
12915                 tg3_flag_set(tp, NVRAM_BUFFERED);
12916
12917                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12918                 tw32(NVRAM_CFG1, nvcfg1);
12919                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12920                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12921                 else
12922                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12923                 return;
12924         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12925         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12926         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12927         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12928         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12929         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12930         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12931         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12932         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12933         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12934         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12935         case FLASH_5720VENDOR_ATMEL_45USPT:
12936                 tp->nvram_jedecnum = JEDEC_ATMEL;
12937                 tg3_flag_set(tp, NVRAM_BUFFERED);
12938                 tg3_flag_set(tp, FLASH);
12939
12940                 switch (nvmpinstrp) {
12941                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12942                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12943                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12944                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12945                         break;
12946                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12947                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12948                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12949                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12950                         break;
12951                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12952                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12953                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12954                         break;
12955                 default:
12956                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12957                         break;
12958                 }
12959                 break;
12960         case FLASH_5720VENDOR_M_ST_M25PE10:
12961         case FLASH_5720VENDOR_M_ST_M45PE10:
12962         case FLASH_5720VENDOR_A_ST_M25PE10:
12963         case FLASH_5720VENDOR_A_ST_M45PE10:
12964         case FLASH_5720VENDOR_M_ST_M25PE20:
12965         case FLASH_5720VENDOR_M_ST_M45PE20:
12966         case FLASH_5720VENDOR_A_ST_M25PE20:
12967         case FLASH_5720VENDOR_A_ST_M45PE20:
12968         case FLASH_5720VENDOR_M_ST_M25PE40:
12969         case FLASH_5720VENDOR_M_ST_M45PE40:
12970         case FLASH_5720VENDOR_A_ST_M25PE40:
12971         case FLASH_5720VENDOR_A_ST_M45PE40:
12972         case FLASH_5720VENDOR_M_ST_M25PE80:
12973         case FLASH_5720VENDOR_M_ST_M45PE80:
12974         case FLASH_5720VENDOR_A_ST_M25PE80:
12975         case FLASH_5720VENDOR_A_ST_M45PE80:
12976         case FLASH_5720VENDOR_ST_25USPT:
12977         case FLASH_5720VENDOR_ST_45USPT:
12978                 tp->nvram_jedecnum = JEDEC_ST;
12979                 tg3_flag_set(tp, NVRAM_BUFFERED);
12980                 tg3_flag_set(tp, FLASH);
12981
12982                 switch (nvmpinstrp) {
12983                 case FLASH_5720VENDOR_M_ST_M25PE20:
12984                 case FLASH_5720VENDOR_M_ST_M45PE20:
12985                 case FLASH_5720VENDOR_A_ST_M25PE20:
12986                 case FLASH_5720VENDOR_A_ST_M45PE20:
12987                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12988                         break;
12989                 case FLASH_5720VENDOR_M_ST_M25PE40:
12990                 case FLASH_5720VENDOR_M_ST_M45PE40:
12991                 case FLASH_5720VENDOR_A_ST_M25PE40:
12992                 case FLASH_5720VENDOR_A_ST_M45PE40:
12993                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12994                         break;
12995                 case FLASH_5720VENDOR_M_ST_M25PE80:
12996                 case FLASH_5720VENDOR_M_ST_M45PE80:
12997                 case FLASH_5720VENDOR_A_ST_M25PE80:
12998                 case FLASH_5720VENDOR_A_ST_M45PE80:
12999                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13000                         break;
13001                 default:
13002                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13003                         break;
13004                 }
13005                 break;
13006         default:
13007                 tg3_flag_set(tp, NO_NVRAM);
13008                 return;
13009         }
13010
13011         tg3_nvram_get_pagesize(tp, nvcfg1);
13012         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13013                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13014 }
13015
13016 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13017 static void __devinit tg3_nvram_init(struct tg3 *tp)
13018 {
13019         tw32_f(GRC_EEPROM_ADDR,
13020              (EEPROM_ADDR_FSM_RESET |
13021               (EEPROM_DEFAULT_CLOCK_PERIOD <<
13022                EEPROM_ADDR_CLKPERD_SHIFT)));
13023
13024         msleep(1);
13025
13026         /* Enable serial EEPROM (seeprom) accesses. */
13027         tw32_f(GRC_LOCAL_CTRL,
13028              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13029         udelay(100);
13030
13031         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13032             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13033                 tg3_flag_set(tp, NVRAM);
13034
13035                 if (tg3_nvram_lock(tp)) {
13036                         netdev_warn(tp->dev,
13037                                     "Cannot get nvram lock, %s failed\n",
13038                                     __func__);
13039                         return;
13040                 }
13041                 tg3_enable_nvram_access(tp);
13042
13043                 tp->nvram_size = 0;
13044
13045                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13046                         tg3_get_5752_nvram_info(tp);
13047                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13048                         tg3_get_5755_nvram_info(tp);
13049                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13050                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13051                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13052                         tg3_get_5787_nvram_info(tp);
13053                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13054                         tg3_get_5761_nvram_info(tp);
13055                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13056                         tg3_get_5906_nvram_info(tp);
13057                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13058                          tg3_flag(tp, 57765_CLASS))
13059                         tg3_get_57780_nvram_info(tp);
13060                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13061                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13062                         tg3_get_5717_nvram_info(tp);
13063                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13064                         tg3_get_5720_nvram_info(tp);
13065                 else
13066                         tg3_get_nvram_info(tp);
13067
13068                 if (tp->nvram_size == 0)
13069                         tg3_get_nvram_size(tp);
13070
13071                 tg3_disable_nvram_access(tp);
13072                 tg3_nvram_unlock(tp);
13073
13074         } else {
13075                 tg3_flag_clear(tp, NVRAM);
13076                 tg3_flag_clear(tp, NVRAM_BUFFERED);
13077
13078                 tg3_get_eeprom_size(tp);
13079         }
13080 }
13081
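      /* Map PCI subsystem IDs to PHY IDs for boards whose NVRAM carries
       * no usable PHY information.  A phy_id of 0 marks a serdes board.
       */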
13082 struct subsys_tbl_ent {
13083         u16 subsys_vendor, subsys_devid;
13084         u32 phy_id;
13085 };
13086
13087 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13088         /* Broadcom boards. */
13089         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13090           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13091         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13092           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13093         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13094           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13095         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13096           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13097         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13098           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13099         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13100           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13101         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13102           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13103         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13104           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13105         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13106           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13107         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13108           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13109         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13110           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13111
13112         /* 3com boards. */
13113         { TG3PCI_SUBVENDOR_ID_3COM,
13114           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13115         { TG3PCI_SUBVENDOR_ID_3COM,
13116           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13117         { TG3PCI_SUBVENDOR_ID_3COM,
13118           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13119         { TG3PCI_SUBVENDOR_ID_3COM,
13120           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13121         { TG3PCI_SUBVENDOR_ID_3COM,
13122           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13123
13124         /* DELL boards. */
13125         { TG3PCI_SUBVENDOR_ID_DELL,
13126           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13127         { TG3PCI_SUBVENDOR_ID_DELL,
13128           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13129         { TG3PCI_SUBVENDOR_ID_DELL,
13130           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13131         { TG3PCI_SUBVENDOR_ID_DELL,
13132           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13133
13134         /* Compaq boards. */
13135         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13136           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13137         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13138           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13139         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13140           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13141         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13142           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13143         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13144           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13145
13146         /* IBM boards. */
13147         { TG3PCI_SUBVENDOR_ID_IBM,
13148           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13149 };
13150
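      /* Match this device's PCI subsystem vendor and device IDs against
       * the table above; returns NULL for unknown boards.
       */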
13151 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13152 {
13153         int i;
13154
13155         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13156                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13157                      tp->pdev->subsystem_vendor) &&
13158                     (subsys_id_to_phy_id[i].subsys_devid ==
13159                      tp->pdev->subsystem_device))
13160                         return &subsys_id_to_phy_id[i];
13161         }
13162         return NULL;
13163 }
13164
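      /* Harvest the hardware configuration (PHY ID, LED mode, and the
       * WOL/ASF/APE capabilities) that the bootcode leaves in NIC SRAM.
       */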
13165 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13166 {
13167         u32 val;
13168
13169         tp->phy_id = TG3_PHY_ID_INVALID;
13170         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13171
13172         /* Assume an onboard, WOL-capable device by default. */
13173         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13174         tg3_flag_set(tp, WOL_CAP);
13175
13176         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13177                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13178                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13179                         tg3_flag_set(tp, IS_NIC);
13180                 }
13181                 val = tr32(VCPU_CFGSHDW);
13182                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13183                         tg3_flag_set(tp, ASPM_WORKAROUND);
13184                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13185                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13186                         tg3_flag_set(tp, WOL_ENABLE);
13187                         device_set_wakeup_enable(&tp->pdev->dev, true);
13188                 }
13189                 goto done;
13190         }
13191
13192         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13193         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13194                 u32 nic_cfg, led_cfg;
13195                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13196                 int eeprom_phy_serdes = 0;
13197
13198                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13199                 tp->nic_sram_data_cfg = nic_cfg;
13200
13201                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13202                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13203                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13204                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13205                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13206                     (ver > 0) && (ver < 0x100))
13207                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13208
13209                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13210                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13211
13212                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13213                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13214                         eeprom_phy_serdes = 1;
13215
13216                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13217                 if (nic_phy_id != 0) {
13218                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13219                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13220
13221                         eeprom_phy_id  = (id1 >> 16) << 10;
13222                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13223                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13224                 } else
13225                         eeprom_phy_id = 0;
13226
13227                 tp->phy_id = eeprom_phy_id;
13228                 if (eeprom_phy_serdes) {
13229                         if (!tg3_flag(tp, 5705_PLUS))
13230                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13231                         else
13232                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13233                 }
13234
13235                 if (tg3_flag(tp, 5750_PLUS))
13236                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13237                                     SHASTA_EXT_LED_MODE_MASK);
13238                 else
13239                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13240
13241                 switch (led_cfg) {
13242                 default:
13243                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13244                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13245                         break;
13246
13247                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13248                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13249                         break;
13250
13251                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13252                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13253
13254                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13255                          * read from some older 5700/5701 bootcode.
13256                          */
13257                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13258                             ASIC_REV_5700 ||
13259                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13260                             ASIC_REV_5701)
13261                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13262
13263                         break;
13264
13265                 case SHASTA_EXT_LED_SHARED:
13266                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13267                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13268                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13269                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13270                                                  LED_CTRL_MODE_PHY_2);
13271                         break;
13272
13273                 case SHASTA_EXT_LED_MAC:
13274                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13275                         break;
13276
13277                 case SHASTA_EXT_LED_COMBO:
13278                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13279                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13280                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13281                                                  LED_CTRL_MODE_PHY_2);
13282                         break;
13283
13284                 }
13285
13286                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13287                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13288                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13289                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13290
13291                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13292                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13293
13294                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13295                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13296                         if ((tp->pdev->subsystem_vendor ==
13297                              PCI_VENDOR_ID_ARIMA) &&
13298                             (tp->pdev->subsystem_device == 0x205a ||
13299                              tp->pdev->subsystem_device == 0x2063))
13300                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13301                 } else {
13302                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13303                         tg3_flag_set(tp, IS_NIC);
13304                 }
13305
13306                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13307                         tg3_flag_set(tp, ENABLE_ASF);
13308                         if (tg3_flag(tp, 5750_PLUS))
13309                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13310                 }
13311
13312                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13313                     tg3_flag(tp, 5750_PLUS))
13314                         tg3_flag_set(tp, ENABLE_APE);
13315
13316                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13317                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13318                         tg3_flag_clear(tp, WOL_CAP);
13319
13320                 if (tg3_flag(tp, WOL_CAP) &&
13321                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13322                         tg3_flag_set(tp, WOL_ENABLE);
13323                         device_set_wakeup_enable(&tp->pdev->dev, true);
13324                 }
13325
13326                 if (cfg2 & (1 << 17))
13327                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13328
13329                 /* Serdes signal pre-emphasis in register 0x590 is set by
13330                  * the bootcode if bit 18 is set. */
13331                 if (cfg2 & (1 << 18))
13332                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13333
13334                 if ((tg3_flag(tp, 57765_PLUS) ||
13335                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13336                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13337                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13338                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13339
13340                 if (tg3_flag(tp, PCI_EXPRESS) &&
13341                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13342                     !tg3_flag(tp, 57765_PLUS)) {
13343                         u32 cfg3;
13344
13345                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13346                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13347                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13348                 }
13349
13350                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13351                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13352                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13353                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13354                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13355                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13356         }
13357 done:
13358         if (tg3_flag(tp, WOL_CAP))
13359                 device_set_wakeup_enable(&tp->pdev->dev,
13360                                          tg3_flag(tp, WOL_ENABLE));
13361         else
13362                 device_set_wakeup_capable(&tp->pdev->dev, false);
13363 }
13364
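      /* Issue an OTP controller command and poll for its completion. */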
13365 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13366 {
13367         int i;
13368         u32 val;
13369
13370         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13371         tw32(OTP_CTRL, cmd);
13372
13373         /* Wait for up to 1 ms for command to execute. */
13374         for (i = 0; i < 100; i++) {
13375                 val = tr32(OTP_STATUS);
13376                 if (val & OTP_STATUS_CMD_DONE)
13377                         break;
13378                 udelay(10);
13379         }
13380
13381         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13382 }
13383
13384 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13385  * configuration is a 32-bit value that straddles the alignment boundary.
13386  * We do two 32-bit reads and then shift and merge the results.
13387  */
13388 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13389 {
13390         u32 bhalf_otp, thalf_otp;
13391
13392         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13393
13394         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13395                 return 0;
13396
13397         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13398
13399         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13400                 return 0;
13401
13402         thalf_otp = tr32(OTP_READ_DATA);
13403
13404         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13405
13406         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13407                 return 0;
13408
13409         bhalf_otp = tr32(OTP_READ_DATA);
13410
13411         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13412 }
13413
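      /* Establish default link settings: advertise every mode the PHY
       * supports and enable autonegotiation.
       */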
13414 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13415 {
13416         u32 adv = ADVERTISED_Autoneg;
13417
13418         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13419                 adv |= ADVERTISED_1000baseT_Half |
13420                        ADVERTISED_1000baseT_Full;
13421
13422         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13423                 adv |= ADVERTISED_100baseT_Half |
13424                        ADVERTISED_100baseT_Full |
13425                        ADVERTISED_10baseT_Half |
13426                        ADVERTISED_10baseT_Full |
13427                        ADVERTISED_TP;
13428         else
13429                 adv |= ADVERTISED_FIBRE;
13430
13431         tp->link_config.advertising = adv;
13432         tp->link_config.speed = SPEED_UNKNOWN;
13433         tp->link_config.duplex = DUPLEX_UNKNOWN;
13434         tp->link_config.autoneg = AUTONEG_ENABLE;
13435         tp->link_config.active_speed = SPEED_UNKNOWN;
13436         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13437
13438         tp->old_link = -1;
13439 }
13440
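      /* Identify the PHY, preferring a direct hardware read (skipped
       * when ASF/APE firmware owns the PHY), then the eeprom value,
       * then the subsystem device table.
       */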
13441 static int __devinit tg3_phy_probe(struct tg3 *tp)
13442 {
13443         u32 hw_phy_id_1, hw_phy_id_2;
13444         u32 hw_phy_id, hw_phy_id_masked;
13445         int err;
13446
13447         /* Flow control autonegotiation is the default behavior. */
13448         tg3_flag_set(tp, PAUSE_AUTONEG);
13449         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13450
13451         if (tg3_flag(tp, USE_PHYLIB))
13452                 return tg3_phy_init(tp);
13453
13454         /* Reading the PHY ID register can conflict with ASF
13455          * firmware access to the PHY hardware.
13456          */
13457         err = 0;
13458         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13459                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13460         } else {
13461                 /* Now read the physical PHY_ID from the chip and verify
13462                  * that it is sane.  If it doesn't look good, we fall back
13463                  * to the PHY_ID recorded in the eeprom area, and failing
13464                  * that, to the hard-coded subsystem device table.
13465                  */
13466                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13467                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13468
13469                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13470                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13471                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13472
13473                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13474         }
13475
13476         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13477                 tp->phy_id = hw_phy_id;
13478                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13479                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13480                 else
13481                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13482         } else {
13483                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13484                         /* Do nothing, phy ID already set up in
13485                          * tg3_get_eeprom_hw_cfg().
13486                          */
13487                 } else {
13488                         struct subsys_tbl_ent *p;
13489
13490                         /* No eeprom signature?  Try the hardcoded
13491                          * subsys device table.
13492                          */
13493                         p = tg3_lookup_by_subsys(tp);
13494                         if (!p)
13495                                 return -ENODEV;
13496
13497                         tp->phy_id = p->phy_id;
13498                         if (!tp->phy_id ||
13499                             tp->phy_id == TG3_PHY_ID_BCM8002)
13500                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13501                 }
13502         }
13503
13504         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13505             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13506              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13507              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13508               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13509              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13510               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13511                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13512
13513         tg3_phy_init_link_config(tp);
13514
13515         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13516             !tg3_flag(tp, ENABLE_APE) &&
13517             !tg3_flag(tp, ENABLE_ASF)) {
13518                 u32 bmsr, dummy;
13519
13520                 tg3_readphy(tp, MII_BMSR, &bmsr);
13521                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13522                     (bmsr & BMSR_LSTATUS))
13523                         goto skip_phy_reset;
13524
13525                 err = tg3_phy_reset(tp);
13526                 if (err)
13527                         return err;
13528
13529                 tg3_phy_set_wirespeed(tp);
13530
13531                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13532                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13533                                             tp->link_config.flowctrl);
13534
13535                         tg3_writephy(tp, MII_BMCR,
13536                                      BMCR_ANENABLE | BMCR_ANRESTART);
13537                 }
13538         }
13539
13540 skip_phy_reset:
13541         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13542                 err = tg3_init_5401phy_dsp(tp);
13543                 if (err)
13544                         return err;
13545
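                      /* Issue the 5401 DSP init sequence a second time. */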
13546                 err = tg3_init_5401phy_dsp(tp);
13547         }
13548
13549         return err;
13550 }
13551
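      /* Extract the board part number (and, on Dell boards, the bootcode
       * version string) from PCI VPD, falling back to names derived from
       * the PCI device ID when no VPD is available.
       */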
13552 static void __devinit tg3_read_vpd(struct tg3 *tp)
13553 {
13554         u8 *vpd_data;
13555         unsigned int block_end, rosize, len;
13556         u32 vpdlen;
13557         int j, i = 0;
13558
13559         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13560         if (!vpd_data)
13561                 goto out_no_vpd;
13562
13563         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13564         if (i < 0)
13565                 goto out_not_found;
13566
13567         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13568         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13569         i += PCI_VPD_LRDT_TAG_SIZE;
13570
13571         if (block_end > vpdlen)
13572                 goto out_not_found;
13573
13574         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13575                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13576         if (j > 0) {
13577                 len = pci_vpd_info_field_size(&vpd_data[j]);
13578
13579                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13580                 if (j + len > block_end || len != 4 ||
13581                     memcmp(&vpd_data[j], "1028", 4))
13582                         goto partno;
13583
13584                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13585                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13586                 if (j < 0)
13587                         goto partno;
13588
13589                 len = pci_vpd_info_field_size(&vpd_data[j]);
13590
13591                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13592                 if (j + len > block_end)
13593                         goto partno;
13594
13595                 /* Bound the copy by the destination, not the VPD length. */
                      if (len >= sizeof(tp->fw_ver))
                              len = sizeof(tp->fw_ver) - 1;
                      memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
13596                 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ",
                               len, &vpd_data[j]);
13597         }
13598
13599 partno:
13600         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13601                                       PCI_VPD_RO_KEYWORD_PARTNO);
13602         if (i < 0)
13603                 goto out_not_found;
13604
13605         len = pci_vpd_info_field_size(&vpd_data[i]);
13606
13607         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13608         if (len > TG3_BPN_SIZE ||
13609             (len + i) > vpdlen)
13610                 goto out_not_found;
13611
13612         memcpy(tp->board_part_number, &vpd_data[i], len);
13613
13614 out_not_found:
13615         kfree(vpd_data);
13616         if (tp->board_part_number[0])
13617                 return;
13618
13619 out_no_vpd:
13620         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13621                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13622                         strcpy(tp->board_part_number, "BCM5717");
13623                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13624                         strcpy(tp->board_part_number, "BCM5718");
13625                 else
13626                         goto nomatch;
13627         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13628                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13629                         strcpy(tp->board_part_number, "BCM57780");
13630                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13631                         strcpy(tp->board_part_number, "BCM57760");
13632                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13633                         strcpy(tp->board_part_number, "BCM57790");
13634                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13635                         strcpy(tp->board_part_number, "BCM57788");
13636                 else
13637                         goto nomatch;
13638         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13639                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13640                         strcpy(tp->board_part_number, "BCM57761");
13641                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13642                         strcpy(tp->board_part_number, "BCM57765");
13643                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13644                         strcpy(tp->board_part_number, "BCM57781");
13645                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13646                         strcpy(tp->board_part_number, "BCM57785");
13647                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13648                         strcpy(tp->board_part_number, "BCM57791");
13649                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13650                         strcpy(tp->board_part_number, "BCM57795");
13651                 else
13652                         goto nomatch;
13653         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13654                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13655                         strcpy(tp->board_part_number, "BCM57762");
13656                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13657                         strcpy(tp->board_part_number, "BCM57766");
13658                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13659                         strcpy(tp->board_part_number, "BCM57782");
13660                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13661                         strcpy(tp->board_part_number, "BCM57786");
13662                 else
13663                         goto nomatch;
13664         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13665                 strcpy(tp->board_part_number, "BCM95906");
13666         } else {
13667 nomatch:
13668                 strcpy(tp->board_part_number, "none");
13669         }
13670 }
13671
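      /* An image is valid if its first word carries the 0x0c000000
       * signature and its second word is zero.
       */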
13672 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13673 {
13674         u32 val;
13675
13676         if (tg3_nvram_read(tp, offset, &val) ||
13677             (val & 0xfc000000) != 0x0c000000 ||
13678             tg3_nvram_read(tp, offset + 4, &val) ||
13679             val != 0)
13680                 return 0;
13681
13682         return 1;
13683 }
13684
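      /* Append the bootcode version to tp->fw_ver, handling both the
       * newer 16-byte string format and the older major/minor encoding.
       */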
13685 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13686 {
13687         u32 val, offset, start, ver_offset;
13688         int i, dst_off;
13689         bool newver = false;
13690
13691         if (tg3_nvram_read(tp, 0xc, &offset) ||
13692             tg3_nvram_read(tp, 0x4, &start))
13693                 return;
13694
13695         offset = tg3_nvram_logical_addr(tp, offset);
13696
13697         if (tg3_nvram_read(tp, offset, &val))
13698                 return;
13699
13700         if ((val & 0xfc000000) == 0x0c000000) {
13701                 if (tg3_nvram_read(tp, offset + 4, &val))
13702                         return;
13703
13704                 if (val == 0)
13705                         newver = true;
13706         }
13707
13708         dst_off = strlen(tp->fw_ver);
13709
13710         if (newver) {
13711                 if (TG3_VER_SIZE - dst_off < 16 ||
13712                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13713                         return;
13714
13715                 offset = offset + ver_offset - start;
13716                 for (i = 0; i < 16; i += 4) {
13717                         __be32 v;
13718                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13719                                 return;
13720
13721                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13722                 }
13723         } else {
13724                 u32 major, minor;
13725
13726                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13727                         return;
13728
13729                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13730                         TG3_NVM_BCVER_MAJSFT;
13731                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13732                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13733                          "v%d.%02d", major, minor);
13734         }
13735 }
13736
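      /* Report the version of a hardware self-boot NVRAM image. */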
13737 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13738 {
13739         u32 val, major, minor;
13740
13741         /* Use native endian representation */
13742         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13743                 return;
13744
13745         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13746                 TG3_NVM_HWSB_CFG1_MAJSFT;
13747         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13748                 TG3_NVM_HWSB_CFG1_MINSFT;
13749
13750         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
13751 }
13752
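      /* Decode a self-boot format 1 version (major.minor plus an
       * optional build letter) into tp->fw_ver.
       */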
13753 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13754 {
13755         u32 offset, major, minor, build;
13756
13757         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13758
13759         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13760                 return;
13761
13762         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13763         case TG3_EEPROM_SB_REVISION_0:
13764                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13765                 break;
13766         case TG3_EEPROM_SB_REVISION_2:
13767                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13768                 break;
13769         case TG3_EEPROM_SB_REVISION_3:
13770                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13771                 break;
13772         case TG3_EEPROM_SB_REVISION_4:
13773                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13774                 break;
13775         case TG3_EEPROM_SB_REVISION_5:
13776                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13777                 break;
13778         case TG3_EEPROM_SB_REVISION_6:
13779                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13780                 break;
13781         default:
13782                 return;
13783         }
13784
13785         if (tg3_nvram_read(tp, offset, &val))
13786                 return;
13787
13788         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13789                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13790         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13791                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13792         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13793
13794         if (minor > 99 || build > 26)
13795                 return;
13796
13797         offset = strlen(tp->fw_ver);
13798         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13799                  " v%d.%02d", major, minor);
13800
13801         if (build > 0) {
13802                 offset = strlen(tp->fw_ver);
13803                 if (offset < TG3_VER_SIZE - 1)
13804                         tp->fw_ver[offset] = 'a' + build - 1;
13805         }
13806 }
13807
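      /* Find the ASF initialization entry in the NVRAM directory and
       * append the management firmware version string.
       */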
13808 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13809 {
13810         u32 val, offset, start;
13811         int i, vlen;
13812
13813         for (offset = TG3_NVM_DIR_START;
13814              offset < TG3_NVM_DIR_END;
13815              offset += TG3_NVM_DIRENT_SIZE) {
13816                 if (tg3_nvram_read(tp, offset, &val))
13817                         return;
13818
13819                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13820                         break;
13821         }
13822
13823         if (offset == TG3_NVM_DIR_END)
13824                 return;
13825
13826         if (!tg3_flag(tp, 5705_PLUS))
13827                 start = 0x08000000;
13828         else if (tg3_nvram_read(tp, offset - 4, &start))
13829                 return;
13830
13831         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13832             !tg3_fw_img_is_valid(tp, offset) ||
13833             tg3_nvram_read(tp, offset + 8, &val))
13834                 return;
13835
13836         offset += val - start;
13837
13838         vlen = strlen(tp->fw_ver);
13839
13840         tp->fw_ver[vlen++] = ',';
13841         tp->fw_ver[vlen++] = ' ';
13842
13843         for (i = 0; i < 4; i++) {
13844                 __be32 v;
13845                 if (tg3_nvram_read_be32(tp, offset, &v))
13846                         return;
13847
13848                 offset += sizeof(v);
13849
13850                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13851                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13852                         break;
13853                 }
13854
13855                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13856                 vlen += sizeof(v);
13857         }
13858 }
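      /* Note on the scan above: the NVRAM directory is walked one
       * TG3_NVM_DIRENT_SIZE entry at a time, each entry's type taken from
       * the bits above TG3_NVM_DIRTYPE_SHIFT in its first word.  Once the
       * ASF_INIT entry is found, the version string is copied out of the
       * firmware image 32 bits at a time and truncated at TG3_VER_SIZE.
       */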
13859
13860 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13861 {
13862         int vlen;
13863         u32 apedata;
13864         char *fwtype;
13865
13866         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13867                 return;
13868
13869         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13870         if (apedata != APE_SEG_SIG_MAGIC)
13871                 return;
13872
13873         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13874         if (!(apedata & APE_FW_STATUS_READY))
13875                 return;
13876
13877         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13878
13879         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13880                 tg3_flag_set(tp, APE_HAS_NCSI);
13881                 fwtype = "NCSI";
13882         } else {
13883                 fwtype = "DASH";
13884         }
13885
13886         vlen = strlen(tp->fw_ver);
13887
13888         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13889                  fwtype,
13890                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13891                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13892                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13893                  (apedata & APE_FW_VERSION_BLDMSK));
13894 }
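      /* Worked example (hypothetical APE word, assuming the byte-packed
       * major/minor/rev/build layout implied by the masks in tg3.h):
       * apedata = 0x01020304 would render as " NCSI v1.2.3.4", or
       * " DASH v1.2.3.4" when the NCSI feature bit is clear.
       */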
13895
13896 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13897 {
13898         u32 val;
13899         bool vpd_vers = false;
13900
13901         if (tp->fw_ver[0] != 0)
13902                 vpd_vers = true;
13903
13904         if (tg3_flag(tp, NO_NVRAM)) {
13905                 strcat(tp->fw_ver, "sb");
13906                 return;
13907         }
13908
13909         if (tg3_nvram_read(tp, 0, &val))
13910                 return;
13911
13912         if (val == TG3_EEPROM_MAGIC)
13913                 tg3_read_bc_ver(tp);
13914         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13915                 tg3_read_sb_ver(tp, val);
13916         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13917                 tg3_read_hwsb_ver(tp);
13918         else
13919                 return;
13920
13921         if (vpd_vers)
13922                 goto done;
13923
13924         if (tg3_flag(tp, ENABLE_APE)) {
13925                 if (tg3_flag(tp, ENABLE_ASF))
13926                         tg3_read_dash_ver(tp);
13927         } else if (tg3_flag(tp, ENABLE_ASF)) {
13928                 tg3_read_mgmtfw_ver(tp);
13929         }
13930
13931 done:
13932         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13933 }
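      /* The helpers above cooperate to leave a single human-readable
       * string in tp->fw_ver: any VPD version first, then the bootcode
       * version, and optionally a management-firmware suffix such as
       * " NCSI v1.2.3.4" (illustrative values only).  The string is
       * always NUL-terminated at TG3_VER_SIZE - 1.
       */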
13934
13935 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13936 {
13937         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13938                 return TG3_RX_RET_MAX_SIZE_5717;
13939         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13940                 return TG3_RX_RET_MAX_SIZE_5700;
13941         else
13942                 return TG3_RX_RET_MAX_SIZE_5705;
13943 }
13944
13945 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13946         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13947         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13948         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13949         { },
13950 };
13951
13952 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13953 {
13954         struct pci_dev *peer;
13955         unsigned int func, devnr = tp->pdev->devfn & ~7;
13956
13957         for (func = 0; func < 8; func++) {
13958                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13959                 if (peer && peer != tp->pdev)
13960                         break;
13961                 pci_dev_put(peer);
13962         }
13963         /* The 5704 can be configured in single-port mode; in that
13964          * case there is no peer device, so point peer at tp->pdev.
13965          */
13966         if (!peer) {
13967                 peer = tp->pdev;
13968                 return peer;
13969         }
13970
13971         /*
13972          * We don't need to keep the refcount elevated; there's no way
13973          * to remove one half of this device without removing the other.
13974          */
13975         pci_dev_put(peer);
13976
13977         return peer;
13978 }
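      /* Illustration of the devfn arithmetic above: a PCI devfn packs the
       * slot number in bits 7:3 and the function number in bits 2:0, so
       * for a hypothetical devfn of 0x21 (slot 4, function 1) devnr is
       * 0x20 and the loop probes functions 0x20..0x27, returning the
       * first device found that is not tp->pdev itself.
       */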
13979
13980 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
13981 {
13982         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
13983         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13984                 u32 reg;
13985
13986                 /* All devices that use the alternate
13987                  * ASIC REV location have a CPMU.
13988                  */
13989                 tg3_flag_set(tp, CPMU_PRESENT);
13990
13991                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13992                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13993                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13994                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13995                         reg = TG3PCI_GEN2_PRODID_ASICREV;
13996                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13997                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13998                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13999                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14000                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14001                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14002                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14003                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14004                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14005                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14006                         reg = TG3PCI_GEN15_PRODID_ASICREV;
14007                 else
14008                         reg = TG3PCI_PRODID_ASICREV;
14009
14010                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14011         }
14012
14013         /* Wrong chip ID in 5752 A0. This code can be removed later
14014          * as A0 is not in production.
14015          */
14016         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14017                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14018
14019         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14020             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14021             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14022                 tg3_flag_set(tp, 5717_PLUS);
14023
14024         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14025             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14026                 tg3_flag_set(tp, 57765_CLASS);
14027
14028         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14029                 tg3_flag_set(tp, 57765_PLUS);
14030
14031         /* Intentionally exclude ASIC_REV_5906 */
14032         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14033             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14034             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14035             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14036             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14037             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14038             tg3_flag(tp, 57765_PLUS))
14039                 tg3_flag_set(tp, 5755_PLUS);
14040
14041         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14042             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14043                 tg3_flag_set(tp, 5780_CLASS);
14044
14045         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14046             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14047             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14048             tg3_flag(tp, 5755_PLUS) ||
14049             tg3_flag(tp, 5780_CLASS))
14050                 tg3_flag_set(tp, 5750_PLUS);
14051
14052         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14053             tg3_flag(tp, 5750_PLUS))
14054                 tg3_flag_set(tp, 5705_PLUS);
14055 }
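      /* Illustration with a made-up value: a pci_chip_rev_id of
       * 0x05717000 yields GET_ASIC_REV() == 0x5717 (ASIC_REV_5717), so
       * the cascade above sets 5717_PLUS, then 57765_PLUS, 5755_PLUS,
       * 5750_PLUS and finally 5705_PLUS.  The shift widths behind
       * GET_ASIC_REV() and GET_CHIP_REV() are defined in tg3.h.
       */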
14056
14057 static int __devinit tg3_get_invariants(struct tg3 *tp)
14058 {
14059         u32 misc_ctrl_reg;
14060         u32 pci_state_reg, grc_misc_cfg;
14061         u32 val;
14062         u16 pci_cmd;
14063         int err;
14064
14065         /* Force memory write invalidate off.  If we leave it on,
14066          * then on 5700_BX chips we have to enable a workaround.
14067          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14068          * to match the cacheline size.  The Broadcom driver has this
14069          * workaround but turns MWI off all the time and so never uses
14070          * it.  This suggests that the workaround is insufficient.
14071          */
14072         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14073         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14074         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14075
14076         /* Important! -- Make sure register accesses are byteswapped
14077          * correctly.  Also, for those chips that require it, make
14078          * sure that indirect register accesses are enabled before
14079          * the first operation.
14080          */
14081         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14082                               &misc_ctrl_reg);
14083         tp->misc_host_ctrl |= (misc_ctrl_reg &
14084                                MISC_HOST_CTRL_CHIPREV);
14085         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14086                                tp->misc_host_ctrl);
14087
14088         tg3_detect_asic_rev(tp, misc_ctrl_reg);
14089
14090         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14091          * we need to disable memory and use configuration cycles
14092          * only to access all registers. The 5702/03 chips
14093          * can mistakenly decode the special cycles from the
14094          * ICH chipsets as memory write cycles, causing corruption
14095          * of register and memory space. Only certain ICH bridges
14096          * will drive special cycles with non-zero data during the
14097          * address phase which can fall within the 5703's address
14098          * range. This is not an ICH bug as the PCI spec allows
14099          * non-zero address during special cycles. However, only
14100          * these ICH bridges are known to drive non-zero addresses
14101          * during special cycles.
14102          *
14103          * Since special cycles do not cross PCI bridges, we only
14104          * enable this workaround if the 5703 is on the secondary
14105          * bus of these ICH bridges.
14106          */
14107         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14108             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14109                 static struct tg3_dev_id {
14110                         u32     vendor;
14111                         u32     device;
14112                         u32     rev;
14113                 } ich_chipsets[] = {
14114                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14115                           PCI_ANY_ID },
14116                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14117                           PCI_ANY_ID },
14118                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14119                           0xa },
14120                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14121                           PCI_ANY_ID },
14122                         { },
14123                 };
14124                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14125                 struct pci_dev *bridge = NULL;
14126
14127                 while (pci_id->vendor != 0) {
14128                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
14129                                                 bridge);
14130                         if (!bridge) {
14131                                 pci_id++;
14132                                 continue;
14133                         }
14134                         if (pci_id->rev != PCI_ANY_ID) {
14135                                 if (bridge->revision > pci_id->rev)
14136                                         continue;
14137                         }
14138                         if (bridge->subordinate &&
14139                             (bridge->subordinate->number ==
14140                              tp->pdev->bus->number)) {
14141                                 tg3_flag_set(tp, ICH_WORKAROUND);
14142                                 pci_dev_put(bridge);
14143                                 break;
14144                         }
14145                 }
14146         }
14147
14148         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14149                 static struct tg3_dev_id {
14150                         u32     vendor;
14151                         u32     device;
14152                 } bridge_chipsets[] = {
14153                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14154                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14155                         { },
14156                 };
14157                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14158                 struct pci_dev *bridge = NULL;
14159
14160                 while (pci_id->vendor != 0) {
14161                         bridge = pci_get_device(pci_id->vendor,
14162                                                 pci_id->device,
14163                                                 bridge);
14164                         if (!bridge) {
14165                                 pci_id++;
14166                                 continue;
14167                         }
14168                         if (bridge->subordinate &&
14169                             (bridge->subordinate->number <=
14170                              tp->pdev->bus->number) &&
14171                             (bridge->subordinate->subordinate >=
14172                              tp->pdev->bus->number)) {
14173                                 tg3_flag_set(tp, 5701_DMA_BUG);
14174                                 pci_dev_put(bridge);
14175                                 break;
14176                         }
14177                 }
14178         }
14179
14180         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14181          * DMA addresses > 40-bit.  This bridge may have additional
14182          * 57xx devices behind it, in some 4-port NIC designs for example.
14183          * Any tg3 device found behind the bridge will also need the 40-bit
14184          * DMA workaround.
14185          */
14186         if (tg3_flag(tp, 5780_CLASS)) {
14187                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14188                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14189         } else {
14190                 struct pci_dev *bridge = NULL;
14191
14192                 do {
14193                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14194                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
14195                                                 bridge);
14196                         if (bridge && bridge->subordinate &&
14197                             (bridge->subordinate->number <=
14198                              tp->pdev->bus->number) &&
14199                             (bridge->subordinate->subordinate >=
14200                              tp->pdev->bus->number)) {
14201                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14202                                 pci_dev_put(bridge);
14203                                 break;
14204                         }
14205                 } while (bridge);
14206         }
14207
14208         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14209             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14210                 tp->pdev_peer = tg3_find_peer(tp);
14211
14212         /* Determine TSO capabilities */
14213         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14214                 ; /* Do nothing. HW bug. */
14215         else if (tg3_flag(tp, 57765_PLUS))
14216                 tg3_flag_set(tp, HW_TSO_3);
14217         else if (tg3_flag(tp, 5755_PLUS) ||
14218                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14219                 tg3_flag_set(tp, HW_TSO_2);
14220         else if (tg3_flag(tp, 5750_PLUS)) {
14221                 tg3_flag_set(tp, HW_TSO_1);
14222                 tg3_flag_set(tp, TSO_BUG);
14223                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14224                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14225                         tg3_flag_clear(tp, TSO_BUG);
14226         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14227                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14228                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14229                 tg3_flag_set(tp, TSO_BUG);
14230                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14231                         tp->fw_needed = FIRMWARE_TG3TSO5;
14232                 else
14233                         tp->fw_needed = FIRMWARE_TG3TSO;
14234         }
14235
14236         /* Selectively allow TSO based on operating conditions */
14237         if (tg3_flag(tp, HW_TSO_1) ||
14238             tg3_flag(tp, HW_TSO_2) ||
14239             tg3_flag(tp, HW_TSO_3) ||
14240             tp->fw_needed) {
14241                 /* For firmware TSO, assume ASF is disabled.
14242                  * We'll disable TSO later if we discover ASF
14243                  * is enabled in tg3_get_eeprom_hw_cfg().
14244                  */
14245                 tg3_flag_set(tp, TSO_CAPABLE);
14246         } else {
14247                 tg3_flag_clear(tp, TSO_CAPABLE);
14248                 tg3_flag_clear(tp, TSO_BUG);
14249                 tp->fw_needed = NULL;
14250         }
14251
14252         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14253                 tp->fw_needed = FIRMWARE_TG3;
14254
14255         tp->irq_max = 1;
14256
14257         if (tg3_flag(tp, 5750_PLUS)) {
14258                 tg3_flag_set(tp, SUPPORT_MSI);
14259                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14260                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14261                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14262                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14263                      tp->pdev_peer == tp->pdev))
14264                         tg3_flag_clear(tp, SUPPORT_MSI);
14265
14266                 if (tg3_flag(tp, 5755_PLUS) ||
14267                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14268                         tg3_flag_set(tp, 1SHOT_MSI);
14269                 }
14270
14271                 if (tg3_flag(tp, 57765_PLUS)) {
14272                         tg3_flag_set(tp, SUPPORT_MSIX);
14273                         tp->irq_max = TG3_IRQ_MAX_VECS;
14274                         tg3_rss_init_dflt_indir_tbl(tp);
14275                 }
14276         }
14277
14278         if (tg3_flag(tp, 5755_PLUS) ||
14279             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14280                 tg3_flag_set(tp, SHORT_DMA_BUG);
14281
14282         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14283                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14284
14285         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14286             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14287             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14288                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14289
14290         if (tg3_flag(tp, 57765_PLUS) &&
14291             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14292                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14293
14294         if (!tg3_flag(tp, 5705_PLUS) ||
14295             tg3_flag(tp, 5780_CLASS) ||
14296             tg3_flag(tp, USE_JUMBO_BDFLAG))
14297                 tg3_flag_set(tp, JUMBO_CAPABLE);
14298
14299         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14300                               &pci_state_reg);
14301
14302         if (pci_is_pcie(tp->pdev)) {
14303                 u16 lnkctl;
14304
14305                 tg3_flag_set(tp, PCI_EXPRESS);
14306
14307                 pci_read_config_word(tp->pdev,
14308                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14309                                      &lnkctl);
14310                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14311                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14312                             ASIC_REV_5906) {
14313                                 tg3_flag_clear(tp, HW_TSO_2);
14314                                 tg3_flag_clear(tp, TSO_CAPABLE);
14315                         }
14316                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14317                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14318                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14319                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14320                                 tg3_flag_set(tp, CLKREQ_BUG);
14321                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14322                         tg3_flag_set(tp, L1PLLPD_EN);
14323                 }
14324         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14325                 /* BCM5785 devices are effectively PCIe devices, and should
14326                  * follow PCIe codepaths, but do not have a PCIe capabilities
14327                  * section.
14328                  */
14329                 tg3_flag_set(tp, PCI_EXPRESS);
14330         } else if (!tg3_flag(tp, 5705_PLUS) ||
14331                    tg3_flag(tp, 5780_CLASS)) {
14332                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14333                 if (!tp->pcix_cap) {
14334                         dev_err(&tp->pdev->dev,
14335                                 "Cannot find PCI-X capability, aborting\n");
14336                         return -EIO;
14337                 }
14338
14339                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14340                         tg3_flag_set(tp, PCIX_MODE);
14341         }
14342
14343         /* If we have an AMD 762 or VIA K8T800 chipset, write
14344          * reordering to the mailbox registers done by the host
14345          * controller can cause major troubles.  We read back from
14346          * every mailbox register write to force the writes to be
14347          * posted to the chip in order.
14348          */
14349         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14350             !tg3_flag(tp, PCI_EXPRESS))
14351                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
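              /* The read-back flush referred to above is implemented by
               * tg3_write_flush_reg32() earlier in this file; conceptually
               * it is just the posted write followed by a read of the same
               * register, i.e. writel(val, mbox) then readl(mbox).
               */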
14352
14353         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14354                              &tp->pci_cacheline_sz);
14355         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14356                              &tp->pci_lat_timer);
14357         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14358             tp->pci_lat_timer < 64) {
14359                 tp->pci_lat_timer = 64;
14360                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14361                                       tp->pci_lat_timer);
14362         }
14363
14364         /* Important! -- It is critical that the PCI-X hw workaround
14365          * situation is decided before the first MMIO register access.
14366          */
14367         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14368                 /* 5700 BX chips need to have their TX producer index
14369                  * mailboxes written twice to workaround a bug.
14370                  */
14371                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14372
14373                 /* If we are in PCI-X mode, enable register write workaround.
14374                  *
14375                  * The workaround is to use indirect register accesses
14376                  * for all chip writes not to mailbox registers.
14377                  */
14378                 if (tg3_flag(tp, PCIX_MODE)) {
14379                         u32 pm_reg;
14380
14381                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14382
14383                         /* The chip can have its power management PCI config
14384                          * space registers clobbered due to this bug.
14385                          * So explicitly force the chip into D0 here.
14386                          */
14387                         pci_read_config_dword(tp->pdev,
14388                                               tp->pm_cap + PCI_PM_CTRL,
14389                                               &pm_reg);
14390                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14391                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14392                         pci_write_config_dword(tp->pdev,
14393                                                tp->pm_cap + PCI_PM_CTRL,
14394                                                pm_reg);
14395
14396                         /* Also, force SERR#/PERR# in PCI command. */
14397                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14398                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14399                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14400                 }
14401         }
14402
14403         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14404                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14405         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14406                 tg3_flag_set(tp, PCI_32BIT);
14407
14408         /* Chip-specific fixup from Broadcom driver */
14409         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14410             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14411                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14412                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14413         }
14414
14415         /* Default fast path register access methods */
14416         tp->read32 = tg3_read32;
14417         tp->write32 = tg3_write32;
14418         tp->read32_mbox = tg3_read32;
14419         tp->write32_mbox = tg3_write32;
14420         tp->write32_tx_mbox = tg3_write32;
14421         tp->write32_rx_mbox = tg3_write32;
14422
14423         /* Various workaround register access methods */
14424         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14425                 tp->write32 = tg3_write_indirect_reg32;
14426         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14427                  (tg3_flag(tp, PCI_EXPRESS) &&
14428                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14429                 /*
14430                  * Back-to-back register writes can cause problems on these
14431                  * chips; the workaround is to read back all register writes
14432                  * except those to mailbox regs.
14433                  *
14434                  * See tg3_write_indirect_reg32().
14435                  */
14436                 tp->write32 = tg3_write_flush_reg32;
14437         }
14438
14439         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14440                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14441                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14442                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14443         }
14444
14445         if (tg3_flag(tp, ICH_WORKAROUND)) {
14446                 tp->read32 = tg3_read_indirect_reg32;
14447                 tp->write32 = tg3_write_indirect_reg32;
14448                 tp->read32_mbox = tg3_read_indirect_mbox;
14449                 tp->write32_mbox = tg3_write_indirect_mbox;
14450                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14451                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14452
14453                 iounmap(tp->regs);
14454                 tp->regs = NULL;
14455
14456                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14457                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14458                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14459         }
14460         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14461                 tp->read32_mbox = tg3_read32_mbox_5906;
14462                 tp->write32_mbox = tg3_write32_mbox_5906;
14463                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14464                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14465         }
14466
14467         if (tp->write32 == tg3_write_indirect_reg32 ||
14468             (tg3_flag(tp, PCIX_MODE) &&
14469              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14470               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14471                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14472
14473         /* The memory arbiter has to be enabled in order for SRAM accesses
14474          * to succeed.  Normally on powerup the tg3 chip firmware will make
14475          * sure it is enabled, but other entities such as system netboot
14476          * code might disable it.
14477          */
14478         val = tr32(MEMARB_MODE);
14479         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14480
14481         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14482         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14483             tg3_flag(tp, 5780_CLASS)) {
14484                 if (tg3_flag(tp, PCIX_MODE)) {
14485                         pci_read_config_dword(tp->pdev,
14486                                               tp->pcix_cap + PCI_X_STATUS,
14487                                               &val);
14488                         tp->pci_fn = val & 0x7;
14489                 }
14490         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14491                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14492                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14493                     NIC_SRAM_CPMUSTAT_SIG) {
14494                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14495                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14496                 }
14497         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14498                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14499                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14500                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14501                     NIC_SRAM_CPMUSTAT_SIG) {
14502                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14503                                      TG3_CPMU_STATUS_FSHFT_5719;
14504                 }
14505         }
14506
14507         /* Get eeprom hw config before calling tg3_set_power_state().
14508          * In particular, the TG3_FLAG_IS_NIC flag must be
14509          * determined before calling tg3_set_power_state() so that
14510          * we know whether or not to switch out of Vaux power.
14511          * When the flag is set, it means that GPIO1 is used for eeprom
14512          * write protect and also implies that it is a LOM where GPIOs
14513          * are not used to switch power.
14514          */
14515         tg3_get_eeprom_hw_cfg(tp);
14516
14517         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14518                 tg3_flag_clear(tp, TSO_CAPABLE);
14519                 tg3_flag_clear(tp, TSO_BUG);
14520                 tp->fw_needed = NULL;
14521         }
14522
14523         if (tg3_flag(tp, ENABLE_APE)) {
14524                 /* Allow reads and writes to the
14525                  * APE register and memory space.
14526                  */
14527                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14528                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14529                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14530                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14531                                        pci_state_reg);
14532
14533                 tg3_ape_lock_init(tp);
14534         }
14535
14536         /* Set up tp->grc_local_ctrl before calling
14537          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14538          * will bring 5700's external PHY out of reset.
14539          * It is also used as eeprom write protect on LOMs.
14540          */
14541         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14542         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14543             tg3_flag(tp, EEPROM_WRITE_PROT))
14544                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14545                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14546         /* Unused GPIO3 must be driven as output on 5752 because there
14547          * are no pull-up resistors on unused GPIO pins.
14548          */
14549         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14550                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14551
14552         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14553             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14554             tg3_flag(tp, 57765_CLASS))
14555                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14556
14557         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14558             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14559                 /* Turn off the debug UART. */
14560                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14561                 if (tg3_flag(tp, IS_NIC))
14562                         /* Keep VMain power. */
14563                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14564                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14565         }
14566
14567         /* Switch out of Vaux if it is a NIC */
14568         tg3_pwrsrc_switch_to_vmain(tp);
14569
14570         /* Derive initial jumbo mode from MTU assigned in
14571          * ether_setup() via the alloc_etherdev() call
14572          */
14573         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14574                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14575
14576         /* Determine WakeOnLan speed to use. */
14577         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14578             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14579             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14580             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14581                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14582         } else {
14583                 tg3_flag_set(tp, WOL_SPEED_100MB);
14584         }
14585
14586         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14587                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14588
14589         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
14590         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14591             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14592              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14593              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14594             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14595             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14596                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14597
14598         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14599             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14600                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14601         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14602                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14603
14604         if (tg3_flag(tp, 5705_PLUS) &&
14605             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14606             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14607             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14608             !tg3_flag(tp, 57765_PLUS)) {
14609                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14610                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14611                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14612                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14613                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14614                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14615                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14616                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14617                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14618                 } else
14619                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14620         }
14621
14622         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14623             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14624                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14625                 if (tp->phy_otp == 0)
14626                         tp->phy_otp = TG3_OTP_DEFAULT;
14627         }
14628
14629         if (tg3_flag(tp, CPMU_PRESENT))
14630                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14631         else
14632                 tp->mi_mode = MAC_MI_MODE_BASE;
14633
14634         tp->coalesce_mode = 0;
14635         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14636             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14637                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14638
14639         /* Set these bits to enable statistics workaround. */
14640         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14641             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14642             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14643                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14644                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14645         }
14646
14647         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14648             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14649                 tg3_flag_set(tp, USE_PHYLIB);
14650
14651         err = tg3_mdio_init(tp);
14652         if (err)
14653                 return err;
14654
14655         /* Initialize data/descriptor byte/word swapping. */
14656         val = tr32(GRC_MODE);
14657         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14658                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14659                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14660                         GRC_MODE_B2HRX_ENABLE |
14661                         GRC_MODE_HTX2B_ENABLE |
14662                         GRC_MODE_HOST_STACKUP);
14663         else
14664                 val &= GRC_MODE_HOST_STACKUP;
14665
14666         tw32(GRC_MODE, val | tp->grc_mode);
14667
14668         tg3_switch_clocks(tp);
14669
14670         /* Clear this out for sanity. */
14671         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14672
14673         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14674                               &pci_state_reg);
14675         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14676             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14677                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14678
14679                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14680                     chiprevid == CHIPREV_ID_5701_B0 ||
14681                     chiprevid == CHIPREV_ID_5701_B2 ||
14682                     chiprevid == CHIPREV_ID_5701_B5) {
14683                         void __iomem *sram_base;
14684
14685                         /* Write some dummy words into the SRAM status block
14686                          * area and see if they read back correctly.  If the
14687                          * readback is bad, force-enable the PCI-X workaround.
14688                          */
14689                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14690
14691                         writel(0x00000000, sram_base);
14692                         writel(0x00000000, sram_base + 4);
14693                         writel(0xffffffff, sram_base + 4);
14694                         if (readl(sram_base) != 0x00000000)
14695                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14696                 }
14697         }
14698
14699         udelay(50);
14700         tg3_nvram_init(tp);
14701
14702         grc_misc_cfg = tr32(GRC_MISC_CFG);
14703         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14704
14705         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14706             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14707              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14708                 tg3_flag_set(tp, IS_5788);
14709
14710         if (!tg3_flag(tp, IS_5788) &&
14711             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14712                 tg3_flag_set(tp, TAGGED_STATUS);
14713         if (tg3_flag(tp, TAGGED_STATUS)) {
14714                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14715                                       HOSTCC_MODE_CLRTICK_TXBD);
14716
14717                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14718                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14719                                        tp->misc_host_ctrl);
14720         }
14721
14722         /* Preserve the APE MAC_MODE bits */
14723         if (tg3_flag(tp, ENABLE_APE))
14724                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14725         else
14726                 tp->mac_mode = 0;
14727
14728         /* these are limited to 10/100 only */
14729         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14730              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14731             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14732              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14733              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14734               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14735               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14736             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14737              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14738               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14739               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14740             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14741             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14742             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14743             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14744                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14745
14746         err = tg3_phy_probe(tp);
14747         if (err) {
14748                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14749                 /* ... but do not return immediately ... */
14750                 tg3_mdio_fini(tp);
14751         }
14752
14753         tg3_read_vpd(tp);
14754         tg3_read_fw_ver(tp);
14755
14756         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14757                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14758         } else {
14759                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14760                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14761                 else
14762                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14763         }
14764
14765         /* 5700 {AX,BX} chips have a broken status block link
14766          * change bit implementation, so we must use the
14767          * status register in those cases.
14768          */
14769         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14770                 tg3_flag_set(tp, USE_LINKCHG_REG);
14771         else
14772                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14773
14774         /* The led_ctrl is set during tg3_phy_probe; here we might
14775          * have to force the link status polling mechanism based
14776          * upon subsystem IDs.
14777          */
14778         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14779             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14780             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14781                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14782                 tg3_flag_set(tp, USE_LINKCHG_REG);
14783         }
14784
14785         /* For all SERDES we poll the MAC status register. */
14786         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14787                 tg3_flag_set(tp, POLL_SERDES);
14788         else
14789                 tg3_flag_clear(tp, POLL_SERDES);
14790
14791         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14792         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14793         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14794             tg3_flag(tp, PCIX_MODE)) {
14795                 tp->rx_offset = NET_SKB_PAD;
14796 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14797                 tp->rx_copy_thresh = ~(u16)0;
14798 #endif
14799         }
14800
14801         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14802         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14803         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14804
14805         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14806
14807         /* Increment the rx prod index on the rx std ring by at most
14808          * 8 for these chips to work around hw errata.
14809          */
14810         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14811             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14812             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14813                 tp->rx_std_max_post = 8;
14814
14815         if (tg3_flag(tp, ASPM_WORKAROUND))
14816                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14817                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14818
14819         return err;
14820 }
14821
14822 #ifdef CONFIG_SPARC
14823 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14824 {
14825         struct net_device *dev = tp->dev;
14826         struct pci_dev *pdev = tp->pdev;
14827         struct device_node *dp = pci_device_to_OF_node(pdev);
14828         const unsigned char *addr;
14829         int len;
14830
14831         addr = of_get_property(dp, "local-mac-address", &len);
14832         if (addr && len == 6) {
14833                 memcpy(dev->dev_addr, addr, 6);
14834                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14835                 return 0;
14836         }
14837         return -ENODEV;
14838 }
14839
14840 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14841 {
14842         struct net_device *dev = tp->dev;
14843
14844         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14845         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14846         return 0;
14847 }
14848 #endif
14849
14850 static int __devinit tg3_get_device_address(struct tg3 *tp)
14851 {
14852         struct net_device *dev = tp->dev;
14853         u32 hi, lo, mac_offset;
14854         int addr_ok = 0;
14855
14856 #ifdef CONFIG_SPARC
14857         if (!tg3_get_macaddr_sparc(tp))
14858                 return 0;
14859 #endif
14860
14861         mac_offset = 0x7c;
14862         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14863             tg3_flag(tp, 5780_CLASS)) {
14864                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14865                         mac_offset = 0xcc;
14866                 if (tg3_nvram_lock(tp))
14867                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14868                 else
14869                         tg3_nvram_unlock(tp);
14870         } else if (tg3_flag(tp, 5717_PLUS)) {
14871                 if (tp->pci_fn & 1)
14872                         mac_offset = 0xcc;
14873                 if (tp->pci_fn > 1)
14874                         mac_offset += 0x18c;
14875         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14876                 mac_offset = 0x10;
14877
14878         /* First try to get it from MAC address mailbox. */
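              /* The check below treats 0x484b (ASCII "HK") in the upper 16
               * bits of the high word as a validity signature left there
               * by bootcode.
               */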
14879         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14880         if ((hi >> 16) == 0x484b) {
14881                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14882                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14883
14884                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14885                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14886                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14887                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14888                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14889
14890                 /* Some old bootcode may report a 0 MAC address in SRAM */
14891                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14892         }
14893         if (!addr_ok) {
14894                 /* Next, try NVRAM. */
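                      /* Layout note: the MAC address spans the two
                       * big-endian words read below, with the first two
                       * octets in the last two bytes of hi and the
                       * remaining four filling lo, hence the offset-by-2
                       * memcpy.
                       */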
14895                 if (!tg3_flag(tp, NO_NVRAM) &&
14896                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14897                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14898                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14899                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14900                 }
14901                 /* Finally just fetch it out of the MAC control regs. */
14902                 else {
14903                         hi = tr32(MAC_ADDR_0_HIGH);
14904                         lo = tr32(MAC_ADDR_0_LOW);
14905
14906                         dev->dev_addr[5] = lo & 0xff;
14907                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14908                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14909                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14910                         dev->dev_addr[1] = hi & 0xff;
14911                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14912                 }
14913         }
14914
14915         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14916 #ifdef CONFIG_SPARC
14917                 if (!tg3_get_default_macaddr_sparc(tp))
14918                         return 0;
14919 #endif
14920                 return -EINVAL;
14921         }
14922         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14923         return 0;
14924 }
14925
14926 #define BOUNDARY_SINGLE_CACHELINE       1
14927 #define BOUNDARY_MULTI_CACHELINE        2
14928
14929 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14930 {
14931         int cacheline_size;
14932         u8 byte;
14933         int goal;
14934
14935         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14936         if (byte == 0)
14937                 cacheline_size = 1024;
14938         else
14939                 cacheline_size = (int) byte * 4;
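              /* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, so e.g.
               * a register value of 16 means a 64-byte cache line; an
               * unprogrammed value of 0 is treated as 1024 bytes here.
               */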
14940
14941         /* On 5703 and later chips, the boundary bits have no
14942          * effect.
14943          */
14944         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14945             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14946             !tg3_flag(tp, PCI_EXPRESS))
14947                 goto out;
14948
14949 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14950         goal = BOUNDARY_MULTI_CACHELINE;
14951 #else
14952 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14953         goal = BOUNDARY_SINGLE_CACHELINE;
14954 #else
14955         goal = 0;
14956 #endif
14957 #endif
14958
14959         if (tg3_flag(tp, 57765_PLUS)) {
14960                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14961                 goto out;
14962         }
14963
14964         if (!goal)
14965                 goto out;
14966
14967         /* PCI controllers on most RISC systems tend to disconnect
14968          * when a device tries to burst across a cache-line boundary.
14969          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14970          *
14971          * Unfortunately, for PCI-E there are only limited
14972          * write-side controls for this, and thus for reads
14973          * we will still get the disconnects.  We'll also waste
14974          * these PCI cycles for both read and write for chips
14975          * other than 5700 and 5701 which do not implement the
14976          * boundary bits.
14977          */
14978         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14979                 switch (cacheline_size) {
14980                 case 16:
14981                 case 32:
14982                 case 64:
14983                 case 128:
14984                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14985                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14986                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14987                         } else {
14988                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14989                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14990                         }
14991                         break;
14992
14993                 case 256:
14994                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14995                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14996                         break;
14997
14998                 default:
14999                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15000                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15001                         break;
15002                 }
15003         } else if (tg3_flag(tp, PCI_EXPRESS)) {
15004                 switch (cacheline_size) {
15005                 case 16:
15006                 case 32:
15007                 case 64:
15008                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15009                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15010                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15011                                 break;
15012                         }
15013                         /* fallthrough */
15014                 case 128:
15015                 default:
15016                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15017                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15018                         break;
15019                 }
15020         } else {
15021                 switch (cacheline_size) {
15022                 case 16:
15023                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15024                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
15025                                         DMA_RWCTRL_WRITE_BNDRY_16);
15026                                 break;
15027                         }
15028                         /* fallthrough */
15029                 case 32:
15030                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15031                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15032                                         DMA_RWCTRL_WRITE_BNDRY_32);
15033                                 break;
15034                         }
15035                         /* fallthrough */
15036                 case 64:
15037                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15038                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15039                                         DMA_RWCTRL_WRITE_BNDRY_64);
15040                                 break;
15041                         }
15042                         /* fallthrough */
15043                 case 128:
15044                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15045                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15046                                         DMA_RWCTRL_WRITE_BNDRY_128);
15047                                 break;
15048                         }
15049                         /* fallthrough */
15050                 case 256:
15051                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
15052                                 DMA_RWCTRL_WRITE_BNDRY_256);
15053                         break;
15054                 case 512:
15055                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
15056                                 DMA_RWCTRL_WRITE_BNDRY_512);
15057                         break;
15058                 case 1024:
15059                 default:
15060                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15061                                 DMA_RWCTRL_WRITE_BNDRY_1024);
15062                         break;
15063                 }
15064         }
15065
15066 out:
15067         return val;
15068 }
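
/*
 * Worked example of the ladder above (a sketch, not extra driver
 * logic): on a SPARC64 host goal is BOUNDARY_SINGLE_CACHELINE, so a
 * 64-byte cache line on a conventional PCI bus ends up selecting
 *
 *	val |= (DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64);
 *
 * which clips DMA bursts at every cache line and avoids the target
 * disconnects described in the comment above.
 */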
15069
15070 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15071 {
15072         struct tg3_internal_buffer_desc test_desc;
15073         u32 sram_dma_descs;
15074         int i, ret;
15075
15076         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15077
15078         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15079         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15080         tw32(RDMAC_STATUS, 0);
15081         tw32(WDMAC_STATUS, 0);
15082
15083         tw32(BUFMGR_MODE, 0);
15084         tw32(FTQ_RESET, 0);
15085
15086         test_desc.addr_hi = ((u64) buf_dma) >> 32;
15087         test_desc.addr_lo = buf_dma & 0xffffffff;
15088         test_desc.nic_mbuf = 0x00002100;        /* NIC SRAM address of the test buffer */
15089         test_desc.len = size;
15090
15091         /*
15092          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
15093          * the *second* time the tg3 driver was getting loaded after an
15094          * initial scan.
15095          *
15096          * Broadcom tells me:
15097          *   ...the DMA engine is connected to the GRC block and a DMA
15098          *   reset may affect the GRC block in some unpredictable way...
15099          *   The behavior of resets to individual blocks has not been tested.
15100          *
15101          * Broadcom noted the GRC reset will also reset all sub-components.
15102          */
15103         if (to_device) {
15104                 test_desc.cqid_sqid = (13 << 8) | 2;
15105
15106                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15107                 udelay(40);
15108         } else {
15109                 test_desc.cqid_sqid = (16 << 8) | 7;
15110
15111                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15112                 udelay(40);
15113         }
15114         test_desc.flags = 0x00000005;
15115
15116         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15117                 u32 val;
15118
15119                 val = *(((u32 *)&test_desc) + i);
15120                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15121                                        sram_dma_descs + (i * sizeof(u32)));
15122                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15123         }
15124         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15125
15126         if (to_device)
15127                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15128         else
15129                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15130
15131         ret = -ENODEV;
15132         for (i = 0; i < 40; i++) {      /* poll completion for up to ~4 ms */
15133                 u32 val;
15134
15135                 if (to_device)
15136                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15137                 else
15138                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15139                 if ((val & 0xffff) == sram_dma_descs) {
15140                         ret = 0;
15141                         break;
15142                 }
15143
15144                 udelay(100);
15145         }
15146
15147         return ret;
15148 }
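
/*
 * The descriptor upload loop above programs the chip's SRAM through
 * the memory window in PCI config space rather than through MMIO.  A
 * minimal sketch of that idiom as a helper (hypothetical; the driver
 * open-codes it and clears the window base when finished):
 */
static inline void tg3_sram_write_cfg(struct tg3 *tp, u32 off, u32 val)
{
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
}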
15149
15150 #define TEST_BUFFER_SIZE        0x2000
15151
15152 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15153         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15154         { },
15155 };
15156
15157 static int __devinit tg3_test_dma(struct tg3 *tp)
15158 {
15159         dma_addr_t buf_dma;
15160         u32 *buf, saved_dma_rwctrl;
15161         int ret = 0;
15162
15163         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15164                                  &buf_dma, GFP_KERNEL);
15165         if (!buf) {
15166                 ret = -ENOMEM;
15167                 goto out_nofree;
15168         }
15169
15170         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15171                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15172
15173         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15174
15175         if (tg3_flag(tp, 57765_PLUS))
15176                 goto out;
15177
15178         if (tg3_flag(tp, PCI_EXPRESS)) {
15179                 /* DMA read watermark not used on PCIE */
15180                 tp->dma_rwctrl |= 0x00180000;
15181         } else if (!tg3_flag(tp, PCIX_MODE)) {
15182                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15183                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15184                         tp->dma_rwctrl |= 0x003f0000;
15185                 else
15186                         tp->dma_rwctrl |= 0x003f000f;
15187         } else {
15188                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15189                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15190                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15191                         u32 read_water = 0x7;
15192
15193                         /* If the 5704 is behind the EPB bridge, we can
15194                          * do the less restrictive ONE_DMA workaround for
15195                          * better performance.
15196                          */
15197                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15198                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15199                                 tp->dma_rwctrl |= 0x8000;
15200                         else if (ccval == 0x6 || ccval == 0x7)
15201                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15202
15203                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15204                                 read_water = 4;
15205                         /* Set bit 23 to enable PCIX hw bug fix */
15206                         tp->dma_rwctrl |=
15207                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15208                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15209                                 (1 << 23);
15210                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15211                         /* 5780 always in PCIX mode */
15212                         tp->dma_rwctrl |= 0x00144000;
15213                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15214                         /* 5714 always in PCIX mode */
15215                         tp->dma_rwctrl |= 0x00148000;
15216                 } else {
15217                         tp->dma_rwctrl |= 0x001b000f;
15218                 }
15219         }
15220
15221         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15222             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15223                 tp->dma_rwctrl &= 0xfffffff0;
15224
15225         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15226             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15227                 /* Remove this if it causes problems for some boards. */
15228                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15229
15230                 /* On 5700/5701 chips, we need to set this bit.
15231                  * Otherwise the chip will issue cacheline transactions
15232                  * to streamable DMA memory with not all the byte
15233                  * enables turned on.  This is an error on several
15234                  * RISC PCI controllers, in particular sparc64.
15235                  *
15236                  * On 5703/5704 chips, this bit has been reassigned
15237                  * a different meaning.  In particular, it is used
15238                  * on those chips to enable a PCI-X workaround.
15239                  */
15240                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15241         }
15242
15243         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15244
15245 #if 0
15246         /* Unneeded, already done by tg3_get_invariants.  */
15247         tg3_switch_clocks(tp);
15248 #endif
15249
15250         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15251             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15252                 goto out;
15253
15254         /* It is best to perform the DMA test with maximum write burst size
15255          * to expose the 5700/5701 write DMA bug.
15256          */
15257         saved_dma_rwctrl = tp->dma_rwctrl;
15258         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15259         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15260
15261         while (1) {
15262                 u32 *p = buf, i;
15263
15264                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15265                         p[i] = i;
15266
15267                 /* Send the buffer to the chip. */
15268                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15269                 if (ret) {
15270                         dev_err(&tp->pdev->dev,
15271                                 "%s: Buffer write failed. err = %d\n",
15272                                 __func__, ret);
15273                         break;
15274                 }
15275
15276 #if 0
15277                 /* validate data reached card RAM correctly. */
15278                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15279                         u32 val;
15280                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
15281                         if (le32_to_cpu(val) != p[i]) {
15282                                 dev_err(&tp->pdev->dev,
15283                                         "%s: Buffer corrupted on device! "
15284                                         "(%d != %d)\n", __func__, val, i);
15285                                 /* ret = -ENODEV here? */
15286                         }
15287                         p[i] = 0;
15288                 }
15289 #endif
15290                 /* Now read it back. */
15291                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15292                 if (ret) {
15293                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15294                                 "err = %d\n", __func__, ret);
15295                         break;
15296                 }
15297
15298                 /* Verify it. */
15299                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15300                         if (p[i] == i)
15301                                 continue;
15302
15303                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15304                             DMA_RWCTRL_WRITE_BNDRY_16) {
15305                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15306                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15307                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15308                                 break;
15309                         } else {
15310                                 dev_err(&tp->pdev->dev,
15311                                         "%s: Buffer corrupted on read back! "
15312                                         "(%d != %d)\n", __func__, p[i], i);
15313                                 ret = -ENODEV;
15314                                 goto out;
15315                         }
15316                 }
15317
15318                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15319                         /* Success. */
15320                         ret = 0;
15321                         break;
15322                 }
15323         }
15324         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15325             DMA_RWCTRL_WRITE_BNDRY_16) {
15326                 /* DMA test passed without adjusting DMA boundary,
15327                  * now look for chipsets that are known to expose the
15328                  * DMA bug without failing the test.
15329                  */
15330                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15331                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15332                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15333                 } else {
15334                         /* Safe to use the calculated DMA boundary. */
15335                         tp->dma_rwctrl = saved_dma_rwctrl;
15336                 }
15337
15338                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15339         }
15340
15341 out:
15342         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15343 out_nofree:
15344         return ret;
15345 }
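
/*
 * The retry logic above, distilled (sketch): run the loopback with the
 * widest write burst first, which is what exposes the 5700/5701 write
 * DMA bug; on a verify miscompare, clamp the write boundary to 16
 * bytes and rerun, and only a miscompare at the 16-byte boundary is
 * fatal.  A clean run at full burst still falls back to the 16-byte
 * boundary when one of the chipsets in tg3_dma_wait_state_chipsets is
 * present, since those are known to hide the bug from the test.
 */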
15346
15347 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15348 {
15349         if (tg3_flag(tp, 57765_PLUS)) {
15350                 tp->bufmgr_config.mbuf_read_dma_low_water =
15351                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15352                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15353                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15354                 tp->bufmgr_config.mbuf_high_water =
15355                         DEFAULT_MB_HIGH_WATER_57765;
15356
15357                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15358                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15359                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15360                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15361                 tp->bufmgr_config.mbuf_high_water_jumbo =
15362                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15363         } else if (tg3_flag(tp, 5705_PLUS)) {
15364                 tp->bufmgr_config.mbuf_read_dma_low_water =
15365                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15366                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15367                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15368                 tp->bufmgr_config.mbuf_high_water =
15369                         DEFAULT_MB_HIGH_WATER_5705;
15370                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15371                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15372                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15373                         tp->bufmgr_config.mbuf_high_water =
15374                                 DEFAULT_MB_HIGH_WATER_5906;
15375                 }
15376
15377                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15378                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15379                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15380                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15381                 tp->bufmgr_config.mbuf_high_water_jumbo =
15382                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15383         } else {
15384                 tp->bufmgr_config.mbuf_read_dma_low_water =
15385                         DEFAULT_MB_RDMA_LOW_WATER;
15386                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15387                         DEFAULT_MB_MACRX_LOW_WATER;
15388                 tp->bufmgr_config.mbuf_high_water =
15389                         DEFAULT_MB_HIGH_WATER;
15390
15391                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15392                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15393                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15394                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15395                 tp->bufmgr_config.mbuf_high_water_jumbo =
15396                         DEFAULT_MB_HIGH_WATER_JUMBO;
15397         }
15398
15399         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15400         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15401 }
15402
15403 static char * __devinit tg3_phy_string(struct tg3 *tp)
15404 {
15405         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15406         case TG3_PHY_ID_BCM5400:        return "5400";
15407         case TG3_PHY_ID_BCM5401:        return "5401";
15408         case TG3_PHY_ID_BCM5411:        return "5411";
15409         case TG3_PHY_ID_BCM5701:        return "5701";
15410         case TG3_PHY_ID_BCM5703:        return "5703";
15411         case TG3_PHY_ID_BCM5704:        return "5704";
15412         case TG3_PHY_ID_BCM5705:        return "5705";
15413         case TG3_PHY_ID_BCM5750:        return "5750";
15414         case TG3_PHY_ID_BCM5752:        return "5752";
15415         case TG3_PHY_ID_BCM5714:        return "5714";
15416         case TG3_PHY_ID_BCM5780:        return "5780";
15417         case TG3_PHY_ID_BCM5755:        return "5755";
15418         case TG3_PHY_ID_BCM5787:        return "5787";
15419         case TG3_PHY_ID_BCM5784:        return "5784";
15420         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15421         case TG3_PHY_ID_BCM5906:        return "5906";
15422         case TG3_PHY_ID_BCM5761:        return "5761";
15423         case TG3_PHY_ID_BCM5718C:       return "5718C";
15424         case TG3_PHY_ID_BCM5718S:       return "5718S";
15425         case TG3_PHY_ID_BCM57765:       return "57765";
15426         case TG3_PHY_ID_BCM5719C:       return "5719C";
15427         case TG3_PHY_ID_BCM5720C:       return "5720C";
15428         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15429         case 0:                 return "serdes";
15430         default:                return "unknown";
15431         }
15432 }
15433
15434 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15435 {
15436         if (tg3_flag(tp, PCI_EXPRESS)) {
15437                 strcpy(str, "PCI Express");
15438                 return str;
15439         } else if (tg3_flag(tp, PCIX_MODE)) {
15440                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15441
15442                 strcpy(str, "PCIX:");
15443
15444                 if ((clock_ctrl == 7) ||
15445                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15446                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15447                         strcat(str, "133MHz");
15448                 else if (clock_ctrl == 0)
15449                         strcat(str, "33MHz");
15450                 else if (clock_ctrl == 2)
15451                         strcat(str, "50MHz");
15452                 else if (clock_ctrl == 4)
15453                         strcat(str, "66MHz");
15454                 else if (clock_ctrl == 6)
15455                         strcat(str, "100MHz");
15456         } else {
15457                 strcpy(str, "PCI:");
15458                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15459                         strcat(str, "66MHz");
15460                 else
15461                         strcat(str, "33MHz");
15462         }
15463         if (tg3_flag(tp, PCI_32BIT))
15464                 strcat(str, ":32-bit");
15465         else
15466                 strcat(str, ":64-bit");
15467         return str;
15468 }
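
/*
 * Example renderings (sketch): a device on a 133 MHz PCI-X bus in a
 * 64-bit slot comes out as "PCIX:133MHz:64-bit", a plain 33 MHz
 * 32-bit part as "PCI:33MHz:32-bit", while PCI Express devices return
 * early as just "PCI Express" with no speed or width suffix.
 */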
15469
15470 static void __devinit tg3_init_coal(struct tg3 *tp)
15471 {
15472         struct ethtool_coalesce *ec = &tp->coal;
15473
15474         memset(ec, 0, sizeof(*ec));
15475         ec->cmd = ETHTOOL_GCOALESCE;
15476         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15477         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15478         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15479         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15480         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15481         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15482         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15483         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15484         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15485
15486         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15487                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15488                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15489                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15490                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15491                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15492         }
15493
15494         if (tg3_flag(tp, 5705_PLUS)) {
15495                 ec->rx_coalesce_usecs_irq = 0;
15496                 ec->tx_coalesce_usecs_irq = 0;
15497                 ec->stats_block_coalesce_usecs = 0;
15498         }
15499 }
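
/*
 * These defaults are what "ethtool -c <dev>" reports before any user
 * tuning; user changes come back through the driver's ethtool
 * set_coalesce hook and are applied to the HOSTCC registers from
 * there.
 */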
15500
15501 static int __devinit tg3_init_one(struct pci_dev *pdev,
15502                                   const struct pci_device_id *ent)
15503 {
15504         struct net_device *dev;
15505         struct tg3 *tp;
15506         int i, err, pm_cap;
15507         u32 sndmbx, rcvmbx, intmbx;
15508         char str[40];
15509         u64 dma_mask, persist_dma_mask;
15510         netdev_features_t features = 0;
15511
15512         printk_once(KERN_INFO "%s\n", version);
15513
15514         err = pci_enable_device(pdev);
15515         if (err) {
15516                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15517                 return err;
15518         }
15519
15520         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15521         if (err) {
15522                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15523                 goto err_out_disable_pdev;
15524         }
15525
15526         pci_set_master(pdev);
15527
15528         /* Find power-management capability. */
15529         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15530         if (pm_cap == 0) {
15531                 dev_err(&pdev->dev,
15532                         "Cannot find Power Management capability, aborting\n");
15533                 err = -EIO;
15534                 goto err_out_free_res;
15535         }
15536
15537         err = pci_set_power_state(pdev, PCI_D0);
15538         if (err) {
15539                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15540                 goto err_out_free_res;
15541         }
15542
15543         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15544         if (!dev) {
15545                 err = -ENOMEM;
15546                 goto err_out_power_down;
15547         }
15548
15549         SET_NETDEV_DEV(dev, &pdev->dev);
15550
15551         tp = netdev_priv(dev);
15552         tp->pdev = pdev;
15553         tp->dev = dev;
15554         tp->pm_cap = pm_cap;
15555         tp->rx_mode = TG3_DEF_RX_MODE;
15556         tp->tx_mode = TG3_DEF_TX_MODE;
15557
15558         if (tg3_debug > 0)
15559                 tp->msg_enable = tg3_debug;
15560         else
15561                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15562
15563         /* The word/byte swap controls here govern register access byte
15564          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15565          * setting below.
15566          */
15567         tp->misc_host_ctrl =
15568                 MISC_HOST_CTRL_MASK_PCI_INT |
15569                 MISC_HOST_CTRL_WORD_SWAP |
15570                 MISC_HOST_CTRL_INDIR_ACCESS |
15571                 MISC_HOST_CTRL_PCISTATE_RW;
15572
15573         /* The NONFRM (non-frame) byte/word swap controls take effect
15574          * on descriptor entries, i.e. anything that isn't packet data.
15575          *
15576          * The StrongARM chips on the board (one for tx, one for rx)
15577          * are running in big-endian mode.
15578          */
15579         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15580                         GRC_MODE_WSWAP_NONFRM_DATA);
15581 #ifdef __BIG_ENDIAN
15582         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15583 #endif
15584         spin_lock_init(&tp->lock);
15585         spin_lock_init(&tp->indirect_lock);
15586         INIT_WORK(&tp->reset_task, tg3_reset_task);
15587
15588         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15589         if (!tp->regs) {
15590                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15591                 err = -ENOMEM;
15592                 goto err_out_free_dev;
15593         }
15594
15595         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15596             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15597             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15598             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15599             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15600             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15601             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15602             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15603                 tg3_flag_set(tp, ENABLE_APE);
15604                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15605                 if (!tp->aperegs) {
15606                         dev_err(&pdev->dev,
15607                                 "Cannot map APE registers, aborting\n");
15608                         err = -ENOMEM;
15609                         goto err_out_iounmap;
15610                 }
15611         }
15612
15613         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15614         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15615
15616         dev->ethtool_ops = &tg3_ethtool_ops;
15617         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15618         dev->netdev_ops = &tg3_netdev_ops;
15619         dev->irq = pdev->irq;
15620
15621         err = tg3_get_invariants(tp);
15622         if (err) {
15623                 dev_err(&pdev->dev,
15624                         "Problem fetching invariants of chip, aborting\n");
15625                 goto err_out_apeunmap;
15626         }
15627
15628         /* The EPB bridge inside 5714, 5715, and 5780 and any
15629          * device behind the EPB cannot support DMA addresses > 40-bit.
15630          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15631          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15632          * do DMA address check in tg3_start_xmit().
15633          */
15634         if (tg3_flag(tp, IS_5788))
15635                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15636         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15637                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15638 #ifdef CONFIG_HIGHMEM
15639                 dma_mask = DMA_BIT_MASK(64);
15640 #endif
15641         } else
15642                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15643
15644         /* Configure DMA attributes. */
15645         if (dma_mask > DMA_BIT_MASK(32)) {
15646                 err = pci_set_dma_mask(pdev, dma_mask);
15647                 if (!err) {
15648                         features |= NETIF_F_HIGHDMA;
15649                         err = pci_set_consistent_dma_mask(pdev,
15650                                                           persist_dma_mask);
15651                         if (err < 0) {
15652                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15653                                         "DMA for consistent allocations\n");
15654                                 goto err_out_apeunmap;
15655                         }
15656                 }
15657         }
15658         if (err || dma_mask == DMA_BIT_MASK(32)) {
15659                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15660                 if (err) {
15661                         dev_err(&pdev->dev,
15662                                 "No usable DMA configuration, aborting\n");
15663                         goto err_out_apeunmap;
15664                 }
15665         }
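
/*
 * On later kernels this two-step negotiation collapses into
 * dma_set_mask_and_coherent() (an assumption -- that helper postdates
 * this driver), except in the CONFIG_HIGHMEM 40BIT_DMA_BUG case above,
 * where the streaming and coherent masks intentionally differ:
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, dma_mask))
 *		err = dma_set_mask_and_coherent(&pdev->dev,
 *						DMA_BIT_MASK(32));
 */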
15666
15667         tg3_init_bufmgr_config(tp);
15668
15669         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15670
15671         /* 5700 B0 chips do not support checksumming correctly due
15672          * to hardware bugs.
15673          */
15674         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15675                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15676
15677                 if (tg3_flag(tp, 5755_PLUS))
15678                         features |= NETIF_F_IPV6_CSUM;
15679         }
15680
15681         /* TSO is on by default on chips that support hardware TSO.
15682          * Firmware TSO on older chips gives lower performance, so it
15683          * is off by default, but can be enabled using ethtool.
15684          */
15685         if ((tg3_flag(tp, HW_TSO_1) ||
15686              tg3_flag(tp, HW_TSO_2) ||
15687              tg3_flag(tp, HW_TSO_3)) &&
15688             (features & NETIF_F_IP_CSUM))
15689                 features |= NETIF_F_TSO;
15690         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15691                 if (features & NETIF_F_IPV6_CSUM)
15692                         features |= NETIF_F_TSO6;
15693                 if (tg3_flag(tp, HW_TSO_3) ||
15694                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15695                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15696                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15697                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15698                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15699                         features |= NETIF_F_TSO_ECN;
15700         }
15701
15702         dev->features |= features;
15703         dev->vlan_features |= features;
15704
15705         /*
15706          * Add loopback capability only for a subset of devices that support
15707          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15708          * loopback for the remaining devices.
15709          */
15710         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15711             !tg3_flag(tp, CPMU_PRESENT))
15712                 /* Add the loopback capability */
15713                 features |= NETIF_F_LOOPBACK;
15714
15715         dev->hw_features |= features;
15716
15717         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15718             !tg3_flag(tp, TSO_CAPABLE) &&
15719             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15720                 tg3_flag_set(tp, MAX_RXPEND_64);
15721                 tp->rx_pending = 63;
15722         }
15723
15724         err = tg3_get_device_address(tp);
15725         if (err) {
15726                 dev_err(&pdev->dev,
15727                         "Could not obtain valid ethernet address, aborting\n");
15728                 goto err_out_apeunmap;
15729         }
15730
15731         /*
15732          * Reset chip in case UNDI or EFI driver did not shut it down.
15733          * The DMA self test will enable WDMAC and we'll see (spurious)
15734          * pending DMA on the PCI bus at that point.
15735          */
15736         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15737             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15738                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15739                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15740         }
15741
15742         err = tg3_test_dma(tp);
15743         if (err) {
15744                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15745                 goto err_out_apeunmap;
15746         }
15747
15748         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15749         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15750         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15751         for (i = 0; i < tp->irq_max; i++) {
15752                 struct tg3_napi *tnapi = &tp->napi[i];
15753
15754                 tnapi->tp = tp;
15755                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15756
15757                 tnapi->int_mbox = intmbx;
15758                 if (i <= 4)
15759                         intmbx += 0x8;
15760                 else
15761                         intmbx += 0x4;
15762
15763                 tnapi->consmbox = rcvmbx;
15764                 tnapi->prodmbox = sndmbx;
15765
15766                 if (i)
15767                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15768                 else
15769                         tnapi->coal_now = HOSTCC_MODE_NOW;
15770
15771                 if (!tg3_flag(tp, SUPPORT_MSIX))
15772                         break;
15773
15774                 /*
15775                  * If we support MSIX, we'll be using RSS.  If we're using
15776                  * RSS, the first vector only handles link interrupts and the
15777                  * remaining vectors handle rx and tx interrupts.  Reuse the
15778          * mailbox values for the next iteration.  The values we set up
15779          * above are still useful for the single vectored mode.
15780                  */
15781                 if (!i)
15782                         continue;
15783
15784                 rcvmbx += 0x8;
15785
15786                 if (sndmbx & 0x4)
15787                         sndmbx -= 0x4;
15788                 else
15789                         sndmbx += 0xc;
15790         }
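
/*
 * Worked example of the mailbox walk above (sketch): vector 0 keeps
 * the initial mailboxes for single-vector mode, and under RSS vector 1
 * reuses them for the first rx/tx ring (the "if (!i) continue").
 * After that, rcvmbx advances by 8 per ring, while the -0x4/+0xc
 * alternation makes sndmbx visit the +4 half and then the +0 half of
 * each successive 8-byte producer mailbox: B+4, B+4, B+0, B+12, B+8,
 * B+20, ... with B = MAILBOX_SNDHOST_PROD_IDX_0.
 */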
15791
15792         tg3_init_coal(tp);
15793
15794         pci_set_drvdata(pdev, dev);
15795
15796         if (tg3_flag(tp, 5717_PLUS)) {
15797                 /* Resume a low-power mode */
15798                 tg3_frob_aux_power(tp, false);
15799         }
15800
15801         tg3_timer_init(tp);
15802
15803         err = register_netdev(dev);
15804         if (err) {
15805                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15806                 goto err_out_apeunmap;
15807         }
15808
15809         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15810                     tp->board_part_number,
15811                     tp->pci_chip_rev_id,
15812                     tg3_bus_string(tp, str),
15813                     dev->dev_addr);
15814
15815         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15816                 struct phy_device *phydev;
15817                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15818                 netdev_info(dev,
15819                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15820                             phydev->drv->name, dev_name(&phydev->dev));
15821         } else {
15822                 char *ethtype;
15823
15824                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15825                         ethtype = "10/100Base-TX";
15826                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15827                         ethtype = "1000Base-SX";
15828                 else
15829                         ethtype = "10/100/1000Base-T";
15830
15831                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15832                             "(WireSpeed[%d], EEE[%d])\n",
15833                             tg3_phy_string(tp), ethtype,
15834                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15835                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15836         }
15837
15838         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15839                     (dev->features & NETIF_F_RXCSUM) != 0,
15840                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15841                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15842                     tg3_flag(tp, ENABLE_ASF) != 0,
15843                     tg3_flag(tp, TSO_CAPABLE) != 0);
15844         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15845                     tp->dma_rwctrl,
15846                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15847                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15848
15849         pci_save_state(pdev);
15850
15851         return 0;
15852
15853 err_out_apeunmap:
15854         if (tp->aperegs) {
15855                 iounmap(tp->aperegs);
15856                 tp->aperegs = NULL;
15857         }
15858
15859 err_out_iounmap:
15860         if (tp->regs) {
15861                 iounmap(tp->regs);
15862                 tp->regs = NULL;
15863         }
15864
15865 err_out_free_dev:
15866         free_netdev(dev);
15867
15868 err_out_power_down:
15869         pci_set_power_state(pdev, PCI_D3hot);
15870
15871 err_out_free_res:
15872         pci_release_regions(pdev);
15873
15874 err_out_disable_pdev:
15875         pci_disable_device(pdev);
15876         pci_set_drvdata(pdev, NULL);
15877         return err;
15878 }
15879
15880 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15881 {
15882         struct net_device *dev = pci_get_drvdata(pdev);
15883
15884         if (dev) {
15885                 struct tg3 *tp = netdev_priv(dev);
15886
15887                 release_firmware(tp->fw);
15888
15889                 tg3_reset_task_cancel(tp);
15890
15891                 if (tg3_flag(tp, USE_PHYLIB)) {
15892                         tg3_phy_fini(tp);
15893                         tg3_mdio_fini(tp);
15894                 }
15895
15896                 unregister_netdev(dev);
15897                 if (tp->aperegs) {
15898                         iounmap(tp->aperegs);
15899                         tp->aperegs = NULL;
15900                 }
15901                 if (tp->regs) {
15902                         iounmap(tp->regs);
15903                         tp->regs = NULL;
15904                 }
15905                 free_netdev(dev);
15906                 pci_release_regions(pdev);
15907                 pci_disable_device(pdev);
15908                 pci_set_drvdata(pdev, NULL);
15909         }
15910 }
15911
15912 #ifdef CONFIG_PM_SLEEP
15913 static int tg3_suspend(struct device *device)
15914 {
15915         struct pci_dev *pdev = to_pci_dev(device);
15916         struct net_device *dev = pci_get_drvdata(pdev);
15917         struct tg3 *tp = netdev_priv(dev);
15918         int err;
15919
15920         if (!netif_running(dev))
15921                 return 0;
15922
15923         tg3_reset_task_cancel(tp);
15924         tg3_phy_stop(tp);
15925         tg3_netif_stop(tp);
15926
15927         tg3_timer_stop(tp);
15928
15929         tg3_full_lock(tp, 1);
15930         tg3_disable_ints(tp);
15931         tg3_full_unlock(tp);
15932
15933         netif_device_detach(dev);
15934
15935         tg3_full_lock(tp, 0);
15936         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15937         tg3_flag_clear(tp, INIT_COMPLETE);
15938         tg3_full_unlock(tp);
15939
15940         err = tg3_power_down_prepare(tp);
15941         if (err) {
15942                 int err2;
15943
15944                 tg3_full_lock(tp, 0);
15945
15946                 tg3_flag_set(tp, INIT_COMPLETE);
15947                 err2 = tg3_restart_hw(tp, 1);
15948                 if (err2)
15949                         goto out;
15950
15951                 tg3_timer_start(tp);
15952
15953                 netif_device_attach(dev);
15954                 tg3_netif_start(tp);
15955
15956 out:
15957                 tg3_full_unlock(tp);
15958
15959                 if (!err2)
15960                         tg3_phy_start(tp);
15961         }
15962
15963         return err;
15964 }
15965
15966 static int tg3_resume(struct device *device)
15967 {
15968         struct pci_dev *pdev = to_pci_dev(device);
15969         struct net_device *dev = pci_get_drvdata(pdev);
15970         struct tg3 *tp = netdev_priv(dev);
15971         int err;
15972
15973         if (!netif_running(dev))
15974                 return 0;
15975
15976         netif_device_attach(dev);
15977
15978         tg3_full_lock(tp, 0);
15979
15980         tg3_flag_set(tp, INIT_COMPLETE);
15981         err = tg3_restart_hw(tp, 1);
15982         if (err)
15983                 goto out;
15984
15985         tg3_timer_start(tp);
15986
15987         tg3_netif_start(tp);
15988
15989 out:
15990         tg3_full_unlock(tp);
15991
15992         if (!err)
15993                 tg3_phy_start(tp);
15994
15995         return err;
15996 }
15997
15998 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15999 #define TG3_PM_OPS (&tg3_pm_ops)
16000
16001 #else
16002
16003 #define TG3_PM_OPS NULL
16004
16005 #endif /* CONFIG_PM_SLEEP */
16006
16007 /**
16008  * tg3_io_error_detected - called when PCI error is detected
16009  * @pdev: Pointer to PCI device
16010  * @state: The current pci connection state
16011  *
16012  * This function is called after a PCI bus error affecting
16013  * this device has been detected.
16014  */
16015 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
16016                                               pci_channel_state_t state)
16017 {
16018         struct net_device *netdev = pci_get_drvdata(pdev);
16019         struct tg3 *tp = netdev_priv(netdev);
16020         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
16021
16022         netdev_info(netdev, "PCI I/O error detected\n");
16023
16024         rtnl_lock();
16025
16026         if (!netif_running(netdev))
16027                 goto done;
16028
16029         tg3_phy_stop(tp);
16030
16031         tg3_netif_stop(tp);
16032
16033         tg3_timer_stop(tp);
16034
16035         /* Want to make sure that the reset task doesn't run */
16036         tg3_reset_task_cancel(tp);
16037
16038         netif_device_detach(netdev);
16039
16040         /* Clean up software state, even if MMIO is blocked */
16041         tg3_full_lock(tp, 0);
16042         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16043         tg3_full_unlock(tp);
16044
16045 done:
16046         if (state == pci_channel_io_perm_failure)
16047                 err = PCI_ERS_RESULT_DISCONNECT;
16048         else
16049                 pci_disable_device(pdev);
16050
16051         rtnl_unlock();
16052
16053         return err;
16054 }
16055
16056 /**
16057  * tg3_io_slot_reset - called after the pci bus has been reset.
16058  * @pdev: Pointer to PCI device
16059  *
16060  * Restart the card from scratch, as if from a cold boot.
16061  * At this point, the card has experienced a hard reset,
16062  * followed by fixups by BIOS, and has its config space
16063  * set up identically to what it was at cold boot.
16064  */
16065 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16066 {
16067         struct net_device *netdev = pci_get_drvdata(pdev);
16068         struct tg3 *tp = netdev_priv(netdev);
16069         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16070         int err;
16071
16072         rtnl_lock();
16073
16074         if (pci_enable_device(pdev)) {
16075                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16076                 goto done;
16077         }
16078
16079         pci_set_master(pdev);
16080         pci_restore_state(pdev);
16081         pci_save_state(pdev);
16082
16083         if (!netif_running(netdev)) {
16084                 rc = PCI_ERS_RESULT_RECOVERED;
16085                 goto done;
16086         }
16087
16088         err = tg3_power_up(tp);
16089         if (err)
16090                 goto done;
16091
16092         rc = PCI_ERS_RESULT_RECOVERED;
16093
16094 done:
16095         rtnl_unlock();
16096
16097         return rc;
16098 }
16099
16100 /**
16101  * tg3_io_resume - called when traffic can start flowing again.
16102  * @pdev: Pointer to PCI device
16103  *
16104  * This callback is called when the error recovery driver tells
16105  * us that it's OK to resume normal operation.
16106  */
16107 static void tg3_io_resume(struct pci_dev *pdev)
16108 {
16109         struct net_device *netdev = pci_get_drvdata(pdev);
16110         struct tg3 *tp = netdev_priv(netdev);
16111         int err;
16112
16113         rtnl_lock();
16114
16115         if (!netif_running(netdev))
16116                 goto done;
16117
16118         tg3_full_lock(tp, 0);
16119         tg3_flag_set(tp, INIT_COMPLETE);
16120         err = tg3_restart_hw(tp, 1);
16121         tg3_full_unlock(tp);
16122         if (err) {
16123                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16124                 goto done;
16125         }
16126
16127         netif_device_attach(netdev);
16128
16129         tg3_timer_start(tp);
16130
16131         tg3_netif_start(tp);
16132
16133         tg3_phy_start(tp);
16134
16135 done:
16136         rtnl_unlock();
16137 }
16138
16139 static struct pci_error_handlers tg3_err_handler = {
16140         .error_detected = tg3_io_error_detected,
16141         .slot_reset     = tg3_io_slot_reset,
16142         .resume         = tg3_io_resume
16143 };
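
/*
 * Recovery sequence, for reference (a sketch of the PCI error-recovery
 * contract, not driver code): the PCI core calls .error_detected
 * first; returning PCI_ERS_RESULT_NEED_RESET requests a slot reset,
 * after which .slot_reset runs, and a PCI_ERS_RESULT_RECOVERED result
 * from that lets .resume restart traffic.  PCI_ERS_RESULT_DISCONNECT
 * at either stage gives the device up for dead.
 */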
16144
16145 static struct pci_driver tg3_driver = {
16146         .name           = DRV_MODULE_NAME,
16147         .id_table       = tg3_pci_tbl,
16148         .probe          = tg3_init_one,
16149         .remove         = __devexit_p(tg3_remove_one),
16150         .err_handler    = &tg3_err_handler,
16151         .driver.pm      = TG3_PM_OPS,
16152 };
16153
16154 static int __init tg3_init(void)
16155 {
16156         return pci_register_driver(&tg3_driver);
16157 }
16158
16159 static void __exit tg3_cleanup(void)
16160 {
16161         pci_unregister_driver(&tg3_driver);
16162 }
16163
16164 module_init(tg3_init);
16165 module_exit(tg3_cleanup);