/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#if IS_ENABLED(CONFIG_HWMON)
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#endif

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
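/* Example: tg3_flag(tp, TXD_MBOX_HWBUG) token-pastes to
 * _tg3_flag(TG3_FLAG_TXD_MBOX_HWBUG, (tp)->tg3_flags), i.e. an atomic
 * test_bit() on the device's flag bitmap; the set/clear variants work
 * the same way.
 */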

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     124
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "March 21, 2012"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
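/* Example of the shift/mask point above: TG3_TX_RING_SIZE is a power of
 * two, so NEXT_TX(511) == (512 & 511) == 0 -- a single AND instruction
 * instead of a hardware modulo.
 */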

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
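/* I.e. on unaligned-access-friendly architectures the threshold is the
 * 256-byte constant above; elsewhere it is read from tp->rx_copy_thresh
 * so the 5701 workaround described above can raise it per device at
 * runtime.
 */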

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
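/* Example: with the default tx_pending of TG3_DEF_TX_RING_PENDING (511),
 * the wakeup threshold above works out to 511 / 4 = 127 free descriptors.
 */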
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test        (online) " },
        { "link test         (online) " },
        { "register test     (offline)" },
        { "memory test       (offline)" },
        { "mac loopback test (offline)" },
        { "phy loopback test (offline)" },
        { "ext loopback test (offline)" },
        { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
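/* Usage note: tw32_f() posts a write and immediately flushes it with a
 * read back, while tw32_wait_f(reg, val, us) additionally enforces the
 * settling delay described above for registers like GRC_LOCAL_CTRL and
 * TG3PCI_CLOCK_CTRL.
 */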

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
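                /* fall through */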
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
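                /* fall through */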
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

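/* Poll for up to timeout_us microseconds until the APE has no event
 * pending.  On success, returns 0 with TG3_APE_LOCK_MEM held; returns
 * -EBUSY if the lock cannot be taken or the timeout expires.
 */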
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

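/* Busy-wait up to timeout_us microseconds for the APE to clear its
 * EVENT_PENDING bit.  Returns nonzero on timeout.
 */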
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

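/* Read len bytes of NC-SI scratchpad data starting at base_off, one
 * message-buffer-sized chunk at a time, handshaking each chunk with the
 * APE firmware through the shared memory event interface.  The copy loop
 * below assumes len is a multiple of sizeof(u32).
 */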
int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off, u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}

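/* Post a single driver event to the APE firmware; the MEM lock taken by
 * tg3_ape_event_lock() is dropped before ringing the APE_EVENT_1
 * doorbell.
 */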
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

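/* Mask the chip's PCI interrupt output and write 1 to every interrupt
 * mailbox to disable interrupt generation.
 */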
static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

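/* MII register access goes through the MAC's MI_COM interface.  Hardware
 * autopolling is temporarily disabled around each transaction, and the
 * busy bit is polled for up to PHY_BUSY_LOOPS iterations.
 */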
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

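/* Clause 45 registers are reached indirectly through the clause 22
 * MMD control/address register pair (MII_TG3_MMD_CTRL / _ADDRESS).
 */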
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

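/* phylib mii_bus callbacks; tp->lock serializes access to the MI
 * interface.
 */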
1290 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1291 {
1292         struct tg3 *tp = bp->priv;
1293         u32 val;
1294
1295         spin_lock_bh(&tp->lock);
1296
1297         if (tg3_readphy(tp, reg, &val))
1298                 val = -EIO;
1299
1300         spin_unlock_bh(&tp->lock);
1301
1302         return val;
1303 }
1304
1305 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1306 {
1307         struct tg3 *tp = bp->priv;
1308         u32 ret = 0;
1309
1310         spin_lock_bh(&tp->lock);
1311
1312         if (tg3_writephy(tp, reg, val))
1313                 ret = -EIO;
1314
1315         spin_unlock_bh(&tp->lock);
1316
1317         return ret;
1318 }
1319
1320 static int tg3_mdio_reset(struct mii_bus *bp)
1321 {
1322         return 0;
1323 }
1324
1325 static void tg3_mdio_config_5785(struct tg3 *tp)
1326 {
1327         u32 val;
1328         struct phy_device *phydev;
1329
1330         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1331         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1332         case PHY_ID_BCM50610:
1333         case PHY_ID_BCM50610M:
1334                 val = MAC_PHYCFG2_50610_LED_MODES;
1335                 break;
1336         case PHY_ID_BCMAC131:
1337                 val = MAC_PHYCFG2_AC131_LED_MODES;
1338                 break;
1339         case PHY_ID_RTL8211C:
1340                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1341                 break;
1342         case PHY_ID_RTL8201E:
1343                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1344                 break;
1345         default:
1346                 return;
1347         }
1348
1349         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1350                 tw32(MAC_PHYCFG2, val);
1351
1352                 val = tr32(MAC_PHYCFG1);
1353                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1354                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1355                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1356                 tw32(MAC_PHYCFG1, val);
1357
1358                 return;
1359         }
1360
1361         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1362                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1363                        MAC_PHYCFG2_FMODE_MASK_MASK |
1364                        MAC_PHYCFG2_GMODE_MASK_MASK |
1365                        MAC_PHYCFG2_ACT_MASK_MASK   |
1366                        MAC_PHYCFG2_QUAL_MASK_MASK |
1367                        MAC_PHYCFG2_INBAND_ENABLE;
1368
1369         tw32(MAC_PHYCFG2, val);
1370
1371         val = tr32(MAC_PHYCFG1);
1372         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1373                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1374         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1375                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1376                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1377                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1378                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1379         }
1380         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1381                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1382         tw32(MAC_PHYCFG1, val);
1383
1384         val = tr32(MAC_EXT_RGMII_MODE);
1385         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1386                  MAC_RGMII_MODE_RX_QUALITY |
1387                  MAC_RGMII_MODE_RX_ACTIVITY |
1388                  MAC_RGMII_MODE_RX_ENG_DET |
1389                  MAC_RGMII_MODE_TX_ENABLE |
1390                  MAC_RGMII_MODE_TX_LOWPWR |
1391                  MAC_RGMII_MODE_TX_RESET);
1392         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1393                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1394                         val |= MAC_RGMII_MODE_RX_INT_B |
1395                                MAC_RGMII_MODE_RX_QUALITY |
1396                                MAC_RGMII_MODE_RX_ACTIVITY |
1397                                MAC_RGMII_MODE_RX_ENG_DET;
1398                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1399                         val |= MAC_RGMII_MODE_TX_ENABLE |
1400                                MAC_RGMII_MODE_TX_LOWPWR |
1401                                MAC_RGMII_MODE_TX_RESET;
1402         }
1403         tw32(MAC_EXT_RGMII_MODE, val);
1404 }
1405
1406 static void tg3_mdio_start(struct tg3 *tp)
1407 {
1408         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1409         tw32_f(MAC_MI_MODE, tp->mi_mode);
1410         udelay(80);
1411
1412         if (tg3_flag(tp, MDIOBUS_INITED) &&
1413             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1414                 tg3_mdio_config_5785(tp);
1415 }
1416
1417 static int tg3_mdio_init(struct tg3 *tp)
1418 {
1419         int i;
1420         u32 reg;
1421         struct phy_device *phydev;
1422
1423         if (tg3_flag(tp, 5717_PLUS)) {
1424                 u32 is_serdes;
1425
1426                 tp->phy_addr = tp->pci_fn + 1;
1427
1428                 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1429                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1430                 else
1431                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1432                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1433                 if (is_serdes)
1434                         tp->phy_addr += 7;
1435         } else
1436                 tp->phy_addr = TG3_PHY_MII_ADDR;
1437
1438         tg3_mdio_start(tp);
1439
1440         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1441                 return 0;
1442
1443         tp->mdio_bus = mdiobus_alloc();
1444         if (tp->mdio_bus == NULL)
1445                 return -ENOMEM;
1446
1447         tp->mdio_bus->name     = "tg3 mdio bus";
1448         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1449                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1450         tp->mdio_bus->priv     = tp;
1451         tp->mdio_bus->parent   = &tp->pdev->dev;
1452         tp->mdio_bus->read     = &tg3_mdio_read;
1453         tp->mdio_bus->write    = &tg3_mdio_write;
1454         tp->mdio_bus->reset    = &tg3_mdio_reset;
1455         tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1456         tp->mdio_bus->irq      = &tp->mdio_irq[0];
1457
1458         for (i = 0; i < PHY_MAX_ADDR; i++)
1459                 tp->mdio_bus->irq[i] = PHY_POLL;
1460
1461         /* The bus registration will look for all the PHYs on the mdio bus.
1462          * Unfortunately, it does not ensure the PHY is powered up before
1463          * accessing the PHY ID registers.  A chip reset is the
1464          * quickest way to bring the device back to an operational state..
1465          */
1466         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1467                 tg3_bmcr_reset(tp);
1468
1469         i = mdiobus_register(tp->mdio_bus);
1470         if (i) {
1471                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1472                 mdiobus_free(tp->mdio_bus);
1473                 return i;
1474         }
1475
1476         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1477
1478         if (!phydev || !phydev->drv) {
1479                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1480                 mdiobus_unregister(tp->mdio_bus);
1481                 mdiobus_free(tp->mdio_bus);
1482                 return -ENODEV;
1483         }
1484
1485         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1486         case PHY_ID_BCM57780:
1487                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1488                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1489                 break;
1490         case PHY_ID_BCM50610:
1491         case PHY_ID_BCM50610M:
1492                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1493                                      PHY_BRCM_RX_REFCLK_UNUSED |
1494                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1495                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1496                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1497                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1498                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1499                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1500                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1501                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1502                 /* fallthru */
1503         case PHY_ID_RTL8211C:
1504                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1505                 break;
1506         case PHY_ID_RTL8201E:
1507         case PHY_ID_BCMAC131:
1508                 phydev->interface = PHY_INTERFACE_MODE_MII;
1509                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1510                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1511                 break;
1512         }
1513
1514         tg3_flag_set(tp, MDIOBUS_INITED);
1515
1516         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1517                 tg3_mdio_config_5785(tp);
1518
1519         return 0;
1520 }
1521
1522 static void tg3_mdio_fini(struct tg3 *tp)
1523 {
1524         if (tg3_flag(tp, MDIOBUS_INITED)) {
1525                 tg3_flag_clear(tp, MDIOBUS_INITED);
1526                 mdiobus_unregister(tp->mdio_bus);
1527                 mdiobus_free(tp->mdio_bus);
1528         }
1529 }
1530
1531 /* tp->lock is held. */
1532 static inline void tg3_generate_fw_event(struct tg3 *tp)
1533 {
1534         u32 val;
1535
1536         val = tr32(GRC_RX_CPU_EVENT);
1537         val |= GRC_RX_CPU_DRIVER_EVENT;
1538         tw32_f(GRC_RX_CPU_EVENT, val);
1539
1540         tp->last_event_jiffies = jiffies;
1541 }
1542
1543 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1544
1545 /* tp->lock is held. */
1546 static void tg3_wait_for_event_ack(struct tg3 *tp)
1547 {
1548         int i;
1549         unsigned int delay_cnt;
1550         long time_remain;
1551
1552         /* If enough time has passed, no wait is necessary. */
1553         time_remain = (long)(tp->last_event_jiffies + 1 +
1554                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1555                       (long)jiffies;
1556         if (time_remain < 0)
1557                 return;
1558
1559         /* Check if we can shorten the wait time. */
1560         delay_cnt = jiffies_to_usecs(time_remain);
1561         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1562                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1563         delay_cnt = (delay_cnt >> 3) + 1;
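             /* Poll in 8 usec steps; the "(delay_cnt >> 3) + 1" sizing
              * above caps the loop at roughly the remaining window.
              */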
1564
1565         for (i = 0; i < delay_cnt; i++) {
1566                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1567                         break;
1568                 udelay(8);
1569         }
1570 }
1571
1572 /* tp->lock is held. */
1573 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1574 {
1575         u32 reg, val;
1576
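             /* Pack pairs of 16-bit MII registers into u32 words for the
              * firmware mailbox: BMCR/BMSR, ADVERTISE/LPA,
              * CTRL1000/STAT1000 and PHYADDR.  A failed read simply
              * leaves that half of the word zero.
              */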
1577         val = 0;
1578         if (!tg3_readphy(tp, MII_BMCR, &reg))
1579                 val = reg << 16;
1580         if (!tg3_readphy(tp, MII_BMSR, &reg))
1581                 val |= (reg & 0xffff);
1582         *data++ = val;
1583
1584         val = 0;
1585         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1586                 val = reg << 16;
1587         if (!tg3_readphy(tp, MII_LPA, &reg))
1588                 val |= (reg & 0xffff);
1589         *data++ = val;
1590
1591         val = 0;
1592         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1593                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1594                         val = reg << 16;
1595                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1596                         val |= (reg & 0xffff);
1597         }
1598         *data++ = val;
1599
1600         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1601                 val = reg << 16;
1602         else
1603                 val = 0;
1604         *data++ = val;
1605 }
1606
1607 /* tp->lock is held. */
1608 static void tg3_ump_link_report(struct tg3 *tp)
1609 {
1610         u32 data[4];
1611
1612         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1613                 return;
1614
1615         tg3_phy_gather_ump_data(tp, data);
1616
1617         tg3_wait_for_event_ack(tp);
1618
1619         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1620         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1621         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1622         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1623         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1624         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1625
1626         tg3_generate_fw_event(tp);
1627 }
1628
1629 /* tp->lock is held. */
1630 static void tg3_stop_fw(struct tg3 *tp)
1631 {
1632         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1633                 /* Wait for RX cpu to ACK the previous event. */
1634                 tg3_wait_for_event_ack(tp);
1635
1636                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1637
1638                 tg3_generate_fw_event(tp);
1639
1640                 /* Wait for RX cpu to ACK this event. */
1641                 tg3_wait_for_event_ack(tp);
1642         }
1643 }
1644
1645 /* tp->lock is held. */
1646 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1647 {
1648         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1649                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1650
1651         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1652                 switch (kind) {
1653                 case RESET_KIND_INIT:
1654                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1655                                       DRV_STATE_START);
1656                         break;
1657
1658                 case RESET_KIND_SHUTDOWN:
1659                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1660                                       DRV_STATE_UNLOAD);
1661                         break;
1662
1663                 case RESET_KIND_SUSPEND:
1664                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1665                                       DRV_STATE_SUSPEND);
1666                         break;
1667
1668                 default:
1669                         break;
1670                 }
1671         }
1672
1673         if (kind == RESET_KIND_INIT ||
1674             kind == RESET_KIND_SUSPEND)
1675                 tg3_ape_driver_state_change(tp, kind);
1676 }
1677
1678 /* tp->lock is held. */
1679 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1680 {
1681         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1682                 switch (kind) {
1683                 case RESET_KIND_INIT:
1684                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1685                                       DRV_STATE_START_DONE);
1686                         break;
1687
1688                 case RESET_KIND_SHUTDOWN:
1689                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1690                                       DRV_STATE_UNLOAD_DONE);
1691                         break;
1692
1693                 default:
1694                         break;
1695                 }
1696         }
1697
1698         if (kind == RESET_KIND_SHUTDOWN)
1699                 tg3_ape_driver_state_change(tp, kind);
1700 }
1701
1702 /* tp->lock is held. */
1703 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1704 {
1705         if (tg3_flag(tp, ENABLE_ASF)) {
1706                 switch (kind) {
1707                 case RESET_KIND_INIT:
1708                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1709                                       DRV_STATE_START);
1710                         break;
1711
1712                 case RESET_KIND_SHUTDOWN:
1713                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1714                                       DRV_STATE_UNLOAD);
1715                         break;
1716
1717                 case RESET_KIND_SUSPEND:
1718                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1719                                       DRV_STATE_SUSPEND);
1720                         break;
1721
1722                 default:
1723                         break;
1724                 }
1725         }
1726 }
1727
1728 static int tg3_poll_fw(struct tg3 *tp)
1729 {
1730         int i;
1731         u32 val;
1732
1733         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1734                 /* Wait up to 20ms for init done. */
1735                 for (i = 0; i < 200; i++) {
1736                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1737                                 return 0;
1738                         udelay(100);
1739                 }
1740                 return -ENODEV;
1741         }
1742
1743         /* Wait for firmware initialization to complete. */
1744         for (i = 0; i < 100000; i++) {
1745                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1746                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1747                         break;
1748                 udelay(10);
1749         }
1750
1751         /* The chip might not be fitted with firmware at all.  Some Sun
1752          * onboard parts are configured that way.  So don't treat the
1753          * timeout of the above loop as an error, but do report the lack
1754          * of running firmware once.
1755          */
1756         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1757                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1758
1759                 netdev_info(tp->dev, "No firmware running\n");
1760         }
1761
1762         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1763                 /* The 57765 A0 needs a little more
1764                  * time to do some important work.
1765                  */
1766                 mdelay(10);
1767         }
1768
1769         return 0;
1770 }
1771
1772 static void tg3_link_report(struct tg3 *tp)
1773 {
1774         if (!netif_carrier_ok(tp->dev)) {
1775                 netif_info(tp, link, tp->dev, "Link is down\n");
1776                 tg3_ump_link_report(tp);
1777         } else if (netif_msg_link(tp)) {
1778                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1779                             (tp->link_config.active_speed == SPEED_1000 ?
1780                              1000 :
1781                              (tp->link_config.active_speed == SPEED_100 ?
1782                               100 : 10)),
1783                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1784                              "full" : "half"));
1785
1786                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1787                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1788                             "on" : "off",
1789                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1790                             "on" : "off");
1791
1792                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1793                         netdev_info(tp->dev, "EEE is %s\n",
1794                                     tp->setlpicnt ? "enabled" : "disabled");
1795
1796                 tg3_ump_link_report(tp);
1797         }
1798 }
1799
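     /* Translate FLOW_CTRL_TX/FLOW_CTRL_RX settings into the 1000Base-X
      * pause advertisement bits.  Note that RX-only pause has to be
      * advertised as symmetric-plus-asymmetric pause, per the usual
      * IEEE 802.3 pause resolution rules.
      */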
1800 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1801 {
1802         u16 miireg;
1803
1804         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1805                 miireg = ADVERTISE_1000XPAUSE;
1806         else if (flow_ctrl & FLOW_CTRL_TX)
1807                 miireg = ADVERTISE_1000XPSE_ASYM;
1808         else if (flow_ctrl & FLOW_CTRL_RX)
1809                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1810         else
1811                 miireg = 0;
1812
1813         return miireg;
1814 }
1815
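     /* Resolve the negotiated flow control from the local and remote
      * 1000Base-X advertisements: symmetric pause on both sides yields
      * TX+RX, otherwise the asymmetric pause bits decide the direction.
      */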
1816 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1817 {
1818         u8 cap = 0;
1819
1820         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1821                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1822         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1823                 if (lcladv & ADVERTISE_1000XPAUSE)
1824                         cap = FLOW_CTRL_RX;
1825                 if (rmtadv & ADVERTISE_1000XPAUSE)
1826                         cap = FLOW_CTRL_TX;
1827         }
1828
1829         return cap;
1830 }
1831
1832 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1833 {
1834         u8 autoneg;
1835         u8 flowctrl = 0;
1836         u32 old_rx_mode = tp->rx_mode;
1837         u32 old_tx_mode = tp->tx_mode;
1838
1839         if (tg3_flag(tp, USE_PHYLIB))
1840                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1841         else
1842                 autoneg = tp->link_config.autoneg;
1843
1844         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1845                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1846                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1847                 else
1848                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1849         } else
1850                 flowctrl = tp->link_config.flowctrl;
1851
1852         tp->link_config.active_flowctrl = flowctrl;
1853
1854         if (flowctrl & FLOW_CTRL_RX)
1855                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1856         else
1857                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1858
1859         if (old_rx_mode != tp->rx_mode)
1860                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1861
1862         if (flowctrl & FLOW_CTRL_TX)
1863                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1864         else
1865                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1866
1867         if (old_tx_mode != tp->tx_mode)
1868                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1869 }
1870
1871 static void tg3_adjust_link(struct net_device *dev)
1872 {
1873         u8 oldflowctrl, linkmesg = 0;
1874         u32 mac_mode, lcl_adv, rmt_adv;
1875         struct tg3 *tp = netdev_priv(dev);
1876         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1877
1878         spin_lock_bh(&tp->lock);
1879
1880         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1881                                     MAC_MODE_HALF_DUPLEX);
1882
1883         oldflowctrl = tp->link_config.active_flowctrl;
1884
1885         if (phydev->link) {
1886                 lcl_adv = 0;
1887                 rmt_adv = 0;
1888
1889                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1890                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1891                 else if (phydev->speed == SPEED_1000 ||
1892                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1893                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1894                 else
1895                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1896
1897                 if (phydev->duplex == DUPLEX_HALF)
1898                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1899                 else {
1900                         lcl_adv = mii_advertise_flowctrl(
1901                                   tp->link_config.flowctrl);
1902
1903                         if (phydev->pause)
1904                                 rmt_adv = LPA_PAUSE_CAP;
1905                         if (phydev->asym_pause)
1906                                 rmt_adv |= LPA_PAUSE_ASYM;
1907                 }
1908
1909                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1910         } else
1911                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1912
1913         if (mac_mode != tp->mac_mode) {
1914                 tp->mac_mode = mac_mode;
1915                 tw32_f(MAC_MODE, tp->mac_mode);
1916                 udelay(40);
1917         }
1918
1919         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1920                 if (phydev->speed == SPEED_10)
1921                         tw32(MAC_MI_STAT,
1922                              MAC_MI_STAT_10MBPS_MODE |
1923                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1924                 else
1925                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1926         }
1927
1928         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1929                 tw32(MAC_TX_LENGTHS,
1930                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1931                       (6 << TX_LENGTHS_IPG_SHIFT) |
1932                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1933         else
1934                 tw32(MAC_TX_LENGTHS,
1935                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1936                       (6 << TX_LENGTHS_IPG_SHIFT) |
1937                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1938
1939         if (phydev->link != tp->old_link ||
1940             phydev->speed != tp->link_config.active_speed ||
1941             phydev->duplex != tp->link_config.active_duplex ||
1942             oldflowctrl != tp->link_config.active_flowctrl)
1943                 linkmesg = 1;
1944
1945         tp->old_link = phydev->link;
1946         tp->link_config.active_speed = phydev->speed;
1947         tp->link_config.active_duplex = phydev->duplex;
1948
1949         spin_unlock_bh(&tp->lock);
1950
1951         if (linkmesg)
1952                 tg3_link_report(tp);
1953 }
1954
1955 static int tg3_phy_init(struct tg3 *tp)
1956 {
1957         struct phy_device *phydev;
1958
1959         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1960                 return 0;
1961
1962         /* Bring the PHY back to a known state. */
1963         tg3_bmcr_reset(tp);
1964
1965         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1966
1967         /* Attach the MAC to the PHY. */
1968         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1969                              phydev->dev_flags, phydev->interface);
1970         if (IS_ERR(phydev)) {
1971                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1972                 return PTR_ERR(phydev);
1973         }
1974
1975         /* Mask with MAC supported features. */
1976         switch (phydev->interface) {
1977         case PHY_INTERFACE_MODE_GMII:
1978         case PHY_INTERFACE_MODE_RGMII:
1979                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1980                         phydev->supported &= (PHY_GBIT_FEATURES |
1981                                               SUPPORTED_Pause |
1982                                               SUPPORTED_Asym_Pause);
1983                         break;
1984                 }
1985                 /* fallthru */
1986         case PHY_INTERFACE_MODE_MII:
1987                 phydev->supported &= (PHY_BASIC_FEATURES |
1988                                       SUPPORTED_Pause |
1989                                       SUPPORTED_Asym_Pause);
1990                 break;
1991         default:
1992                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1993                 return -EINVAL;
1994         }
1995
1996         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1997
1998         phydev->advertising = phydev->supported;
1999
2000         return 0;
2001 }
2002
2003 static void tg3_phy_start(struct tg3 *tp)
2004 {
2005         struct phy_device *phydev;
2006
2007         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2008                 return;
2009
2010         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2011
2012         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2013                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2014                 phydev->speed = tp->link_config.speed;
2015                 phydev->duplex = tp->link_config.duplex;
2016                 phydev->autoneg = tp->link_config.autoneg;
2017                 phydev->advertising = tp->link_config.advertising;
2018         }
2019
2020         phy_start(phydev);
2021
2022         phy_start_aneg(phydev);
2023 }
2024
2025 static void tg3_phy_stop(struct tg3 *tp)
2026 {
2027         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2028                 return;
2029
2030         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2031 }
2032
2033 static void tg3_phy_fini(struct tg3 *tp)
2034 {
2035         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2036                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2037                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2038         }
2039 }
2040
2041 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2042 {
2043         int err;
2044         u32 val;
2045
2046         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2047                 return 0;
2048
2049         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2050                 /* Cannot do read-modify-write on 5401 */
2051                 err = tg3_phy_auxctl_write(tp,
2052                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2053                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2054                                            0x4c20);
2055                 goto done;
2056         }
2057
2058         err = tg3_phy_auxctl_read(tp,
2059                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2060         if (err)
2061                 return err;
2062
2063         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2064         err = tg3_phy_auxctl_write(tp,
2065                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2066
2067 done:
2068         return err;
2069 }
2070
2071 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2072 {
2073         u32 phytest;
2074
2075         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2076                 u32 phy;
2077
2078                 tg3_writephy(tp, MII_TG3_FET_TEST,
2079                              phytest | MII_TG3_FET_SHADOW_EN);
2080                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2081                         if (enable)
2082                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2083                         else
2084                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2085                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2086                 }
2087                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2088         }
2089 }
2090
2091 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2092 {
2093         u32 reg;
2094
2095         if (!tg3_flag(tp, 5705_PLUS) ||
2096             (tg3_flag(tp, 5717_PLUS) &&
2097              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2098                 return;
2099
2100         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2101                 tg3_phy_fet_toggle_apd(tp, enable);
2102                 return;
2103         }
2104
2105         reg = MII_TG3_MISC_SHDW_WREN |
2106               MII_TG3_MISC_SHDW_SCR5_SEL |
2107               MII_TG3_MISC_SHDW_SCR5_LPED |
2108               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2109               MII_TG3_MISC_SHDW_SCR5_SDTL |
2110               MII_TG3_MISC_SHDW_SCR5_C125OE;
2111         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2112                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2113
2114         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2115
2116
2117         reg = MII_TG3_MISC_SHDW_WREN |
2118               MII_TG3_MISC_SHDW_APD_SEL |
2119               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2120         if (enable)
2121                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2122
2123         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2124 }
2125
2126 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2127 {
2128         u32 phy;
2129
2130         if (!tg3_flag(tp, 5705_PLUS) ||
2131             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2132                 return;
2133
2134         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2135                 u32 ephy;
2136
2137                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2138                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2139
2140                         tg3_writephy(tp, MII_TG3_FET_TEST,
2141                                      ephy | MII_TG3_FET_SHADOW_EN);
2142                         if (!tg3_readphy(tp, reg, &phy)) {
2143                                 if (enable)
2144                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2145                                 else
2146                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2147                                 tg3_writephy(tp, reg, phy);
2148                         }
2149                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2150                 }
2151         } else {
2152                 int ret;
2153
2154                 ret = tg3_phy_auxctl_read(tp,
2155                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2156                 if (!ret) {
2157                         if (enable)
2158                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2159                         else
2160                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2161                         tg3_phy_auxctl_write(tp,
2162                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2163                 }
2164         }
2165 }
2166
2167 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2168 {
2169         int ret;
2170         u32 val;
2171
2172         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2173                 return;
2174
2175         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2176         if (!ret)
2177                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2178                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2179 }
2180
2181 static void tg3_phy_apply_otp(struct tg3 *tp)
2182 {
2183         u32 otp, phy;
2184
2185         if (!tp->phy_otp)
2186                 return;
2187
2188         otp = tp->phy_otp;
2189
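             /* Unpack the one-time-programmable word into the individual
              * DSP coefficients written below; SMDSP access must stay
              * enabled for the duration of the writes.
              */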
2190         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2191                 return;
2192
2193         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2194         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2195         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2196
2197         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2198               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2199         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2200
2201         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2202         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2203         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2204
2205         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2206         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2207
2208         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2209         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2210
2211         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2212               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2213         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2214
2215         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2216 }
2217
2218 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2219 {
2220         u32 val;
2221
2222         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2223                 return;
2224
2225         tp->setlpicnt = 0;
2226
2227         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2228             current_link_up == 1 &&
2229             tp->link_config.active_duplex == DUPLEX_FULL &&
2230             (tp->link_config.active_speed == SPEED_100 ||
2231              tp->link_config.active_speed == SPEED_1000)) {
2232                 u32 eeectl;
2233
2234                 if (tp->link_config.active_speed == SPEED_1000)
2235                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2236                 else
2237                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2238
2239                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2240
2241                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2242                                   TG3_CL45_D7_EEERES_STAT, &val);
2243
2244                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2245                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2246                         tp->setlpicnt = 2;
2247         }
2248
2249         if (!tp->setlpicnt) {
2250                 if (current_link_up == 1 &&
2251                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2252                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2253                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2254                 }
2255
2256                 val = tr32(TG3_CPMU_EEE_MODE);
2257                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2258         }
2259 }
2260
2261 static void tg3_phy_eee_enable(struct tg3 *tp)
2262 {
2263         u32 val;
2264
2265         if (tp->link_config.active_speed == SPEED_1000 &&
2266             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2267              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2268              tg3_flag(tp, 57765_CLASS)) &&
2269             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2270                 val = MII_TG3_DSP_TAP26_ALNOKO |
2271                       MII_TG3_DSP_TAP26_RMRXSTO;
2272                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2273                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2274         }
2275
2276         val = tr32(TG3_CPMU_EEE_MODE);
2277         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2278 }
2279
2280 static int tg3_wait_macro_done(struct tg3 *tp)
2281 {
2282         int limit = 100;
2283
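             /* Bit 0x1000 of MII_TG3_DSP_CONTROL appears to act as the
              * macro busy flag; poll until it clears or the retry budget
              * is exhausted.
              */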
2284         while (limit--) {
2285                 u32 tmp32;
2286
2287                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2288                         if ((tmp32 & 0x1000) == 0)
2289                                 break;
2290                 }
2291         }
2292         if (limit < 0)
2293                 return -EBUSY;
2294
2295         return 0;
2296 }
2297
2298 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2299 {
2300         static const u32 test_pat[4][6] = {
2301         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2302         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2303         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2304         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2305         };
2306         int chan;
2307
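             /* Write a known test pattern into each of the four DSP
              * channels, then read it back through the macro interface.
              * A mismatch or macro timeout requests another PHY reset
              * via *resetp.
              */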
2308         for (chan = 0; chan < 4; chan++) {
2309                 int i;
2310
2311                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2312                              (chan * 0x2000) | 0x0200);
2313                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2314
2315                 for (i = 0; i < 6; i++)
2316                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2317                                      test_pat[chan][i]);
2318
2319                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2320                 if (tg3_wait_macro_done(tp)) {
2321                         *resetp = 1;
2322                         return -EBUSY;
2323                 }
2324
2325                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2326                              (chan * 0x2000) | 0x0200);
2327                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2328                 if (tg3_wait_macro_done(tp)) {
2329                         *resetp = 1;
2330                         return -EBUSY;
2331                 }
2332
2333                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2334                 if (tg3_wait_macro_done(tp)) {
2335                         *resetp = 1;
2336                         return -EBUSY;
2337                 }
2338
2339                 for (i = 0; i < 6; i += 2) {
2340                         u32 low, high;
2341
2342                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2343                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2344                             tg3_wait_macro_done(tp)) {
2345                                 *resetp = 1;
2346                                 return -EBUSY;
2347                         }
2348                         low &= 0x7fff;
2349                         high &= 0x000f;
2350                         if (low != test_pat[chan][i] ||
2351                             high != test_pat[chan][i+1]) {
2352                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2353                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2354                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2355
2356                                 return -EBUSY;
2357                         }
2358                 }
2359         }
2360
2361         return 0;
2362 }
2363
2364 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2365 {
2366         int chan;
2367
2368         for (chan = 0; chan < 4; chan++) {
2369                 int i;
2370
2371                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2372                              (chan * 0x2000) | 0x0200);
2373                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2374                 for (i = 0; i < 6; i++)
2375                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
2376                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2377                 if (tg3_wait_macro_done(tp))
2378                         return -EBUSY;
2379         }
2380
2381         return 0;
2382 }
2383
2384 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2385 {
2386         u32 reg32, phy9_orig;
2387         int retries, do_phy_reset, err;
2388
2389         retries = 10;
2390         do_phy_reset = 1;
2391         do {
2392                 if (do_phy_reset) {
2393                         err = tg3_bmcr_reset(tp);
2394                         if (err)
2395                                 return err;
2396                         do_phy_reset = 0;
2397                 }
2398
2399                 /* Disable transmitter and interrupt.  */
2400                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2401                         continue;
2402
2403                 reg32 |= 0x3000;
2404                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2405
2406                 /* Set full-duplex, 1000 mbps.  */
2407                 tg3_writephy(tp, MII_BMCR,
2408                              BMCR_FULLDPLX | BMCR_SPEED1000);
2409
2410                 /* Set to master mode.  */
2411                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2412                         continue;
2413
2414                 tg3_writephy(tp, MII_CTRL1000,
2415                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2416
2417                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2418                 if (err)
2419                         return err;
2420
2421                 /* Block the PHY control access.  */
2422                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2423
2424                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2425                 if (!err)
2426                         break;
2427         } while (--retries);
2428
2429         err = tg3_phy_reset_chanpat(tp);
2430         if (err)
2431                 return err;
2432
2433         tg3_phydsp_write(tp, 0x8005, 0x0000);
2434
2435         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2436         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2437
2438         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2439
2440         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2441
2442         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2443                 reg32 &= ~0x3000;
2444                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2445         } else if (!err)
2446                 err = -EBUSY;
2447
2448         return err;
2449 }
2450
2451 /* This will reset the tigon3 PHY unconditionally and then reapply
2452  * the chip- and PHY-specific workarounds below.
2453  */
2454 static int tg3_phy_reset(struct tg3 *tp)
2455 {
2456         u32 val, cpmuctrl;
2457         int err;
2458
2459         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2460                 val = tr32(GRC_MISC_CFG);
2461                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2462                 udelay(40);
2463         }
2464         err  = tg3_readphy(tp, MII_BMSR, &val);
2465         err |= tg3_readphy(tp, MII_BMSR, &val);
2466         if (err != 0)
2467                 return -EBUSY;
2468
2469         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2470                 netif_carrier_off(tp->dev);
2471                 tg3_link_report(tp);
2472         }
2473
2474         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2475             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2476             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2477                 err = tg3_phy_reset_5703_4_5(tp);
2478                 if (err)
2479                         return err;
2480                 goto out;
2481         }
2482
2483         cpmuctrl = 0;
2484         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2485             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2486                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2487                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2488                         tw32(TG3_CPMU_CTRL,
2489                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2490         }
2491
2492         err = tg3_bmcr_reset(tp);
2493         if (err)
2494                 return err;
2495
2496         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2497                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2498                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2499
2500                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2501         }
2502
2503         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2504             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2505                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2506                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2507                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2508                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2509                         udelay(40);
2510                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2511                 }
2512         }
2513
2514         if (tg3_flag(tp, 5717_PLUS) &&
2515             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2516                 return 0;
2517
2518         tg3_phy_apply_otp(tp);
2519
2520         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2521                 tg3_phy_toggle_apd(tp, true);
2522         else
2523                 tg3_phy_toggle_apd(tp, false);
2524
2525 out:
2526         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2527             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2528                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2529                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2530                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2531         }
2532
2533         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2534                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2535                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2536         }
2537
2538         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2539                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2540                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2541                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2542                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2543                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2544                 }
2545         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2546                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2547                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2548                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2549                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2550                                 tg3_writephy(tp, MII_TG3_TEST1,
2551                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2552                         } else
2553                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2554
2555                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2556                 }
2557         }
2558
2559         /* Set the extended packet length bit (bit 14) on all chips
2560          * that support jumbo frames. */
2561         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2562                 /* Cannot do read-modify-write on 5401 */
2563                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2564         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2565                 /* Set bit 14 with read-modify-write to preserve other bits */
2566                 err = tg3_phy_auxctl_read(tp,
2567                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2568                 if (!err)
2569                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2570                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2571         }
2572
2573         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2574          * jumbo frames transmission.
2575          */
2576         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2577                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2578                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2579                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2580         }
2581
2582         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2583                 /* adjust output voltage */
2584                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2585         }
2586
2587         tg3_phy_toggle_automdix(tp, 1);
2588         tg3_phy_set_wirespeed(tp);
2589         return 0;
2590 }
2591
2592 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2593 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2594 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2595                                           TG3_GPIO_MSG_NEED_VAUX)
2596 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2597         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2598          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2599          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2600          (TG3_GPIO_MSG_DRVR_PRES << 12))
2601
2602 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2603         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2604          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2605          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2606          (TG3_GPIO_MSG_NEED_VAUX << 12))
2607
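     /* Each PCI function owns one 4-bit nibble of the GPIO message word
      * (hence the shifts of 0/4/8/12 above).  Update only this
      * function's nibble and return the whole word so the caller can
      * inspect the other functions.
      */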
2608 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2609 {
2610         u32 status, shift;
2611
2612         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2613             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2614                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2615         else
2616                 status = tr32(TG3_CPMU_DRV_STATUS);
2617
2618         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2619         status &= ~(TG3_GPIO_MSG_MASK << shift);
2620         status |= (newstat << shift);
2621
2622         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2623             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2624                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2625         else
2626                 tw32(TG3_CPMU_DRV_STATUS, status);
2627
2628         return status >> TG3_APE_GPIO_MSG_SHIFT;
2629 }
2630
2631 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2632 {
2633         if (!tg3_flag(tp, IS_NIC))
2634                 return 0;
2635
2636         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2637             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2638             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2639                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2640                         return -EIO;
2641
2642                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2643
2644                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2645                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2646
2647                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2648         } else {
2649                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2650                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2651         }
2652
2653         return 0;
2654 }
2655
2656 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2657 {
2658         u32 grc_local_ctrl;
2659
2660         if (!tg3_flag(tp, IS_NIC) ||
2661             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2662             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2663                 return;
2664
2665         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2666
2667         tw32_wait_f(GRC_LOCAL_CTRL,
2668                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2669                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2670
2671         tw32_wait_f(GRC_LOCAL_CTRL,
2672                     grc_local_ctrl,
2673                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2674
2675         tw32_wait_f(GRC_LOCAL_CTRL,
2676                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2677                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2678 }
2679
2680 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2681 {
2682         if (!tg3_flag(tp, IS_NIC))
2683                 return;
2684
2685         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2686             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2687                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2688                             (GRC_LCLCTRL_GPIO_OE0 |
2689                              GRC_LCLCTRL_GPIO_OE1 |
2690                              GRC_LCLCTRL_GPIO_OE2 |
2691                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2692                              GRC_LCLCTRL_GPIO_OUTPUT1),
2693                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2694         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2695                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2696                 /* The 5761 (non-E) device swaps GPIO 0 and GPIO 2. */
2697                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2698                                      GRC_LCLCTRL_GPIO_OE1 |
2699                                      GRC_LCLCTRL_GPIO_OE2 |
2700                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2701                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2702                                      tp->grc_local_ctrl;
2703                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2704                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2705
2706                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2707                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2708                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2709
2710                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2711                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2712                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2713         } else {
2714                 u32 no_gpio2;
2715                 u32 grc_local_ctrl = 0;
2716
2717                 /* Workaround to prevent drawing excess current. */
2718                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2719                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2720                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2721                                     grc_local_ctrl,
2722                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2723                 }
2724
2725                 /* On 5753 and variants, GPIO2 cannot be used. */
2726                 no_gpio2 = tp->nic_sram_data_cfg &
2727                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2728
2729                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2730                                   GRC_LCLCTRL_GPIO_OE1 |
2731                                   GRC_LCLCTRL_GPIO_OE2 |
2732                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2733                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2734                 if (no_gpio2) {
2735                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2736                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2737                 }
2738                 tw32_wait_f(GRC_LOCAL_CTRL,
2739                             tp->grc_local_ctrl | grc_local_ctrl,
2740                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2741
2742                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2743
2744                 tw32_wait_f(GRC_LOCAL_CTRL,
2745                             tp->grc_local_ctrl | grc_local_ctrl,
2746                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2747
2748                 if (!no_gpio2) {
2749                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2750                         tw32_wait_f(GRC_LOCAL_CTRL,
2751                                     tp->grc_local_ctrl | grc_local_ctrl,
2752                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2753                 }
2754         }
2755 }
2756
2757 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2758 {
2759         u32 msg = 0;
2760
2761         /* Serialize power state transitions */
2762         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2763                 return;
2764
2765         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2766                 msg = TG3_GPIO_MSG_NEED_VAUX;
2767
2768         msg = tg3_set_function_status(tp, msg);
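             /* The returned word aggregates every function's status
              * nibble.  If any other function still has a driver
              * present, defer the power source decision to it.
              */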
2769
2770         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2771                 goto done;
2772
2773         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2774                 tg3_pwrsrc_switch_to_vaux(tp);
2775         else
2776                 tg3_pwrsrc_die_with_vmain(tp);
2777
2778 done:
2779         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2780 }
2781
2782 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2783 {
2784         bool need_vaux = false;
2785
2786         /* The GPIOs do something completely different on 57765. */
2787         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2788                 return;
2789
2790         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2791             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2792             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2793                 tg3_frob_aux_power_5717(tp, include_wol ?
2794                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2795                 return;
2796         }
2797
2798         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2799                 struct net_device *dev_peer;
2800
2801                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2802
2803                 /* remove_one() may have been run on the peer. */
2804                 if (dev_peer) {
2805                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2806
2807                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2808                                 return;
2809
2810                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2811                             tg3_flag(tp_peer, ENABLE_ASF))
2812                                 need_vaux = true;
2813                 }
2814         }
2815
2816         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2817             tg3_flag(tp, ENABLE_ASF))
2818                 need_vaux = true;
2819
2820         if (need_vaux)
2821                 tg3_pwrsrc_switch_to_vaux(tp);
2822         else
2823                 tg3_pwrsrc_die_with_vmain(tp);
2824 }
2825
2826 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2827 {
2828         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2829                 return 1;
2830         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2831                 if (speed != SPEED_10)
2832                         return 1;
2833         } else if (speed == SPEED_10)
2834                 return 1;
2835
2836         return 0;
2837 }
2838
2839 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2840 {
2841         u32 val;
2842
2843         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2844                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2845                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2846                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2847
2848                         sg_dig_ctrl |=
2849                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2850                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2851                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2852                 }
2853                 return;
2854         }
2855
2856         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2857                 tg3_bmcr_reset(tp);
2858                 val = tr32(GRC_MISC_CFG);
2859                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2860                 udelay(40);
2861                 return;
2862         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2863                 u32 phytest;
2864                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2865                         u32 phy;
2866
2867                         tg3_writephy(tp, MII_ADVERTISE, 0);
2868                         tg3_writephy(tp, MII_BMCR,
2869                                      BMCR_ANENABLE | BMCR_ANRESTART);
2870
2871                         tg3_writephy(tp, MII_TG3_FET_TEST,
2872                                      phytest | MII_TG3_FET_SHADOW_EN);
2873                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2874                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2875                                 tg3_writephy(tp,
2876                                              MII_TG3_FET_SHDW_AUXMODE4,
2877                                              phy);
2878                         }
2879                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2880                 }
2881                 return;
2882         } else if (do_low_power) {
2883                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2884                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2885
2886                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2887                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2888                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2889                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2890         }
2891
2892         /* The PHY should not be powered down on some chips because
2893          * of bugs.
2894          */
2895         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2896             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2897             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2898              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2899             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2900              !tp->pci_fn))
2901                 return;
2902
2903         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2904             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2905                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2906                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2907                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2908                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2909         }
2910
2911         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2912 }
2913
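     /* NVRAM access is arbitrated between the driver and the on-chip
      * firmware through the SWARB register: post a request bit, then
      * poll for the matching grant (up to 8000 x 20us polls).  A lock
      * count lets nested lock calls share a single hardware grant.
      */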
2914 /* tp->lock is held. */
2915 static int tg3_nvram_lock(struct tg3 *tp)
2916 {
2917         if (tg3_flag(tp, NVRAM)) {
2918                 int i;
2919
2920                 if (tp->nvram_lock_cnt == 0) {
2921                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2922                         for (i = 0; i < 8000; i++) {
2923                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2924                                         break;
2925                                 udelay(20);
2926                         }
2927                         if (i == 8000) {
2928                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2929                                 return -ENODEV;
2930                         }
2931                 }
2932                 tp->nvram_lock_cnt++;
2933         }
2934         return 0;
2935 }
2936
2937 /* tp->lock is held. */
2938 static void tg3_nvram_unlock(struct tg3 *tp)
2939 {
2940         if (tg3_flag(tp, NVRAM)) {
2941                 if (tp->nvram_lock_cnt > 0)
2942                         tp->nvram_lock_cnt--;
2943                 if (tp->nvram_lock_cnt == 0)
2944                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2945         }
2946 }
2947
2948 /* tp->lock is held. */
2949 static void tg3_enable_nvram_access(struct tg3 *tp)
2950 {
2951         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2952                 u32 nvaccess = tr32(NVRAM_ACCESS);
2953
2954                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2955         }
2956 }
2957
2958 /* tp->lock is held. */
2959 static void tg3_disable_nvram_access(struct tg3 *tp)
2960 {
2961         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2962                 u32 nvaccess = tr32(NVRAM_ACCESS);
2963
2964                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2965         }
2966 }
2967
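     /* Fallback for devices without an NVRAM controller: the legacy
      * serial EEPROM is driven through GRC_EEPROM_ADDR/GRC_EEPROM_DATA.
      * A read is started by writing the dword offset plus the READ and
      * START bits, then polling COMPLETE for up to ~1s (1000 x 1ms).
      */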
2968 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2969                                         u32 offset, u32 *val)
2970 {
2971         u32 tmp;
2972         int i;
2973
2974         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2975                 return -EINVAL;
2976
2977         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2978                                         EEPROM_ADDR_DEVID_MASK |
2979                                         EEPROM_ADDR_READ);
2980         tw32(GRC_EEPROM_ADDR,
2981              tmp |
2982              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2983              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2984               EEPROM_ADDR_ADDR_MASK) |
2985              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2986
2987         for (i = 0; i < 1000; i++) {
2988                 tmp = tr32(GRC_EEPROM_ADDR);
2989
2990                 if (tmp & EEPROM_ADDR_COMPLETE)
2991                         break;
2992                 msleep(1);
2993         }
2994         if (!(tmp & EEPROM_ADDR_COMPLETE))
2995                 return -EBUSY;
2996
2997         tmp = tr32(GRC_EEPROM_DATA);
2998
2999         /*
3000          * The data will always be opposite the native endian
3001          * format.  Perform a blind byteswap to compensate.
3002          */
3003         *val = swab32(tmp);
3004
3005         return 0;
3006 }
3007
3008 #define NVRAM_CMD_TIMEOUT 10000
3009
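     /* Kick one command into the NVRAM state machine and busy-wait for
      * NVRAM_CMD_DONE, polling every 10us for up to NVRAM_CMD_TIMEOUT
      * iterations (~100ms).
      */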
3010 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3011 {
3012         int i;
3013
3014         tw32(NVRAM_CMD, nvram_cmd);
3015         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3016                 udelay(10);
3017                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3018                         udelay(10);
3019                         break;
3020                 }
3021         }
3022
3023         if (i == NVRAM_CMD_TIMEOUT)
3024                 return -EBUSY;
3025
3026         return 0;
3027 }
3028
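     /* Certain buffered Atmel flash parts use page-granular addressing
      * with a non-power-of-2 page size, so a linear offset must be
      * translated: the page index goes in the high bits and the
      * intra-page offset in the low bits.  tg3_nvram_logical_addr()
      * below is the inverse mapping.
      */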
3029 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3030 {
3031         if (tg3_flag(tp, NVRAM) &&
3032             tg3_flag(tp, NVRAM_BUFFERED) &&
3033             tg3_flag(tp, FLASH) &&
3034             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3035             (tp->nvram_jedecnum == JEDEC_ATMEL))
3036
3037                 addr = ((addr / tp->nvram_pagesize) <<
3038                         ATMEL_AT45DB0X1B_PAGE_POS) +
3039                        (addr % tp->nvram_pagesize);
3040
3041         return addr;
3042 }
3043
3044 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3045 {
3046         if (tg3_flag(tp, NVRAM) &&
3047             tg3_flag(tp, NVRAM_BUFFERED) &&
3048             tg3_flag(tp, FLASH) &&
3049             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3050             (tp->nvram_jedecnum == JEDEC_ATMEL))
3051
3052                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3053                         tp->nvram_pagesize) +
3054                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3055
3056         return addr;
3057 }
3058
3059 /* NOTE: Data read in from NVRAM is byteswapped according to
3060  * the byteswapping settings for all other register accesses.
3061  * tg3 devices are BE devices, so on a BE machine, the data
3062  * returned will be exactly as it is seen in NVRAM.  On a LE
3063  * machine, the 32-bit value will be byteswapped.
3064  */
3065 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3066 {
3067         int ret;
3068
3069         if (!tg3_flag(tp, NVRAM))
3070                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3071
3072         offset = tg3_nvram_phys_addr(tp, offset);
3073
3074         if (offset > NVRAM_ADDR_MSK)
3075                 return -EINVAL;
3076
3077         ret = tg3_nvram_lock(tp);
3078         if (ret)
3079                 return ret;
3080
3081         tg3_enable_nvram_access(tp);
3082
3083         tw32(NVRAM_ADDR, offset);
3084         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3085                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3086
3087         if (ret == 0)
3088                 *val = tr32(NVRAM_RDDATA);
3089
3090         tg3_disable_nvram_access(tp);
3091
3092         tg3_nvram_unlock(tp);
3093
3094         return ret;
3095 }
3096
3097 /* Ensures NVRAM data is in bytestream format. */
3098 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3099 {
3100         u32 v;
3101         int res = tg3_nvram_read(tp, offset, &v);
3102         if (!res)
3103                 *val = cpu_to_be32(v);
3104         return res;
3105 }
3106
3107 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3108                                     u32 offset, u32 len, u8 *buf)
3109 {
3110         int i, j, rc = 0;
3111         u32 val;
3112
3113         for (i = 0; i < len; i += 4) {
3114                 u32 addr;
3115                 __be32 data;
3116
3117                 addr = offset + i;
3118
3119                 memcpy(&data, buf + i, 4);
3120
3121                 /*
3122                  * The SEEPROM interface expects the data to always be opposite
3123                  * the native endian format.  We accomplish this by reversing
3124                  * all the operations that would have been performed on the
3125                  * data from a call to tg3_nvram_read_be32().
3126                  */
3127                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3128
3129                 val = tr32(GRC_EEPROM_ADDR);
3130                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3131
3132                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3133                         EEPROM_ADDR_READ);
3134                 tw32(GRC_EEPROM_ADDR, val |
3135                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3136                         (addr & EEPROM_ADDR_ADDR_MASK) |
3137                         EEPROM_ADDR_START |
3138                         EEPROM_ADDR_WRITE);
3139
3140                 for (j = 0; j < 1000; j++) {
3141                         val = tr32(GRC_EEPROM_ADDR);
3142
3143                         if (val & EEPROM_ADDR_COMPLETE)
3144                                 break;
3145                         msleep(1);
3146                 }
3147                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3148                         rc = -EBUSY;
3149                         break;
3150                 }
3151         }
3152
3153         return rc;
3154 }
3155
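     /* Unbuffered flash must be erased one page at a time, so writes
      * are done read-modify-write: read the whole page into a bounce
      * buffer, merge in the caller's bytes, erase the page, then
      * program it back word by word.
      */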
3156 /* offset and length are dword aligned */
3157 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3158                 u8 *buf)
3159 {
3160         int ret = 0;
3161         u32 pagesize = tp->nvram_pagesize;
3162         u32 pagemask = pagesize - 1;
3163         u32 nvram_cmd;
3164         u8 *tmp;
3165
3166         tmp = kmalloc(pagesize, GFP_KERNEL);
3167         if (tmp == NULL)
3168                 return -ENOMEM;
3169
3170         while (len) {
3171                 int j;
3172                 u32 phy_addr, page_off, size;
3173
3174                 phy_addr = offset & ~pagemask;
3175
3176                 for (j = 0; j < pagesize; j += 4) {
3177                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3178                                                   (__be32 *) (tmp + j));
3179                         if (ret)
3180                                 break;
3181                 }
3182                 if (ret)
3183                         break;
3184
3185                 page_off = offset & pagemask;
3186                 size = pagesize;
3187                 if (len < size)
3188                         size = len;
3189
3190                 len -= size;
3191
3192                 memcpy(tmp + page_off, buf, size);
3193
3194                 offset = offset + (pagesize - page_off);
3195
3196                 tg3_enable_nvram_access(tp);
3197
3198                 /*
3199                  * Before we can erase the flash page, we need
3200                  * to issue a special "write enable" command.
3201                  */
3202                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3203
3204                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3205                         break;
3206
3207                 /* Erase the target page */
3208                 tw32(NVRAM_ADDR, phy_addr);
3209
3210                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3211                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3212
3213                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3214                         break;
3215
3216                 /* Issue another write enable to start the write. */
3217                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3218
3219                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3220                         break;
3221
3222                 for (j = 0; j < pagesize; j += 4) {
3223                         __be32 data;
3224
3225                         data = *((__be32 *) (tmp + j));
3226
3227                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3228
3229                         tw32(NVRAM_ADDR, phy_addr + j);
3230
3231                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3232                                 NVRAM_CMD_WR;
3233
3234                         if (j == 0)
3235                                 nvram_cmd |= NVRAM_CMD_FIRST;
3236                         else if (j == (pagesize - 4))
3237                                 nvram_cmd |= NVRAM_CMD_LAST;
3238
3239                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3240                         if (ret)
3241                                 break;
3242                 }
3243                 if (ret)
3244                         break;
3245         }
3246
3247         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3248         tg3_nvram_exec_cmd(tp, nvram_cmd);
3249
3250         kfree(tmp);
3251
3252         return ret;
3253 }
3254
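     /* Buffered flash (and plain EEPROM) accepts direct word writes;
      * the controller only needs NVRAM_CMD_FIRST/LAST markers at page
      * and transfer boundaries, so no explicit erase is issued here.
      */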
3255 /* offset and length are dword aligned */
3256 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3257                 u8 *buf)
3258 {
3259         int i, ret = 0;
3260
3261         for (i = 0; i < len; i += 4, offset += 4) {
3262                 u32 page_off, phy_addr, nvram_cmd;
3263                 __be32 data;
3264
3265                 memcpy(&data, buf + i, 4);
3266                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3267
3268                 page_off = offset % tp->nvram_pagesize;
3269
3270                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3271
3272                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3273
3274                 if (page_off == 0 || i == 0)
3275                         nvram_cmd |= NVRAM_CMD_FIRST;
3276                 if (page_off == (tp->nvram_pagesize - 4))
3277                         nvram_cmd |= NVRAM_CMD_LAST;
3278
3279                 if (i == (len - 4))
3280                         nvram_cmd |= NVRAM_CMD_LAST;
3281
3282                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3283                     !tg3_flag(tp, FLASH) ||
3284                     !tg3_flag(tp, 57765_PLUS))
3285                         tw32(NVRAM_ADDR, phy_addr);
3286
3287                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3288                     !tg3_flag(tp, 5755_PLUS) &&
3289                     (tp->nvram_jedecnum == JEDEC_ST) &&
3290                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3291                         u32 cmd;
3292
3293                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3294                         ret = tg3_nvram_exec_cmd(tp, cmd);
3295                         if (ret)
3296                                 break;
3297                 }
3298                 if (!tg3_flag(tp, FLASH)) {
3299                         /* We always do complete word writes to eeprom. */
3300                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3301                 }
3302
3303                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3304                 if (ret)
3305                         break;
3306         }
3307         return ret;
3308 }
3309
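     /* Top-level NVRAM write: deassert the write-protect GPIO
      * (OUTPUT1) if necessary, then take the NVRAM lock, enable write
      * access in GRC_MODE, and dispatch to the buffered or unbuffered
      * path.
      */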
3310 /* offset and length are dword aligned */
3311 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3312 {
3313         int ret;
3314
3315         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3316                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3317                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3318                 udelay(40);
3319         }
3320
3321         if (!tg3_flag(tp, NVRAM)) {
3322                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3323         } else {
3324                 u32 grc_mode;
3325
3326                 ret = tg3_nvram_lock(tp);
3327                 if (ret)
3328                         return ret;
3329
3330                 tg3_enable_nvram_access(tp);
3331                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3332                         tw32(NVRAM_WRITE1, 0x406);
3333
3334                 grc_mode = tr32(GRC_MODE);
3335                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3336
3337                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3338                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3339                                 buf);
3340                 } else {
3341                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3342                                 buf);
3343                 }
3344
3345                 grc_mode = tr32(GRC_MODE);
3346                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3347
3348                 tg3_disable_nvram_access(tp);
3349                 tg3_nvram_unlock(tp);
3350         }
3351
3352         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3353                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3354                 udelay(40);
3355         }
3356
3357         return ret;
3358 }
3359
3360 #define RX_CPU_SCRATCH_BASE     0x30000
3361 #define RX_CPU_SCRATCH_SIZE     0x04000
3362 #define TX_CPU_SCRATCH_BASE     0x34000
3363 #define TX_CPU_SCRATCH_SIZE     0x04000
3364
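     /* Halt one of the on-chip RX/TX CPUs by asserting CPU_MODE_HALT
      * until the core reports halted.  The 5906 has no such CPU and is
      * halted through GRC_VCPU_EXT_CTRL instead.
      */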
3365 /* tp->lock is held. */
3366 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3367 {
3368         int i;
3369
3370         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3371
3372         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3373                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3374
3375                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3376                 return 0;
3377         }
3378         if (offset == RX_CPU_BASE) {
3379                 for (i = 0; i < 10000; i++) {
3380                         tw32(offset + CPU_STATE, 0xffffffff);
3381                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3382                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3383                                 break;
3384                 }
3385
3386                 tw32(offset + CPU_STATE, 0xffffffff);
3387                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3388                 udelay(10);
3389         } else {
3390                 for (i = 0; i < 10000; i++) {
3391                         tw32(offset + CPU_STATE, 0xffffffff);
3392                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3393                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3394                                 break;
3395                 }
3396         }
3397
3398         if (i >= 10000) {
3399                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3400                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3401                 return -ENODEV;
3402         }
3403
3404         /* Clear firmware's nvram arbitration. */
3405         if (tg3_flag(tp, NVRAM))
3406                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3407         return 0;
3408 }
3409
3410 struct fw_info {
3411         unsigned int fw_base;
3412         unsigned int fw_len;
3413         const __be32 *fw_data;
3414 };
3415
3416 /* tp->lock is held. */
3417 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3418                                  u32 cpu_scratch_base, int cpu_scratch_size,
3419                                  struct fw_info *info)
3420 {
3421         int err, lock_err, i;
3422         void (*write_op)(struct tg3 *, u32, u32);
3423
3424         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3425                 netdev_err(tp->dev,
3426                            "%s: Trying to load TX cpu firmware on a 5705_PLUS device\n",
3427                            __func__);
3428                 return -EINVAL;
3429         }
3430
3431         if (tg3_flag(tp, 5705_PLUS))
3432                 write_op = tg3_write_mem;
3433         else
3434                 write_op = tg3_write_indirect_reg32;
3435
3436         /* The bootcode may still be loading at this point.
3437          * Acquire the NVRAM lock before halting the CPU.
3438          */
3439         lock_err = tg3_nvram_lock(tp);
3440         err = tg3_halt_cpu(tp, cpu_base);
3441         if (!lock_err)
3442                 tg3_nvram_unlock(tp);
3443         if (err)
3444                 goto out;
3445
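             /* Zero the entire scratch area, then copy the firmware
              * image in one word at a time (converting from the blob's
              * big-endian byte order) at its link offset, taken from
              * the low 16 bits of fw_base.
              */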
3446         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3447                 write_op(tp, cpu_scratch_base + i, 0);
3448         tw32(cpu_base + CPU_STATE, 0xffffffff);
3449         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3450         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3451                 write_op(tp, (cpu_scratch_base +
3452                               (info->fw_base & 0xffff) +
3453                               (i * sizeof(u32))),
3454                               be32_to_cpu(info->fw_data[i]));
3455
3456         err = 0;
3457
3458 out:
3459         return err;
3460 }
3461
3462 /* tp->lock is held. */
3463 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3464 {
3465         struct fw_info info;
3466         const __be32 *fw_data;
3467         int err, i;
3468
3469         fw_data = (void *)tp->fw->data;
3470
3471         /* The firmware blob starts with version numbers, followed by
3472          * the start address and length.  We use the complete length:
3473          * length = end_address_of_bss - start_address_of_text.
3474          * The remainder is the image, loaded contiguously
3475          * from the start address. */
3476
3477         info.fw_base = be32_to_cpu(fw_data[1]);
3478         info.fw_len = tp->fw->size - 12;
3479         info.fw_data = &fw_data[3];
3480
3481         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3482                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3483                                     &info);
3484         if (err)
3485                 return err;
3486
3487         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3488                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3489                                     &info);
3490         if (err)
3491                 return err;
3492
3493         /* Now startup only the RX cpu. */
3494         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3495         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3496
3497         for (i = 0; i < 5; i++) {
3498                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3499                         break;
3500                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3501                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3502                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3503                 udelay(1000);
3504         }
3505         if (i >= 5) {
3506                 netdev_err(tp->dev, "%s failed to set RX CPU PC: have %08x, "
3507                            "want %08x\n", __func__,
3508                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3509                 return -ENODEV;
3510         }
3511         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3512         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3513
3514         return 0;
3515 }
3516
3517 /* tp->lock is held. */
3518 static int tg3_load_tso_firmware(struct tg3 *tp)
3519 {
3520         struct fw_info info;
3521         const __be32 *fw_data;
3522         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3523         int err, i;
3524
3525         if (tg3_flag(tp, HW_TSO_1) ||
3526             tg3_flag(tp, HW_TSO_2) ||
3527             tg3_flag(tp, HW_TSO_3))
3528                 return 0;
3529
3530         fw_data = (void *)tp->fw->data;
3531
3532         /* The firmware blob starts with version numbers, followed by
3533          * the start address and length.  We use the complete length:
3534          * length = end_address_of_bss - start_address_of_text.
3535          * The remainder is the image, loaded contiguously
3536          * from the start address. */
3537
3538         info.fw_base = be32_to_cpu(fw_data[1]);
3539         cpu_scratch_size = tp->fw_len;
3540         info.fw_len = tp->fw->size - 12;
3541         info.fw_data = &fw_data[3];
3542
3543         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3544                 cpu_base = RX_CPU_BASE;
3545                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3546         } else {
3547                 cpu_base = TX_CPU_BASE;
3548                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3549                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3550         }
3551
3552         err = tg3_load_firmware_cpu(tp, cpu_base,
3553                                     cpu_scratch_base, cpu_scratch_size,
3554                                     &info);
3555         if (err)
3556                 return err;
3557
3558         /* Now startup the cpu. */
3559         tw32(cpu_base + CPU_STATE, 0xffffffff);
3560         tw32_f(cpu_base + CPU_PC, info.fw_base);
3561
3562         for (i = 0; i < 5; i++) {
3563                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3564                         break;
3565                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3566                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3567                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3568                 udelay(1000);
3569         }
3570         if (i >= 5) {
3571                 netdev_err(tp->dev,
3572                            "%s failed to set CPU PC: have %08x, want %08x\n",
3573                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3574                 return -ENODEV;
3575         }
3576         tw32(cpu_base + CPU_STATE, 0xffffffff);
3577         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3578         return 0;
3579 }
3580
3581
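     /* Program the station address into all four MAC address register
      * pairs (and the twelve extended pairs on 5703/5704).  The byte
      * sum of the address also seeds the TX backoff state machine.
      */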
3582 /* tp->lock is held. */
3583 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3584 {
3585         u32 addr_high, addr_low;
3586         int i;
3587
3588         addr_high = ((tp->dev->dev_addr[0] << 8) |
3589                      tp->dev->dev_addr[1]);
3590         addr_low = ((tp->dev->dev_addr[2] << 24) |
3591                     (tp->dev->dev_addr[3] << 16) |
3592                     (tp->dev->dev_addr[4] <<  8) |
3593                     (tp->dev->dev_addr[5] <<  0));
3594         for (i = 0; i < 4; i++) {
3595                 if (i == 1 && skip_mac_1)
3596                         continue;
3597                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3598                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3599         }
3600
3601         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3602             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3603                 for (i = 0; i < 12; i++) {
3604                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3605                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3606                 }
3607         }
3608
3609         addr_high = (tp->dev->dev_addr[0] +
3610                      tp->dev->dev_addr[1] +
3611                      tp->dev->dev_addr[2] +
3612                      tp->dev->dev_addr[3] +
3613                      tp->dev->dev_addr[4] +
3614                      tp->dev->dev_addr[5]) &
3615                 TX_BACKOFF_SEED_MASK;
3616         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3617 }
3618
3619 static void tg3_enable_register_access(struct tg3 *tp)
3620 {
3621         /*
3622          * Make sure register accesses (indirect or otherwise) will function
3623          * correctly.
3624          */
3625         pci_write_config_dword(tp->pdev,
3626                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3627 }
3628
3629 static int tg3_power_up(struct tg3 *tp)
3630 {
3631         int err;
3632
3633         tg3_enable_register_access(tp);
3634
3635         err = pci_set_power_state(tp->pdev, PCI_D0);
3636         if (!err) {
3637                 /* Switch out of Vaux if the device is a NIC */
3638                 tg3_pwrsrc_switch_to_vmain(tp);
3639         } else {
3640                 netdev_err(tp->dev, "Transition to D0 failed\n");
3641         }
3642
3643         return err;
3644 }
3645
3646 static int tg3_setup_phy(struct tg3 *, int);
3647
3648 static int tg3_power_down_prepare(struct tg3 *tp)
3649 {
3650         u32 misc_host_ctrl;
3651         bool device_should_wake, do_low_power;
3652
3653         tg3_enable_register_access(tp);
3654
3655         /* Restore the CLKREQ setting. */
3656         if (tg3_flag(tp, CLKREQ_BUG)) {
3657                 u16 lnkctl;
3658
3659                 pci_read_config_word(tp->pdev,
3660                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3661                                      &lnkctl);
3662                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3663                 pci_write_config_word(tp->pdev,
3664                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3665                                       lnkctl);
3666         }
3667
3668         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3669         tw32(TG3PCI_MISC_HOST_CTRL,
3670              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3671
3672         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3673                              tg3_flag(tp, WOL_ENABLE);
3674
3675         if (tg3_flag(tp, USE_PHYLIB)) {
3676                 do_low_power = false;
3677                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3678                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3679                         struct phy_device *phydev;
3680                         u32 phyid, advertising;
3681
3682                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3683
3684                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3685
3686                         tp->link_config.speed = phydev->speed;
3687                         tp->link_config.duplex = phydev->duplex;
3688                         tp->link_config.autoneg = phydev->autoneg;
3689                         tp->link_config.advertising = phydev->advertising;
3690
3691                         advertising = ADVERTISED_TP |
3692                                       ADVERTISED_Pause |
3693                                       ADVERTISED_Autoneg |
3694                                       ADVERTISED_10baseT_Half;
3695
3696                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3697                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3698                                         advertising |=
3699                                                 ADVERTISED_100baseT_Half |
3700                                                 ADVERTISED_100baseT_Full |
3701                                                 ADVERTISED_10baseT_Full;
3702                                 else
3703                                         advertising |= ADVERTISED_10baseT_Full;
3704                         }
3705
3706                         phydev->advertising = advertising;
3707
3708                         phy_start_aneg(phydev);
3709
3710                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3711                         if (phyid != PHY_ID_BCMAC131) {
3712                                 phyid &= PHY_BCM_OUI_MASK;
3713                                 if (phyid == PHY_BCM_OUI_1 ||
3714                                     phyid == PHY_BCM_OUI_2 ||
3715                                     phyid == PHY_BCM_OUI_3)
3716                                         do_low_power = true;
3717                         }
3718                 }
3719         } else {
3720                 do_low_power = true;
3721
3722                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3723                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3724
3725                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3726                         tg3_setup_phy(tp, 0);
3727         }
3728
3729         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3730                 u32 val;
3731
3732                 val = tr32(GRC_VCPU_EXT_CTRL);
3733                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3734         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3735                 int i;
3736                 u32 val;
3737
3738                 for (i = 0; i < 200; i++) {
3739                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3740                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3741                                 break;
3742                         msleep(1);
3743                 }
3744         }
3745         if (tg3_flag(tp, WOL_CAP))
3746                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3747                                                      WOL_DRV_STATE_SHUTDOWN |
3748                                                      WOL_DRV_WOL |
3749                                                      WOL_SET_MAGIC_PKT);
3750
3751         if (device_should_wake) {
3752                 u32 mac_mode;
3753
3754                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3755                         if (do_low_power &&
3756                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3757                                 tg3_phy_auxctl_write(tp,
3758                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3759                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3760                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3761                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3762                                 udelay(40);
3763                         }
3764
3765                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3766                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3767                         else
3768                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3769
3770                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3771                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3772                             ASIC_REV_5700) {
3773                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3774                                              SPEED_100 : SPEED_10;
3775                                 if (tg3_5700_link_polarity(tp, speed))
3776                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3777                                 else
3778                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3779                         }
3780                 } else {
3781                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3782                 }
3783
3784                 if (!tg3_flag(tp, 5750_PLUS))
3785                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3786
3787                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3788                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3789                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3790                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3791
3792                 if (tg3_flag(tp, ENABLE_APE))
3793                         mac_mode |= MAC_MODE_APE_TX_EN |
3794                                     MAC_MODE_APE_RX_EN |
3795                                     MAC_MODE_TDE_ENABLE;
3796
3797                 tw32_f(MAC_MODE, mac_mode);
3798                 udelay(100);
3799
3800                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3801                 udelay(10);
3802         }
3803
3804         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3805             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3806              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3807                 u32 base_val;
3808
3809                 base_val = tp->pci_clock_ctrl;
3810                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3811                              CLOCK_CTRL_TXCLK_DISABLE);
3812
3813                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3814                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3815         } else if (tg3_flag(tp, 5780_CLASS) ||
3816                    tg3_flag(tp, CPMU_PRESENT) ||
3817                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3818                 /* do nothing */
3819         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3820                 u32 newbits1, newbits2;
3821
3822                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3823                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3824                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3825                                     CLOCK_CTRL_TXCLK_DISABLE |
3826                                     CLOCK_CTRL_ALTCLK);
3827                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3828                 } else if (tg3_flag(tp, 5705_PLUS)) {
3829                         newbits1 = CLOCK_CTRL_625_CORE;
3830                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3831                 } else {
3832                         newbits1 = CLOCK_CTRL_ALTCLK;
3833                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3834                 }
3835
3836                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3837                             40);
3838
3839                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3840                             40);
3841
3842                 if (!tg3_flag(tp, 5705_PLUS)) {
3843                         u32 newbits3;
3844
3845                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3846                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3847                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3848                                             CLOCK_CTRL_TXCLK_DISABLE |
3849                                             CLOCK_CTRL_44MHZ_CORE);
3850                         } else {
3851                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3852                         }
3853
3854                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3855                                     tp->pci_clock_ctrl | newbits3, 40);
3856                 }
3857         }
3858
3859         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3860                 tg3_power_down_phy(tp, do_low_power);
3861
3862         tg3_frob_aux_power(tp, true);
3863
3864         /* Workaround for unstable PLL clock */
3865         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3866             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3867                 u32 val = tr32(0x7d00);
3868
3869                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3870                 tw32(0x7d00, val);
3871                 if (!tg3_flag(tp, ENABLE_ASF)) {
3872                         int err;
3873
3874                         err = tg3_nvram_lock(tp);
3875                         tg3_halt_cpu(tp, RX_CPU_BASE);
3876                         if (!err)
3877                                 tg3_nvram_unlock(tp);
3878                 }
3879         }
3880
3881         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3882
3883         return 0;
3884 }
3885
3886 static void tg3_power_down(struct tg3 *tp)
3887 {
3888         tg3_power_down_prepare(tp);
3889
3890         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3891         pci_set_power_state(tp->pdev, PCI_D3hot);
3892 }
3893
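     /* Decode the PHY's auxiliary status register into speed/duplex.
      * FET-style PHYs encode this differently and are handled in the
      * default case.
      */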
3894 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3895 {
3896         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3897         case MII_TG3_AUX_STAT_10HALF:
3898                 *speed = SPEED_10;
3899                 *duplex = DUPLEX_HALF;
3900                 break;
3901
3902         case MII_TG3_AUX_STAT_10FULL:
3903                 *speed = SPEED_10;
3904                 *duplex = DUPLEX_FULL;
3905                 break;
3906
3907         case MII_TG3_AUX_STAT_100HALF:
3908                 *speed = SPEED_100;
3909                 *duplex = DUPLEX_HALF;
3910                 break;
3911
3912         case MII_TG3_AUX_STAT_100FULL:
3913                 *speed = SPEED_100;
3914                 *duplex = DUPLEX_FULL;
3915                 break;
3916
3917         case MII_TG3_AUX_STAT_1000HALF:
3918                 *speed = SPEED_1000;
3919                 *duplex = DUPLEX_HALF;
3920                 break;
3921
3922         case MII_TG3_AUX_STAT_1000FULL:
3923                 *speed = SPEED_1000;
3924                 *duplex = DUPLEX_FULL;
3925                 break;
3926
3927         default:
3928                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3929                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3930                                  SPEED_10;
3931                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3932                                   DUPLEX_HALF;
3933                         break;
3934                 }
3935                 *speed = SPEED_UNKNOWN;
3936                 *duplex = DUPLEX_UNKNOWN;
3937                 break;
3938         }
3939 }
3940
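     /* Program the autoneg advertisements: base-page abilities plus
      * flow control into MII_ADVERTISE, gigabit abilities into
      * MII_CTRL1000, and, where supported, EEE abilities through the
      * clause-45 MDIO_AN_EEE_ADV register.
      */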
3941 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3942 {
3943         int err = 0;
3944         u32 val, new_adv;
3945
3946         new_adv = ADVERTISE_CSMA;
3947         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3948         new_adv |= mii_advertise_flowctrl(flowctrl);
3949
3950         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3951         if (err)
3952                 goto done;
3953
3954         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3955                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3956
3957                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3958                     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3959                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3960
3961                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3962                 if (err)
3963                         goto done;
3964         }
3965
3966         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3967                 goto done;
3968
3969         tw32(TG3_CPMU_EEE_MODE,
3970              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3971
3972         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3973         if (!err) {
3974                 u32 err2;
3975
3976                 val = 0;
3977                 /* Advertise 100-BaseTX EEE ability */
3978                 if (advertise & ADVERTISED_100baseT_Full)
3979                         val |= MDIO_AN_EEE_ADV_100TX;
3980                 /* Advertise 1000-BaseT EEE ability */
3981                 if (advertise & ADVERTISED_1000baseT_Full)
3982                         val |= MDIO_AN_EEE_ADV_1000T;
3983                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3984                 if (err)
3985                         val = 0;
3986
3987                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3988                 case ASIC_REV_5717:
3989                 case ASIC_REV_57765:
3990                 case ASIC_REV_57766:
3991                 case ASIC_REV_5719:
3992                         /* If we advertised any EEE abilities above... */
3993                         if (val)
3994                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3995                                       MII_TG3_DSP_TAP26_RMRXSTO |
3996                                       MII_TG3_DSP_TAP26_OPCSINPT;
3997                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3998                         /* Fall through */
3999                 case ASIC_REV_5720:
4000                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4001                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4002                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4003                 }
4004
4005                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
4006                 if (!err)
4007                         err = err2;
4008         }
4009
4010 done:
4011         return err;
4012 }
4013
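     /* Begin copper link bring-up.  With autoneg (or in low-power/WoL
      * mode) program the advertisements and restart autoneg; otherwise
      * force speed/duplex via BMCR, first looping back briefly so the
      * old link drops before the forced settings take effect.
      */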
4014 static void tg3_phy_copper_begin(struct tg3 *tp)
4015 {
4016         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4017             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4018                 u32 adv, fc;
4019
4020                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4021                         adv = ADVERTISED_10baseT_Half |
4022                               ADVERTISED_10baseT_Full;
4023                         if (tg3_flag(tp, WOL_SPEED_100MB))
4024                                 adv |= ADVERTISED_100baseT_Half |
4025                                        ADVERTISED_100baseT_Full;
4026
4027                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4028                 } else {
4029                         adv = tp->link_config.advertising;
4030                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4031                                 adv &= ~(ADVERTISED_1000baseT_Half |
4032                                          ADVERTISED_1000baseT_Full);
4033
4034                         fc = tp->link_config.flowctrl;
4035                 }
4036
4037                 tg3_phy_autoneg_cfg(tp, adv, fc);
4038
4039                 tg3_writephy(tp, MII_BMCR,
4040                              BMCR_ANENABLE | BMCR_ANRESTART);
4041         } else {
4042                 int i;
4043                 u32 bmcr, orig_bmcr;
4044
4045                 tp->link_config.active_speed = tp->link_config.speed;
4046                 tp->link_config.active_duplex = tp->link_config.duplex;
4047
4048                 bmcr = 0;
4049                 switch (tp->link_config.speed) {
4050                 default:
4051                 case SPEED_10:
4052                         break;
4053
4054                 case SPEED_100:
4055                         bmcr |= BMCR_SPEED100;
4056                         break;
4057
4058                 case SPEED_1000:
4059                         bmcr |= BMCR_SPEED1000;
4060                         break;
4061                 }
4062
4063                 if (tp->link_config.duplex == DUPLEX_FULL)
4064                         bmcr |= BMCR_FULLDPLX;
4065
4066                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4067                     (bmcr != orig_bmcr)) {
4068                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4069                         for (i = 0; i < 1500; i++) {
4070                                 u32 tmp;
4071
4072                                 udelay(10);
4073                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4074                                     tg3_readphy(tp, MII_BMSR, &tmp))
4075                                         continue;
4076                                 if (!(tmp & BMSR_LSTATUS)) {
4077                                         udelay(40);
4078                                         break;
4079                                 }
4080                         }
4081                         tg3_writephy(tp, MII_BMCR, bmcr);
4082                         udelay(40);
4083                 }
4084         }
4085 }
4086
4087 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4088 {
4089         int err;
4090
4091         /* Turn off tap power management. */
4092         /* Set Extended packet length bit */
4093         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4094
4095         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4096         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4097         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4098         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4099         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4100
4101         udelay(40);
4102
4103         return err;
4104 }
4105
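     /* Verify that the PHY's current advertisement matches what we
      * were asked to advertise, including flow control and, on gigabit
      * PHYs, the MII_CTRL1000 bits (with the 5701 A0/B0 forced-master
      * workaround).
      */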
4106 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4107 {
4108         u32 advmsk, tgtadv, advertising;
4109
4110         advertising = tp->link_config.advertising;
4111         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4112
4113         advmsk = ADVERTISE_ALL;
4114         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4115                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4116                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4117         }
4118
4119         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4120                 return false;
4121
4122         if ((*lcladv & advmsk) != tgtadv)
4123                 return false;
4124
4125         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4126                 u32 tg3_ctrl;
4127
4128                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4129
4130                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4131                         return false;
4132
4133                 if (tgtadv &&
4134                     (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4135                      tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4136                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4137                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4138                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4139                 } else {
4140                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4141                 }
4142
4143                 if (tg3_ctrl != tgtadv)
4144                         return false;
4145         }
4146
4147         return true;
4148 }
4149
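     /* Collect the link partner's abilities from MII_STAT1000 and
      * MII_LPA and record them in ethtool form.
      */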
4150 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4151 {
4152         u32 lpeth = 0;
4153
4154         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4155                 u32 val;
4156
4157                 if (tg3_readphy(tp, MII_STAT1000, &val))
4158                         return false;
4159
4160                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4161         }
4162
4163         if (tg3_readphy(tp, MII_LPA, rmtadv))
4164                 return false;
4165
4166         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4167         tp->link_config.rmt_adv = lpeth;
4168
4169         return true;
4170 }
4171
4172 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4173 {
4174         int current_link_up;
4175         u32 bmsr, val;
4176         u32 lcl_adv, rmt_adv;
4177         u16 current_speed;
4178         u8 current_duplex;
4179         int i, err;
4180
4181         tw32(MAC_EVENT, 0);
4182
4183         tw32_f(MAC_STATUS,
4184              (MAC_STATUS_SYNC_CHANGED |
4185               MAC_STATUS_CFG_CHANGED |
4186               MAC_STATUS_MI_COMPLETION |
4187               MAC_STATUS_LNKSTATE_CHANGED));
4188         udelay(40);
4189
4190         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4191                 tw32_f(MAC_MI_MODE,
4192                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4193                 udelay(80);
4194         }
4195
4196         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4197
4198         /* Some third-party PHYs need to be reset on link going
4199          * down.
4200          */
4201         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4202              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4203              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4204             netif_carrier_ok(tp->dev)) {
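                     /* BMSR latches link-down events: read it twice so
                      * the second read reflects the current link state.
                      */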
4205                 tg3_readphy(tp, MII_BMSR, &bmsr);
4206                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4207                     !(bmsr & BMSR_LSTATUS))
4208                         force_reset = 1;
4209         }
4210         if (force_reset)
4211                 tg3_phy_reset(tp);
4212
4213         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4214                 tg3_readphy(tp, MII_BMSR, &bmsr);
4215                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4216                     !tg3_flag(tp, INIT_COMPLETE))
4217                         bmsr = 0;
4218
4219                 if (!(bmsr & BMSR_LSTATUS)) {
4220                         err = tg3_init_5401phy_dsp(tp);
4221                         if (err)
4222                                 return err;
4223
4224                         tg3_readphy(tp, MII_BMSR, &bmsr);
4225                         for (i = 0; i < 1000; i++) {
4226                                 udelay(10);
4227                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4228                                     (bmsr & BMSR_LSTATUS)) {
4229                                         udelay(40);
4230                                         break;
4231                                 }
4232                         }
4233
4234                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4235                             TG3_PHY_REV_BCM5401_B0 &&
4236                             !(bmsr & BMSR_LSTATUS) &&
4237                             tp->link_config.active_speed == SPEED_1000) {
4238                                 err = tg3_phy_reset(tp);
4239                                 if (!err)
4240                                         err = tg3_init_5401phy_dsp(tp);
4241                                 if (err)
4242                                         return err;
4243                         }
4244                 }
4245         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4246                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4247                 /* 5701 {A0,B0} CRC bug workaround */
4248                 tg3_writephy(tp, 0x15, 0x0a75);
4249                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4250                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4251                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4252         }
4253
4254         /* Clear pending interrupts... */
4255         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4256         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4257
4258         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4259                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4260         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4261                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4262
4263         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4264             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4265                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4266                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4267                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4268                 else
4269                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4270         }
4271
4272         current_link_up = 0;
4273         current_speed = SPEED_UNKNOWN;
4274         current_duplex = DUPLEX_UNKNOWN;
4275         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4276         tp->link_config.rmt_adv = 0;
4277
4278         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4279                 err = tg3_phy_auxctl_read(tp,
4280                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4281                                           &val);
4282                 if (!err && !(val & (1 << 10))) {
4283                         tg3_phy_auxctl_write(tp,
4284                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4285                                              val | (1 << 10));
4286                         goto relink;
4287                 }
4288         }
4289
4290         bmsr = 0;
4291         for (i = 0; i < 100; i++) {
4292                 tg3_readphy(tp, MII_BMSR, &bmsr);
4293                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4294                     (bmsr & BMSR_LSTATUS))
4295                         break;
4296                 udelay(40);
4297         }
4298
4299         if (bmsr & BMSR_LSTATUS) {
4300                 u32 aux_stat, bmcr;
4301
4302                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4303                 for (i = 0; i < 2000; i++) {
4304                         udelay(10);
4305                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4306                             aux_stat)
4307                                 break;
4308                 }
4309
4310                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4311                                              &current_speed,
4312                                              &current_duplex);
4313
4314                 bmcr = 0;
4315                 for (i = 0; i < 200; i++) {
4316                         tg3_readphy(tp, MII_BMCR, &bmcr);
4317                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4318                                 continue;
4319                         if (bmcr && bmcr != 0x7fff)
4320                                 break;
4321                         udelay(10);
4322                 }
4323
4324                 lcl_adv = 0;
4325                 rmt_adv = 0;
4326
4327                 tp->link_config.active_speed = current_speed;
4328                 tp->link_config.active_duplex = current_duplex;
4329
4330                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4331                         if ((bmcr & BMCR_ANENABLE) &&
4332                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4333                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4334                                 current_link_up = 1;
4335                 } else {
4336                         if (!(bmcr & BMCR_ANENABLE) &&
4337                             tp->link_config.speed == current_speed &&
4338                             tp->link_config.duplex == current_duplex &&
4339                             tp->link_config.flowctrl ==
4340                             tp->link_config.active_flowctrl) {
4341                                 current_link_up = 1;
4342                         }
4343                 }
4344
4345                 if (current_link_up == 1 &&
4346                     tp->link_config.active_duplex == DUPLEX_FULL) {
4347                         u32 reg, bit;
4348
4349                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4350                                 reg = MII_TG3_FET_GEN_STAT;
4351                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4352                         } else {
4353                                 reg = MII_TG3_EXT_STAT;
4354                                 bit = MII_TG3_EXT_STAT_MDIX;
4355                         }
4356
4357                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4358                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4359
4360                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4361                 }
4362         }
4363
4364 relink:
4365         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4366                 tg3_phy_copper_begin(tp);
4367
4368                 tg3_readphy(tp, MII_BMSR, &bmsr);
4369                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4370                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4371                         current_link_up = 1;
4372         }
4373
4374         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4375         if (current_link_up == 1) {
4376                 if (tp->link_config.active_speed == SPEED_100 ||
4377                     tp->link_config.active_speed == SPEED_10)
4378                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4379                 else
4380                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4381         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4382                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4383         else
4384                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4385
4386         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4387         if (tp->link_config.active_duplex == DUPLEX_HALF)
4388                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4389
4390         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4391                 if (current_link_up == 1 &&
4392                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4393                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4394                 else
4395                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4396         }
4397
4398         /* ??? Without this setting Netgear GA302T PHY does not
4399          * ??? send/receive packets...
4400          */
4401         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4402             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4403                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4404                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4405                 udelay(80);
4406         }
4407
4408         tw32_f(MAC_MODE, tp->mac_mode);
4409         udelay(40);
4410
4411         tg3_phy_eee_adjust(tp, current_link_up);
4412
4413         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4414                 /* Polled via timer. */
4415                 tw32_f(MAC_EVENT, 0);
4416         } else {
4417                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4418         }
4419         udelay(40);
4420
4421         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4422             current_link_up == 1 &&
4423             tp->link_config.active_speed == SPEED_1000 &&
4424             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4425                 udelay(120);
4426                 tw32_f(MAC_STATUS,
4427                      (MAC_STATUS_SYNC_CHANGED |
4428                       MAC_STATUS_CFG_CHANGED));
4429                 udelay(40);
4430                 tg3_write_mem(tp,
4431                               NIC_SRAM_FIRMWARE_MBOX,
4432                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4433         }
4434
4435         /* Prevent send BD corruption. */
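        /* On chips with the CLKREQ bug, the PCIe CLKREQ power-saving
         * handshake can corrupt send BDs at 10/100 speeds, so disable it
         * there and re-enable it for gigabit links.
         */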
4436         if (tg3_flag(tp, CLKREQ_BUG)) {
4437                 u16 oldlnkctl, newlnkctl;
4438
4439                 pci_read_config_word(tp->pdev,
4440                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4441                                      &oldlnkctl);
4442                 if (tp->link_config.active_speed == SPEED_100 ||
4443                     tp->link_config.active_speed == SPEED_10)
4444                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4445                 else
4446                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4447                 if (newlnkctl != oldlnkctl)
4448                         pci_write_config_word(tp->pdev,
4449                                               pci_pcie_cap(tp->pdev) +
4450                                               PCI_EXP_LNKCTL, newlnkctl);
4451         }
4452
4453         if (current_link_up != netif_carrier_ok(tp->dev)) {
4454                 if (current_link_up)
4455                         netif_carrier_on(tp->dev);
4456                 else
4457                         netif_carrier_off(tp->dev);
4458                 tg3_link_report(tp);
4459         }
4460
4461         return 0;
4462 }
4463
4464 struct tg3_fiber_aneginfo {
4465         int state;
4466 #define ANEG_STATE_UNKNOWN              0
4467 #define ANEG_STATE_AN_ENABLE            1
4468 #define ANEG_STATE_RESTART_INIT         2
4469 #define ANEG_STATE_RESTART              3
4470 #define ANEG_STATE_DISABLE_LINK_OK      4
4471 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4472 #define ANEG_STATE_ABILITY_DETECT       6
4473 #define ANEG_STATE_ACK_DETECT_INIT      7
4474 #define ANEG_STATE_ACK_DETECT           8
4475 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4476 #define ANEG_STATE_COMPLETE_ACK         10
4477 #define ANEG_STATE_IDLE_DETECT_INIT     11
4478 #define ANEG_STATE_IDLE_DETECT          12
4479 #define ANEG_STATE_LINK_OK              13
4480 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4481 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4482
4483         u32 flags;
4484 #define MR_AN_ENABLE            0x00000001
4485 #define MR_RESTART_AN           0x00000002
4486 #define MR_AN_COMPLETE          0x00000004
4487 #define MR_PAGE_RX              0x00000008
4488 #define MR_NP_LOADED            0x00000010
4489 #define MR_TOGGLE_TX            0x00000020
4490 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4491 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4492 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4493 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4494 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4495 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4496 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4497 #define MR_TOGGLE_RX            0x00002000
4498 #define MR_NP_RX                0x00004000
4499
4500 #define MR_LINK_OK              0x80000000
4501
4502         unsigned long link_time, cur_time;
4503
4504         u32 ability_match_cfg;
4505         int ability_match_count;
4506
4507         char ability_match, idle_match, ack_match;
4508
4509         u32 txconfig, rxconfig;
4510 #define ANEG_CFG_NP             0x00000080
4511 #define ANEG_CFG_ACK            0x00000040
4512 #define ANEG_CFG_RF2            0x00000020
4513 #define ANEG_CFG_RF1            0x00000010
4514 #define ANEG_CFG_PS2            0x00000001
4515 #define ANEG_CFG_PS1            0x00008000
4516 #define ANEG_CFG_HD             0x00004000
4517 #define ANEG_CFG_FD             0x00002000
4518 #define ANEG_CFG_INVAL          0x00001f06
4519
4520 };
4521 #define ANEG_OK         0
4522 #define ANEG_DONE       1
4523 #define ANEG_TIMER_ENAB 2
4524 #define ANEG_FAILED     -1
4525
4526 #define ANEG_STATE_SETTLE_TIME  10000
4527
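/* Software auto-negotiation state machine for fiber links, modelled on
 * the 1000BASE-X (IEEE 802.3 clause 37) arbitration states.  It is
 * ticked roughly once per microsecond from fiber_autoneg() below, and
 * ap->cur_time counts those ticks, so ANEG_STATE_SETTLE_TIME (10000)
 * corresponds to roughly 10ms of settle time.
 */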
4528 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4529                                    struct tg3_fiber_aneginfo *ap)
4530 {
4531         u16 flowctrl;
4532         unsigned long delta;
4533         u32 rx_cfg_reg;
4534         int ret;
4535
4536         if (ap->state == ANEG_STATE_UNKNOWN) {
4537                 ap->rxconfig = 0;
4538                 ap->link_time = 0;
4539                 ap->cur_time = 0;
4540                 ap->ability_match_cfg = 0;
4541                 ap->ability_match_count = 0;
4542                 ap->ability_match = 0;
4543                 ap->idle_match = 0;
4544                 ap->ack_match = 0;
4545         }
4546         ap->cur_time++;
4547
4548         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4549                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4550
4551                 if (rx_cfg_reg != ap->ability_match_cfg) {
4552                         ap->ability_match_cfg = rx_cfg_reg;
4553                         ap->ability_match = 0;
4554                         ap->ability_match_count = 0;
4555                 } else {
4556                         if (++ap->ability_match_count > 1) {
4557                                 ap->ability_match = 1;
4558                                 ap->ability_match_cfg = rx_cfg_reg;
4559                         }
4560                 }
4561                 if (rx_cfg_reg & ANEG_CFG_ACK)
4562                         ap->ack_match = 1;
4563                 else
4564                         ap->ack_match = 0;
4565
4566                 ap->idle_match = 0;
4567         } else {
4568                 ap->idle_match = 1;
4569                 ap->ability_match_cfg = 0;
4570                 ap->ability_match_count = 0;
4571                 ap->ability_match = 0;
4572                 ap->ack_match = 0;
4573
4574                 rx_cfg_reg = 0;
4575         }
4576
4577         ap->rxconfig = rx_cfg_reg;
4578         ret = ANEG_OK;
4579
4580         switch (ap->state) {
4581         case ANEG_STATE_UNKNOWN:
4582                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4583                         ap->state = ANEG_STATE_AN_ENABLE;
4584
4585                 /* fallthru */
4586         case ANEG_STATE_AN_ENABLE:
4587                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4588                 if (ap->flags & MR_AN_ENABLE) {
4589                         ap->link_time = 0;
4590                         ap->cur_time = 0;
4591                         ap->ability_match_cfg = 0;
4592                         ap->ability_match_count = 0;
4593                         ap->ability_match = 0;
4594                         ap->idle_match = 0;
4595                         ap->ack_match = 0;
4596
4597                         ap->state = ANEG_STATE_RESTART_INIT;
4598                 } else {
4599                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4600                 }
4601                 break;
4602
4603         case ANEG_STATE_RESTART_INIT:
4604                 ap->link_time = ap->cur_time;
4605                 ap->flags &= ~(MR_NP_LOADED);
4606                 ap->txconfig = 0;
4607                 tw32(MAC_TX_AUTO_NEG, 0);
4608                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4609                 tw32_f(MAC_MODE, tp->mac_mode);
4610                 udelay(40);
4611
4612                 ret = ANEG_TIMER_ENAB;
4613                 ap->state = ANEG_STATE_RESTART;
4614
4615                 /* fallthru */
4616         case ANEG_STATE_RESTART:
4617                 delta = ap->cur_time - ap->link_time;
4618                 if (delta > ANEG_STATE_SETTLE_TIME)
4619                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4620                 else
4621                         ret = ANEG_TIMER_ENAB;
4622                 break;
4623
4624         case ANEG_STATE_DISABLE_LINK_OK:
4625                 ret = ANEG_DONE;
4626                 break;
4627
4628         case ANEG_STATE_ABILITY_DETECT_INIT:
4629                 ap->flags &= ~(MR_TOGGLE_TX);
4630                 ap->txconfig = ANEG_CFG_FD;
4631                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4632                 if (flowctrl & ADVERTISE_1000XPAUSE)
4633                         ap->txconfig |= ANEG_CFG_PS1;
4634                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4635                         ap->txconfig |= ANEG_CFG_PS2;
4636                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4637                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4638                 tw32_f(MAC_MODE, tp->mac_mode);
4639                 udelay(40);
4640
4641                 ap->state = ANEG_STATE_ABILITY_DETECT;
4642                 break;
4643
4644         case ANEG_STATE_ABILITY_DETECT:
4645                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4646                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4647                 break;
4648
4649         case ANEG_STATE_ACK_DETECT_INIT:
4650                 ap->txconfig |= ANEG_CFG_ACK;
4651                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4652                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4653                 tw32_f(MAC_MODE, tp->mac_mode);
4654                 udelay(40);
4655
4656                 ap->state = ANEG_STATE_ACK_DETECT;
4657
4658                 /* fallthru */
4659         case ANEG_STATE_ACK_DETECT:
4660                 if (ap->ack_match != 0) {
4661                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4662                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4663                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4664                         } else {
4665                                 ap->state = ANEG_STATE_AN_ENABLE;
4666                         }
4667                 } else if (ap->ability_match != 0 &&
4668                            ap->rxconfig == 0) {
4669                         ap->state = ANEG_STATE_AN_ENABLE;
4670                 }
4671                 break;
4672
4673         case ANEG_STATE_COMPLETE_ACK_INIT:
4674                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4675                         ret = ANEG_FAILED;
4676                         break;
4677                 }
4678                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4679                                MR_LP_ADV_HALF_DUPLEX |
4680                                MR_LP_ADV_SYM_PAUSE |
4681                                MR_LP_ADV_ASYM_PAUSE |
4682                                MR_LP_ADV_REMOTE_FAULT1 |
4683                                MR_LP_ADV_REMOTE_FAULT2 |
4684                                MR_LP_ADV_NEXT_PAGE |
4685                                MR_TOGGLE_RX |
4686                                MR_NP_RX);
4687                 if (ap->rxconfig & ANEG_CFG_FD)
4688                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4689                 if (ap->rxconfig & ANEG_CFG_HD)
4690                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4691                 if (ap->rxconfig & ANEG_CFG_PS1)
4692                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4693                 if (ap->rxconfig & ANEG_CFG_PS2)
4694                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4695                 if (ap->rxconfig & ANEG_CFG_RF1)
4696                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4697                 if (ap->rxconfig & ANEG_CFG_RF2)
4698                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4699                 if (ap->rxconfig & ANEG_CFG_NP)
4700                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4701
4702                 ap->link_time = ap->cur_time;
4703
4704                 ap->flags ^= (MR_TOGGLE_TX);
4705                 if (ap->rxconfig & 0x0008)
4706                         ap->flags |= MR_TOGGLE_RX;
4707                 if (ap->rxconfig & ANEG_CFG_NP)
4708                         ap->flags |= MR_NP_RX;
4709                 ap->flags |= MR_PAGE_RX;
4710
4711                 ap->state = ANEG_STATE_COMPLETE_ACK;
4712                 ret = ANEG_TIMER_ENAB;
4713                 break;
4714
4715         case ANEG_STATE_COMPLETE_ACK:
4716                 if (ap->ability_match != 0 &&
4717                     ap->rxconfig == 0) {
4718                         ap->state = ANEG_STATE_AN_ENABLE;
4719                         break;
4720                 }
4721                 delta = ap->cur_time - ap->link_time;
4722                 if (delta > ANEG_STATE_SETTLE_TIME) {
4723                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4724                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4725                         } else {
4726                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4727                                     !(ap->flags & MR_NP_RX)) {
4728                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4729                                 } else {
4730                                         ret = ANEG_FAILED;
4731                                 }
4732                         }
4733                 }
4734                 break;
4735
4736         case ANEG_STATE_IDLE_DETECT_INIT:
4737                 ap->link_time = ap->cur_time;
4738                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4739                 tw32_f(MAC_MODE, tp->mac_mode);
4740                 udelay(40);
4741
4742                 ap->state = ANEG_STATE_IDLE_DETECT;
4743                 ret = ANEG_TIMER_ENAB;
4744                 break;
4745
4746         case ANEG_STATE_IDLE_DETECT:
4747                 if (ap->ability_match != 0 &&
4748                     ap->rxconfig == 0) {
4749                         ap->state = ANEG_STATE_AN_ENABLE;
4750                         break;
4751                 }
4752                 delta = ap->cur_time - ap->link_time;
4753                 if (delta > ANEG_STATE_SETTLE_TIME) {
4754                         /* XXX another gem from the Broadcom driver :( */
4755                         ap->state = ANEG_STATE_LINK_OK;
4756                 }
4757                 break;
4758
4759         case ANEG_STATE_LINK_OK:
4760                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4761                 ret = ANEG_DONE;
4762                 break;
4763
4764         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4765                 /* ??? unimplemented */
4766                 break;
4767
4768         case ANEG_STATE_NEXT_PAGE_WAIT:
4769                 /* ??? unimplemented */
4770                 break;
4771
4772         default:
4773                 ret = ANEG_FAILED;
4774                 break;
4775         }
4776
4777         return ret;
4778 }
4779
4780 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4781 {
4782         int res = 0;
4783         struct tg3_fiber_aneginfo aninfo;
4784         int status = ANEG_FAILED;
4785         unsigned int tick;
4786         u32 tmp;
4787
4788         tw32_f(MAC_TX_AUTO_NEG, 0);
4789
4790         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4791         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4792         udelay(40);
4793
4794         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4795         udelay(40);
4796
4797         memset(&aninfo, 0, sizeof(aninfo));
4798         aninfo.flags |= MR_AN_ENABLE;
4799         aninfo.state = ANEG_STATE_UNKNOWN;
4800         aninfo.cur_time = 0;
4801         tick = 0;
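        /* Tick the state machine at ~1us intervals for up to 195000 ticks
         * (about 195ms), enough for several 10ms settle periods.
         */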
4802         while (++tick < 195000) {
4803                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4804                 if (status == ANEG_DONE || status == ANEG_FAILED)
4805                         break;
4806
4807                 udelay(1);
4808         }
4809
4810         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4811         tw32_f(MAC_MODE, tp->mac_mode);
4812         udelay(40);
4813
4814         *txflags = aninfo.txconfig;
4815         *rxflags = aninfo.flags;
4816
4817         if (status == ANEG_DONE &&
4818             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4819                              MR_LP_ADV_FULL_DUPLEX)))
4820                 res = 1;
4821
4822         return res;
4823 }
4824
4825 static void tg3_init_bcm8002(struct tg3 *tp)
4826 {
4827         u32 mac_status = tr32(MAC_STATUS);
4828         int i;
4829
4830         /* Reset when initializing for the first time or when we have a link. */
4831         if (tg3_flag(tp, INIT_COMPLETE) &&
4832             !(mac_status & MAC_STATUS_PCS_SYNCED))
4833                 return;
4834
4835         /* Set PLL lock range. */
4836         tg3_writephy(tp, 0x16, 0x8007);
4837
4838         /* SW reset */
4839         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4840
4841         /* Wait for reset to complete. */
4842         /* XXX schedule_timeout() ... */
4843         for (i = 0; i < 500; i++)
4844                 udelay(10);
4845
4846         /* Config mode; select PMA/Ch 1 regs. */
4847         tg3_writephy(tp, 0x10, 0x8411);
4848
4849         /* Enable auto-lock and comdet, select txclk for tx. */
4850         tg3_writephy(tp, 0x11, 0x0a10);
4851
4852         tg3_writephy(tp, 0x18, 0x00a0);
4853         tg3_writephy(tp, 0x16, 0x41ff);
4854
4855         /* Assert and deassert POR. */
4856         tg3_writephy(tp, 0x13, 0x0400);
4857         udelay(40);
4858         tg3_writephy(tp, 0x13, 0x0000);
4859
4860         tg3_writephy(tp, 0x11, 0x0a50);
4861         udelay(40);
4862         tg3_writephy(tp, 0x11, 0x0a10);
4863
4864         /* Wait for signal to stabilize */
4865         /* XXX schedule_timeout() ... */
4866         for (i = 0; i < 15000; i++)
4867                 udelay(10);
4868
4869         /* Deselect the channel register so we can read the PHYID
4870          * later.
4871          */
4872         tg3_writephy(tp, 0x10, 0x8011);
4873 }
4874
4875 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4876 {
4877         u16 flowctrl;
4878         u32 sg_dig_ctrl, sg_dig_status;
4879         u32 serdes_cfg, expected_sg_dig_ctrl;
4880         int workaround, port_a;
4881         int current_link_up;
4882
4883         serdes_cfg = 0;
4884         expected_sg_dig_ctrl = 0;
4885         workaround = 0;
4886         port_a = 1;
4887         current_link_up = 0;
4888
4889         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4890             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4891                 workaround = 1;
4892                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4893                         port_a = 0;
4894
4895                 /* Preserve bits 0-11, 13, 14 for signal pre-emphasis
4896                  * and bits 20-23 for the voltage regulator.  */
4897                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4898         }
4899
4900         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4901
4902         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4903                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4904                         if (workaround) {
4905                                 u32 val = serdes_cfg;
4906
4907                                 if (port_a)
4908                                         val |= 0xc010000;
4909                                 else
4910                                         val |= 0x4010000;
4911                                 tw32_f(MAC_SERDES_CFG, val);
4912                         }
4913
4914                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4915                 }
4916                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4917                         tg3_setup_flow_control(tp, 0, 0);
4918                         current_link_up = 1;
4919                 }
4920                 goto out;
4921         }
4922
4923         /* Want auto-negotiation.  */
4924         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4925
4926         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4927         if (flowctrl & ADVERTISE_1000XPAUSE)
4928                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4929         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4930                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4931
4932         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4933                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4934                     tp->serdes_counter &&
4935                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4936                                     MAC_STATUS_RCVD_CFG)) ==
4937                      MAC_STATUS_PCS_SYNCED)) {
4938                         tp->serdes_counter--;
4939                         current_link_up = 1;
4940                         goto out;
4941                 }
4942 restart_autoneg:
4943                 if (workaround)
4944                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4945                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4946                 udelay(5);
4947                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4948
4949                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4950                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4951         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4952                                  MAC_STATUS_SIGNAL_DET)) {
4953                 sg_dig_status = tr32(SG_DIG_STATUS);
4954                 mac_status = tr32(MAC_STATUS);
4955
4956                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4957                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4958                         u32 local_adv = 0, remote_adv = 0;
4959
4960                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4961                                 local_adv |= ADVERTISE_1000XPAUSE;
4962                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4963                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4964
4965                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4966                                 remote_adv |= LPA_1000XPAUSE;
4967                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4968                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4969
4970                         tp->link_config.rmt_adv =
4971                                            mii_adv_to_ethtool_adv_x(remote_adv);
4972
4973                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4974                         current_link_up = 1;
4975                         tp->serdes_counter = 0;
4976                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4977                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4978                         if (tp->serdes_counter)
4979                                 tp->serdes_counter--;
4980                         else {
4981                                 if (workaround) {
4982                                         u32 val = serdes_cfg;
4983
4984                                         if (port_a)
4985                                                 val |= 0xc010000;
4986                                         else
4987                                                 val |= 0x4010000;
4988
4989                                         tw32_f(MAC_SERDES_CFG, val);
4990                                 }
4991
4992                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4993                                 udelay(40);
4994
4995                                 /* Link parallel detection - link is up
4996                                  * only if we have PCS_SYNC and are not
4997                                  * receiving config code words.  */
4998                                 mac_status = tr32(MAC_STATUS);
4999                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5000                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5001                                         tg3_setup_flow_control(tp, 0, 0);
5002                                         current_link_up = 1;
5003                                         tp->phy_flags |=
5004                                                 TG3_PHYFLG_PARALLEL_DETECT;
5005                                         tp->serdes_counter =
5006                                                 SERDES_PARALLEL_DET_TIMEOUT;
5007                                 } else
5008                                         goto restart_autoneg;
5009                         }
5010                 }
5011         } else {
5012                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5013                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5014         }
5015
5016 out:
5017         return current_link_up;
5018 }
5019
5020 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5021 {
5022         int current_link_up = 0;
5023
5024         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5025                 goto out;
5026
5027         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5028                 u32 txflags, rxflags;
5029                 int i;
5030
5031                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5032                         u32 local_adv = 0, remote_adv = 0;
5033
5034                         if (txflags & ANEG_CFG_PS1)
5035                                 local_adv |= ADVERTISE_1000XPAUSE;
5036                         if (txflags & ANEG_CFG_PS2)
5037                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5038
5039                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5040                                 remote_adv |= LPA_1000XPAUSE;
5041                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5042                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5043
5044                         tp->link_config.rmt_adv =
5045                                            mii_adv_to_ethtool_adv_x(remote_adv);
5046
5047                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5048
5049                         current_link_up = 1;
5050                 }
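                /* Clear the sync/config-changed status latches and wait
                 * (up to 30 tries) for them to stay clear, i.e. for the
                 * link to settle.
                 */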
5051                 for (i = 0; i < 30; i++) {
5052                         udelay(20);
5053                         tw32_f(MAC_STATUS,
5054                                (MAC_STATUS_SYNC_CHANGED |
5055                                 MAC_STATUS_CFG_CHANGED));
5056                         udelay(40);
5057                         if ((tr32(MAC_STATUS) &
5058                              (MAC_STATUS_SYNC_CHANGED |
5059                               MAC_STATUS_CFG_CHANGED)) == 0)
5060                                 break;
5061                 }
5062
5063                 mac_status = tr32(MAC_STATUS);
5064                 if (current_link_up == 0 &&
5065                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5066                     !(mac_status & MAC_STATUS_RCVD_CFG))
5067                         current_link_up = 1;
5068         } else {
5069                 tg3_setup_flow_control(tp, 0, 0);
5070
5071                 /* Forcing 1000FD link up. */
5072                 current_link_up = 1;
5073
5074                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5075                 udelay(40);
5076
5077                 tw32_f(MAC_MODE, tp->mac_mode);
5078                 udelay(40);
5079         }
5080
5081 out:
5082         return current_link_up;
5083 }
5084
5085 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5086 {
5087         u32 orig_pause_cfg;
5088         u16 orig_active_speed;
5089         u8 orig_active_duplex;
5090         u32 mac_status;
5091         int current_link_up;
5092         int i;
5093
5094         orig_pause_cfg = tp->link_config.active_flowctrl;
5095         orig_active_speed = tp->link_config.active_speed;
5096         orig_active_duplex = tp->link_config.active_duplex;
5097
5098         if (!tg3_flag(tp, HW_AUTONEG) &&
5099             netif_carrier_ok(tp->dev) &&
5100             tg3_flag(tp, INIT_COMPLETE)) {
5101                 mac_status = tr32(MAC_STATUS);
5102                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5103                                MAC_STATUS_SIGNAL_DET |
5104                                MAC_STATUS_CFG_CHANGED |
5105                                MAC_STATUS_RCVD_CFG);
5106                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5107                                    MAC_STATUS_SIGNAL_DET)) {
5108                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5109                                             MAC_STATUS_CFG_CHANGED));
5110                         return 0;
5111                 }
5112         }
5113
5114         tw32_f(MAC_TX_AUTO_NEG, 0);
5115
5116         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5117         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5118         tw32_f(MAC_MODE, tp->mac_mode);
5119         udelay(40);
5120
5121         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5122                 tg3_init_bcm8002(tp);
5123
5124         /* Enable link change events even when polling the serdes.  */
5125         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5126         udelay(40);
5127
5128         current_link_up = 0;
5129         tp->link_config.rmt_adv = 0;
5130         mac_status = tr32(MAC_STATUS);
5131
5132         if (tg3_flag(tp, HW_AUTONEG))
5133                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5134         else
5135                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5136
5137         tp->napi[0].hw_status->status =
5138                 (SD_STATUS_UPDATED |
5139                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5140
5141         for (i = 0; i < 100; i++) {
5142                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5143                                     MAC_STATUS_CFG_CHANGED));
5144                 udelay(5);
5145                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5146                                          MAC_STATUS_CFG_CHANGED |
5147                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5148                         break;
5149         }
5150
5151         mac_status = tr32(MAC_STATUS);
5152         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5153                 current_link_up = 0;
5154                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5155                     tp->serdes_counter == 0) {
5156                         tw32_f(MAC_MODE, (tp->mac_mode |
5157                                           MAC_MODE_SEND_CONFIGS));
5158                         udelay(1);
5159                         tw32_f(MAC_MODE, tp->mac_mode);
5160                 }
5161         }
5162
5163         if (current_link_up == 1) {
5164                 tp->link_config.active_speed = SPEED_1000;
5165                 tp->link_config.active_duplex = DUPLEX_FULL;
5166                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5167                                     LED_CTRL_LNKLED_OVERRIDE |
5168                                     LED_CTRL_1000MBPS_ON));
5169         } else {
5170                 tp->link_config.active_speed = SPEED_UNKNOWN;
5171                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5172                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5173                                     LED_CTRL_LNKLED_OVERRIDE |
5174                                     LED_CTRL_TRAFFIC_OVERRIDE));
5175         }
5176
5177         if (current_link_up != netif_carrier_ok(tp->dev)) {
5178                 if (current_link_up)
5179                         netif_carrier_on(tp->dev);
5180                 else
5181                         netif_carrier_off(tp->dev);
5182                 tg3_link_report(tp);
5183         } else {
5184                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5185                 if (orig_pause_cfg != now_pause_cfg ||
5186                     orig_active_speed != tp->link_config.active_speed ||
5187                     orig_active_duplex != tp->link_config.active_duplex)
5188                         tg3_link_report(tp);
5189         }
5190
5191         return 0;
5192 }
5193
5194 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5195 {
5196         int current_link_up, err = 0;
5197         u32 bmsr, bmcr;
5198         u16 current_speed;
5199         u8 current_duplex;
5200         u32 local_adv, remote_adv;
5201
5202         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5203         tw32_f(MAC_MODE, tp->mac_mode);
5204         udelay(40);
5205
5206         tw32(MAC_EVENT, 0);
5207
5208         tw32_f(MAC_STATUS,
5209              (MAC_STATUS_SYNC_CHANGED |
5210               MAC_STATUS_CFG_CHANGED |
5211               MAC_STATUS_MI_COMPLETION |
5212               MAC_STATUS_LNKSTATE_CHANGED));
5213         udelay(40);
5214
5215         if (force_reset)
5216                 tg3_phy_reset(tp);
5217
5218         current_link_up = 0;
5219         current_speed = SPEED_UNKNOWN;
5220         current_duplex = DUPLEX_UNKNOWN;
5221         tp->link_config.rmt_adv = 0;
5222
5223         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5224         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5225         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5226                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5227                         bmsr |= BMSR_LSTATUS;
5228                 else
5229                         bmsr &= ~BMSR_LSTATUS;
5230         }
5231
5232         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5233
5234         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5235             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5236                 /* do nothing, just check for link up at the end */
5237         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5238                 u32 adv, newadv;
5239
5240                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5241                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5242                                  ADVERTISE_1000XPAUSE |
5243                                  ADVERTISE_1000XPSE_ASYM |
5244                                  ADVERTISE_SLCT);
5245
5246                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5247                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5248
5249                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5250                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5251                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5252                         tg3_writephy(tp, MII_BMCR, bmcr);
5253
5254                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5255                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5256                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5257
5258                         return err;
5259                 }
5260         } else {
5261                 u32 new_bmcr;
5262
5263                 bmcr &= ~BMCR_SPEED1000;
5264                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5265
5266                 if (tp->link_config.duplex == DUPLEX_FULL)
5267                         new_bmcr |= BMCR_FULLDPLX;
5268
5269                 if (new_bmcr != bmcr) {
5270                         /* BMCR_SPEED1000 is a reserved bit that needs
5271                          * to be set on write.
5272                          */
5273                         new_bmcr |= BMCR_SPEED1000;
5274
5275                         /* Force a linkdown */
5276                         if (netif_carrier_ok(tp->dev)) {
5277                                 u32 adv;
5278
5279                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5280                                 adv &= ~(ADVERTISE_1000XFULL |
5281                                          ADVERTISE_1000XHALF |
5282                                          ADVERTISE_SLCT);
5283                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5284                                 tg3_writephy(tp, MII_BMCR, bmcr |
5285                                                            BMCR_ANRESTART |
5286                                                            BMCR_ANENABLE);
5287                                 udelay(10);
5288                                 netif_carrier_off(tp->dev);
5289                         }
5290                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5291                         bmcr = new_bmcr;
5292                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5293                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5294                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5295                             ASIC_REV_5714) {
5296                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5297                                         bmsr |= BMSR_LSTATUS;
5298                                 else
5299                                         bmsr &= ~BMSR_LSTATUS;
5300                         }
5301                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5302                 }
5303         }
5304
5305         if (bmsr & BMSR_LSTATUS) {
5306                 current_speed = SPEED_1000;
5307                 current_link_up = 1;
5308                 if (bmcr & BMCR_FULLDPLX)
5309                         current_duplex = DUPLEX_FULL;
5310                 else
5311                         current_duplex = DUPLEX_HALF;
5312
5313                 local_adv = 0;
5314                 remote_adv = 0;
5315
5316                 if (bmcr & BMCR_ANENABLE) {
5317                         u32 common;
5318
5319                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5320                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5321                         common = local_adv & remote_adv;
5322                         if (common & (ADVERTISE_1000XHALF |
5323                                       ADVERTISE_1000XFULL)) {
5324                                 if (common & ADVERTISE_1000XFULL)
5325                                         current_duplex = DUPLEX_FULL;
5326                                 else
5327                                         current_duplex = DUPLEX_HALF;
5328
5329                                 tp->link_config.rmt_adv =
5330                                            mii_adv_to_ethtool_adv_x(remote_adv);
5331                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5332                                 /* Link is up via parallel detect */
5333                         } else {
5334                                 current_link_up = 0;
5335                         }
5336                 }
5337         }
5338
5339         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5340                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5341
5342         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5343         if (tp->link_config.active_duplex == DUPLEX_HALF)
5344                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5345
5346         tw32_f(MAC_MODE, tp->mac_mode);
5347         udelay(40);
5348
5349         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5350
5351         tp->link_config.active_speed = current_speed;
5352         tp->link_config.active_duplex = current_duplex;
5353
5354         if (current_link_up != netif_carrier_ok(tp->dev)) {
5355                 if (current_link_up)
5356                         netif_carrier_on(tp->dev);
5357                 else {
5358                         netif_carrier_off(tp->dev);
5359                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5360                 }
5361                 tg3_link_report(tp);
5362         }
5363         return err;
5364 }
5365
5366 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5367 {
5368         if (tp->serdes_counter) {
5369                 /* Give autoneg time to complete. */
5370                 tp->serdes_counter--;
5371                 return;
5372         }
5373
5374         if (!netif_carrier_ok(tp->dev) &&
5375             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5376                 u32 bmcr;
5377
5378                 tg3_readphy(tp, MII_BMCR, &bmcr);
5379                 if (bmcr & BMCR_ANENABLE) {
5380                         u32 phy1, phy2;
5381
5382                         /* Select shadow register 0x1f */
5383                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5384                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5385
5386                         /* Select expansion interrupt status register */
5387                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5388                                          MII_TG3_DSP_EXP1_INT_STAT);
5389                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5390                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5391
5392                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5393                                 /* We have signal detect and not receiving
5394                                  * config code words, link is up by parallel
5395                                  * detection.
5396                                  */
5397
5398                                 bmcr &= ~BMCR_ANENABLE;
5399                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5400                                 tg3_writephy(tp, MII_BMCR, bmcr);
5401                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5402                         }
5403                 }
5404         } else if (netif_carrier_ok(tp->dev) &&
5405                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5406                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5407                 u32 phy2;
5408
5409                 /* Select expansion interrupt status register */
5410                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5411                                  MII_TG3_DSP_EXP1_INT_STAT);
5412                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5413                 if (phy2 & 0x20) {
5414                         u32 bmcr;
5415
5416                         /* Config code words received, turn on autoneg. */
5417                         tg3_readphy(tp, MII_BMCR, &bmcr);
5418                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5419
5420                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5421
5422                 }
5423         }
5424 }
5425
5426 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5427 {
5428         u32 val;
5429         int err;
5430
5431         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5432                 err = tg3_setup_fiber_phy(tp, force_reset);
5433         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5434                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5435         else
5436                 err = tg3_setup_copper_phy(tp, force_reset);
5437
5438         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5439                 u32 scale;
5440
5441                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5442                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5443                         scale = 65;
5444                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5445                         scale = 6;
5446                 else
5447                         scale = 12;
5448
5449                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5450                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5451                 tw32(GRC_MISC_CFG, val);
5452         }
5453
5454         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5455               (6 << TX_LENGTHS_IPG_SHIFT);
5456         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5457                 val |= tr32(MAC_TX_LENGTHS) &
5458                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5459                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5460
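        /* Half-duplex gigabit links need the extended (carrier-extension)
         * slot time, hence the much larger SLOT_TIME value below; all
         * other modes use the default value of 32.
         */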
5461         if (tp->link_config.active_speed == SPEED_1000 &&
5462             tp->link_config.active_duplex == DUPLEX_HALF)
5463                 tw32(MAC_TX_LENGTHS, val |
5464                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5465         else
5466                 tw32(MAC_TX_LENGTHS, val |
5467                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5468
5469         if (!tg3_flag(tp, 5705_PLUS)) {
5470                 if (netif_carrier_ok(tp->dev)) {
5471                         tw32(HOSTCC_STAT_COAL_TICKS,
5472                              tp->coal.stats_block_coalesce_usecs);
5473                 } else {
5474                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5475                 }
5476         }
5477
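        /* ASPM workaround: use the configured L1-entry threshold while
         * the link is down and force the maximum threshold once it is up.
         */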
5478         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5479                 val = tr32(PCIE_PWR_MGMT_THRESH);
5480                 if (!netif_carrier_ok(tp->dev))
5481                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5482                               tp->pwrmgmt_thresh;
5483                 else
5484                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5485                 tw32(PCIE_PWR_MGMT_THRESH, val);
5486         }
5487
5488         return err;
5489 }
5490
5491 static inline int tg3_irq_sync(struct tg3 *tp)
5492 {
5493         return tp->irq_sync;
5494 }
5495
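/* Read 'len' bytes of register space starting at offset 'off' into the
 * dump buffer.  'dst' is first advanced by 'off' so that each value
 * lands at the buffer index matching its register offset, letting
 * tg3_dump_state() print true register offsets.
 */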
5496 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5497 {
5498         int i;
5499
5500         dst = (u32 *)((u8 *)dst + off);
5501         for (i = 0; i < len; i += sizeof(u32))
5502                 *dst++ = tr32(off + i);
5503 }
5504
5505 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5506 {
5507         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5508         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5509         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5510         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5511         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5512         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5513         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5514         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5515         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5516         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5517         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5518         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5519         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5520         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5521         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5522         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5523         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5524         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5525         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5526
5527         if (tg3_flag(tp, SUPPORT_MSIX))
5528                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5529
5530         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5531         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5532         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5533         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5534         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5535         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5536         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5537         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5538
5539         if (!tg3_flag(tp, 5705_PLUS)) {
5540                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5541                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5542                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5543         }
5544
5545         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5546         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5547         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5548         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5549         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5550
5551         if (tg3_flag(tp, NVRAM))
5552                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5553 }
5554
5555 static void tg3_dump_state(struct tg3 *tp)
5556 {
5557         int i;
5558         u32 *regs;
5559
5560         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5561         if (!regs) {
5562                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5563                 return;
5564         }
5565
5566         if (tg3_flag(tp, PCI_EXPRESS)) {
5567                 /* Read up to but not including private PCI registers */
5568                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5569                         regs[i / sizeof(u32)] = tr32(i);
5570         } else
5571                 tg3_dump_legacy_regs(tp, regs);
5572
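        /* Dump four registers per line, skipping rows that read back as
         * all zeros to keep the log compact.
         */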
5573         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5574                 if (!regs[i + 0] && !regs[i + 1] &&
5575                     !regs[i + 2] && !regs[i + 3])
5576                         continue;
5577
5578                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5579                            i * 4,
5580                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5581         }
5582
5583         kfree(regs);
5584
5585         for (i = 0; i < tp->irq_cnt; i++) {
5586                 struct tg3_napi *tnapi = &tp->napi[i];
5587
5588                 /* SW status block */
5589                 netdev_err(tp->dev,
5590                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5591                            i,
5592                            tnapi->hw_status->status,
5593                            tnapi->hw_status->status_tag,
5594                            tnapi->hw_status->rx_jumbo_consumer,
5595                            tnapi->hw_status->rx_consumer,
5596                            tnapi->hw_status->rx_mini_consumer,
5597                            tnapi->hw_status->idx[0].rx_producer,
5598                            tnapi->hw_status->idx[0].tx_consumer);
5599
5600                 netdev_err(tp->dev,
5601                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5602                            i,
5603                            tnapi->last_tag, tnapi->last_irq_tag,
5604                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5605                            tnapi->rx_rcb_ptr,
5606                            tnapi->prodring.rx_std_prod_idx,
5607                            tnapi->prodring.rx_std_cons_idx,
5608                            tnapi->prodring.rx_jmb_prod_idx,
5609                            tnapi->prodring.rx_jmb_cons_idx);
5610         }
5611 }
5612
5613 /* This is called whenever we suspect that the system chipset is re-
5614  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5615  * is bogus tx completions. We try to recover by setting the
5616  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5617  * in the workqueue.
5618  */
5619 static void tg3_tx_recover(struct tg3 *tp)
5620 {
5621         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5622                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5623
5624         netdev_warn(tp->dev,
5625                     "The system may be re-ordering memory-mapped I/O "
5626                     "cycles to the network device, attempting to recover. "
5627                     "Please report the problem to the driver maintainer "
5628                     "and include system chipset information.\n");
5629
5630         spin_lock(&tp->lock);
5631         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5632         spin_unlock(&tp->lock);
5633 }
5634
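/* Number of free tx descriptors.  tx_prod and tx_cons wrap at
 * TG3_TX_RING_SIZE (a power of two), so the masked difference is the
 * count of in-flight descriptors; e.g. with tx_pending == 512,
 * tx_prod == 5 and tx_cons == 510, (5 - 510) & 511 == 7 are in flight
 * and 505 slots remain free.
 */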
5635 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5636 {
5637         /* Tell compiler to fetch tx indices from memory. */
5638         barrier();
5639         return tnapi->tx_pending -
5640                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5641 }
5642
5643 /* Tigon3 never reports partial packet sends.  So we do not
5644  * need special logic to handle SKBs that have not had all
5645  * of their frags sent yet, like SunGEM does.
5646  */
5647 static void tg3_tx(struct tg3_napi *tnapi)
5648 {
5649         struct tg3 *tp = tnapi->tp;
5650         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5651         u32 sw_idx = tnapi->tx_cons;
5652         struct netdev_queue *txq;
5653         int index = tnapi - tp->napi;
5654         unsigned int pkts_compl = 0, bytes_compl = 0;
5655
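        /* With TSS enabled, vector 0 carries no tx ring, so NAPI context
         * i services tx queue i - 1.
         */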
5656         if (tg3_flag(tp, ENABLE_TSS))
5657                 index--;
5658
5659         txq = netdev_get_tx_queue(tp->dev, index);
5660
5661         while (sw_idx != hw_idx) {
5662                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5663                 struct sk_buff *skb = ri->skb;
5664                 int i, tx_bug = 0;
5665
5666                 if (unlikely(skb == NULL)) {
5667                         tg3_tx_recover(tp);
5668                         return;
5669                 }
5670
5671                 pci_unmap_single(tp->pdev,
5672                                  dma_unmap_addr(ri, mapping),
5673                                  skb_headlen(skb),
5674                                  PCI_DMA_TODEVICE);
5675
5676                 ri->skb = NULL;
5677
5678                 while (ri->fragmented) {
5679                         ri->fragmented = false;
5680                         sw_idx = NEXT_TX(sw_idx);
5681                         ri = &tnapi->tx_buffers[sw_idx];
5682                 }
5683
5684                 sw_idx = NEXT_TX(sw_idx);
5685
5686                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5687                         ri = &tnapi->tx_buffers[sw_idx];
5688                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5689                                 tx_bug = 1;
5690
5691                         pci_unmap_page(tp->pdev,
5692                                        dma_unmap_addr(ri, mapping),
5693                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5694                                        PCI_DMA_TODEVICE);
5695
5696                         while (ri->fragmented) {
5697                                 ri->fragmented = false;
5698                                 sw_idx = NEXT_TX(sw_idx);
5699                                 ri = &tnapi->tx_buffers[sw_idx];
5700                         }
5701
5702                         sw_idx = NEXT_TX(sw_idx);
5703                 }
5704
5705                 pkts_compl++;
5706                 bytes_compl += skb->len;
5707
5708                 dev_kfree_skb(skb);
5709
5710                 if (unlikely(tx_bug)) {
5711                         tg3_tx_recover(tp);
5712                         return;
5713                 }
5714         }
5715
5716         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5717
5718         tnapi->tx_cons = sw_idx;
5719
5720         /* Need to make the tx_cons update visible to tg3_start_xmit()
5721          * before checking for netif_queue_stopped().  Without the
5722          * memory barrier, there is a small possibility that tg3_start_xmit()
5723          * will miss it and cause the queue to be stopped forever.
5724          */
5725         smp_mb();
5726
5727         if (unlikely(netif_tx_queue_stopped(txq) &&
5728                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5729                 __netif_tx_lock(txq, smp_processor_id());
5730                 if (netif_tx_queue_stopped(txq) &&
5731                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5732                         netif_tx_wake_queue(txq);
5733                 __netif_tx_unlock(txq);
5734         }
5735 }
5736
5737 static void tg3_frag_free(bool is_frag, void *data)
5738 {
5739         if (is_frag)
5740                 put_page(virt_to_head_page(data));
5741         else
5742                 kfree(data);
5743 }
5744
5745 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5746 {
5747         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5748                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5749
5750         if (!ri->data)
5751                 return;
5752
5753         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5754                          map_sz, PCI_DMA_FROMDEVICE);
5755         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
5756         ri->data = NULL;
5757 }
5758
5759
5760 /* Returns size of skb allocated or < 0 on error.
5761  *
5762  * We only need to fill in the address because the other members
5763  * of the RX descriptor are invariant, see tg3_init_rings.
5764  *
5765  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5766  * posting buffers we only dirty the first cache line of the RX
5767  * descriptor (containing the address).  Whereas for the RX status
5768  * buffers the cpu only reads the last cacheline of the RX descriptor
5769  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5770  */
5771 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5772                              u32 opaque_key, u32 dest_idx_unmasked,
5773                              unsigned int *frag_size)
5774 {
5775         struct tg3_rx_buffer_desc *desc;
5776         struct ring_info *map;
5777         u8 *data;
5778         dma_addr_t mapping;
5779         int skb_size, data_size, dest_idx;
5780
5781         switch (opaque_key) {
5782         case RXD_OPAQUE_RING_STD:
5783                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5784                 desc = &tpr->rx_std[dest_idx];
5785                 map = &tpr->rx_std_buffers[dest_idx];
5786                 data_size = tp->rx_pkt_map_sz;
5787                 break;
5788
5789         case RXD_OPAQUE_RING_JUMBO:
5790                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5791                 desc = &tpr->rx_jmb[dest_idx].std;
5792                 map = &tpr->rx_jmb_buffers[dest_idx];
5793                 data_size = TG3_RX_JMB_MAP_SZ;
5794                 break;
5795
5796         default:
5797                 return -EINVAL;
5798         }
5799
5800         /* Do not overwrite any of the map or rp information
5801          * until we are sure we can commit to a new buffer.
5802          *
5803          * Callers depend upon this behavior and assume that
5804          * we leave everything unchanged if we fail.
5805          */
5806         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5807                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
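        /* Illustrative sizing, assuming 4 KiB pages: a standard-ring
         * buffer (tp->rx_pkt_map_sz, on the order of 1.5 KiB at a
         * 1500-byte MTU) plus the skb_shared_info footer fits within a
         * page, so the cheaper page-fragment allocator below is taken;
         * a jumbo buffer (TG3_RX_JMB_MAP_SZ, roughly 9 KiB) is not, so
         * it falls back to kmalloc().
         */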
5808         if (skb_size <= PAGE_SIZE) {
5809                 data = netdev_alloc_frag(skb_size);
5810                 *frag_size = skb_size;
5811         } else {
5812                 data = kmalloc(skb_size, GFP_ATOMIC);
5813                 *frag_size = 0;
5814         }
5815         if (!data)
5816                 return -ENOMEM;
5817
5818         mapping = pci_map_single(tp->pdev,
5819                                  data + TG3_RX_OFFSET(tp),
5820                                  data_size,
5821                                  PCI_DMA_FROMDEVICE);
5822         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
5823                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
5824                 return -EIO;
5825         }
5826
5827         map->data = data;
5828         dma_unmap_addr_set(map, mapping, mapping);
5829
5830         desc->addr_hi = ((u64)mapping >> 32);
5831         desc->addr_lo = ((u64)mapping & 0xffffffff);
5832
5833         return data_size;
5834 }
5835
5836 /* We only need to move over in the address because the other
5837  * members of the RX descriptor are invariant.  See notes above
5838  * tg3_alloc_rx_data for full details.
5839  */
5840 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5841                            struct tg3_rx_prodring_set *dpr,
5842                            u32 opaque_key, int src_idx,
5843                            u32 dest_idx_unmasked)
5844 {
5845         struct tg3 *tp = tnapi->tp;
5846         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5847         struct ring_info *src_map, *dest_map;
5848         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5849         int dest_idx;
5850
5851         switch (opaque_key) {
5852         case RXD_OPAQUE_RING_STD:
5853                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5854                 dest_desc = &dpr->rx_std[dest_idx];
5855                 dest_map = &dpr->rx_std_buffers[dest_idx];
5856                 src_desc = &spr->rx_std[src_idx];
5857                 src_map = &spr->rx_std_buffers[src_idx];
5858                 break;
5859
5860         case RXD_OPAQUE_RING_JUMBO:
5861                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5862                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5863                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5864                 src_desc = &spr->rx_jmb[src_idx].std;
5865                 src_map = &spr->rx_jmb_buffers[src_idx];
5866                 break;
5867
5868         default:
5869                 return;
5870         }
5871
5872         dest_map->data = src_map->data;
5873         dma_unmap_addr_set(dest_map, mapping,
5874                            dma_unmap_addr(src_map, mapping));
5875         dest_desc->addr_hi = src_desc->addr_hi;
5876         dest_desc->addr_lo = src_desc->addr_lo;
5877
5878         /* Ensure that the update to the skb happens after the physical
5879          * addresses have been transferred to the new BD location.
5880          */
5881         smp_wmb();
5882
5883         src_map->data = NULL;
5884 }
5885
5886 /* The RX ring scheme is composed of multiple rings which post fresh
5887  * buffers to the chip, and one special ring the chip uses to report
5888  * status back to the host.
5889  *
5890  * The special ring reports the status of received packets to the
5891  * host.  The chip does not write into the original descriptor the
5892  * RX buffer was obtained from.  The chip simply takes the original
5893  * descriptor as provided by the host, updates the status and length
5894  * field, then writes this into the next status ring entry.
5895  *
5896  * Each ring the host uses to post buffers to the chip is described
5897  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5898  * it is first placed into the on-chip RAM.  When the packet's length
5899  * is known, the chip walks down the TG3_BDINFO entries to select the ring.
5900  * Each TG3_BDINFO specifies a MAXLEN field; the first TG3_BDINFO whose
5901  * MAXLEN covers the new packet's length is chosen.
5902  *
5903  * The "separate ring for rx status" scheme may sound queer, but it makes
5904  * sense from a cache coherency perspective.  If only the host writes
5905  * to the buffer post rings, and only the chip writes to the rx status
5906  * rings, then cache lines never move beyond shared-modified state.
5907  * If both the host and chip were to write into the same ring, cache line
5908  * eviction could occur since both entities want it in an exclusive state.
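 *
 * Schematically:
 *
 *      host --(posts fresh buffers)--> std/jumbo producer rings --> chip
 *      host <--(reads packet status)------- rx return ring <------ chip
 *
 * Each direction has a single writer, which is what keeps the cache
 * lines from ping-ponging as described above.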
5909  */
5910 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5911 {
5912         struct tg3 *tp = tnapi->tp;
5913         u32 work_mask, rx_std_posted = 0;
5914         u32 std_prod_idx, jmb_prod_idx;
5915         u32 sw_idx = tnapi->rx_rcb_ptr;
5916         u16 hw_idx;
5917         int received;
5918         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5919
5920         hw_idx = *(tnapi->rx_rcb_prod_idx);
5921         /*
5922          * We need to order the read of hw_idx and the read of
5923          * the opaque cookie.
5924          */
5925         rmb();
5926         work_mask = 0;
5927         received = 0;
5928         std_prod_idx = tpr->rx_std_prod_idx;
5929         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5930         while (sw_idx != hw_idx && budget > 0) {
5931                 struct ring_info *ri;
5932                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5933                 unsigned int len;
5934                 struct sk_buff *skb;
5935                 dma_addr_t dma_addr;
5936                 u32 opaque_key, desc_idx, *post_ptr;
5937                 u8 *data;
5938
5939                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5940                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5941                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5942                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5943                         dma_addr = dma_unmap_addr(ri, mapping);
5944                         data = ri->data;
5945                         post_ptr = &std_prod_idx;
5946                         rx_std_posted++;
5947                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5948                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5949                         dma_addr = dma_unmap_addr(ri, mapping);
5950                         data = ri->data;
5951                         post_ptr = &jmb_prod_idx;
5952                 } else
5953                         goto next_pkt_nopost;
5954
5955                 work_mask |= opaque_key;
5956
5957                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5958                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5959                 drop_it:
5960                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5961                                        desc_idx, *post_ptr);
5962                 drop_it_no_recycle:
5963                         /* Other statistics are kept track of by the card. */
5964                         tp->rx_dropped++;
5965                         goto next_pkt;
5966                 }
5967
5968                 prefetch(data + TG3_RX_OFFSET(tp));
5969                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5970                       ETH_FCS_LEN;
5971
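                /* Copy-versus-flip: packets above the copy threshold are
                 * handed to the stack in the original DMA buffer and a
                 * fresh buffer is allocated for the ring; smaller packets
                 * are copied into a new small skb so the larger mapped
                 * buffer can be recycled immediately.
                 */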
5972                 if (len > TG3_RX_COPY_THRESH(tp)) {
5973                         int skb_size;
5974                         unsigned int frag_size;
5975
5976                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5977                                                     *post_ptr, &frag_size);
5978                         if (skb_size < 0)
5979                                 goto drop_it;
5980
5981                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5982                                          PCI_DMA_FROMDEVICE);
5983
5984                         skb = build_skb(data, frag_size);
5985                         if (!skb) {
5986                                 tg3_frag_free(frag_size != 0, data);
5987                                 goto drop_it_no_recycle;
5988                         }
5989                         skb_reserve(skb, TG3_RX_OFFSET(tp));
5990                         /* Ensure that the update to the data happens
5991                          * after the usage of the old DMA mapping.
5992                          */
5993                         smp_wmb();
5994
5995                         ri->data = NULL;
5996
5997                 } else {
5998                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5999                                        desc_idx, *post_ptr);
6000
6001                         skb = netdev_alloc_skb(tp->dev,
6002                                                len + TG3_RAW_IP_ALIGN);
6003                         if (skb == NULL)
6004                                 goto drop_it_no_recycle;
6005
6006                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6007                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6008                         memcpy(skb->data,
6009                                data + TG3_RX_OFFSET(tp),
6010                                len);
6011                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6012                 }
6013
6014                 skb_put(skb, len);
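                /* The chip checksums TCP/UDP including the pseudo-header;
                 * an all-ones (0xffff) result indicates the packet
                 * verified, so the stack's checksum pass can be skipped.
                 */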
6015                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6016                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6017                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6018                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6019                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6020                 else
6021                         skb_checksum_none_assert(skb);
6022
6023                 skb->protocol = eth_type_trans(skb, tp->dev);
6024
6025                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6026                     skb->protocol != htons(ETH_P_8021Q)) {
6027                         dev_kfree_skb(skb);
6028                         goto drop_it_no_recycle;
6029                 }
6030
6031                 if (desc->type_flags & RXD_FLAG_VLAN &&
6032                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6033                         __vlan_hwaccel_put_tag(skb,
6034                                                desc->err_vlan & RXD_VLAN_MASK);
6035
6036                 napi_gro_receive(&tnapi->napi, skb);
6037
6038                 received++;
6039                 budget--;
6040
6041 next_pkt:
6042                 (*post_ptr)++;
6043
6044                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6045                         tpr->rx_std_prod_idx = std_prod_idx &
6046                                                tp->rx_std_ring_mask;
6047                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6048                                      tpr->rx_std_prod_idx);
6049                         work_mask &= ~RXD_OPAQUE_RING_STD;
6050                         rx_std_posted = 0;
6051                 }
6052 next_pkt_nopost:
6053                 sw_idx++;
6054                 sw_idx &= tp->rx_ret_ring_mask;
6055
6056                 /* Refresh hw_idx to see if there is new work */
6057                 if (sw_idx == hw_idx) {
6058                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6059                         rmb();
6060                 }
6061         }
6062
6063         /* ACK the status ring. */
6064         tnapi->rx_rcb_ptr = sw_idx;
6065         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6066
6067         /* Refill RX ring(s). */
6068         if (!tg3_flag(tp, ENABLE_RSS)) {
6069                 /* Sync BD data before updating mailbox */
6070                 wmb();
6071
6072                 if (work_mask & RXD_OPAQUE_RING_STD) {
6073                         tpr->rx_std_prod_idx = std_prod_idx &
6074                                                tp->rx_std_ring_mask;
6075                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6076                                      tpr->rx_std_prod_idx);
6077                 }
6078                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6079                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6080                                                tp->rx_jmb_ring_mask;
6081                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6082                                      tpr->rx_jmb_prod_idx);
6083                 }
6084                 mmiowb();
6085         } else if (work_mask) {
6086                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6087                  * updated before the producer indices can be updated.
6088                  */
6089                 smp_wmb();
6090
6091                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6092                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6093
6094                 if (tnapi != &tp->napi[1]) {
6095                         tp->rx_refill = true;
6096                         napi_schedule(&tp->napi[1].napi);
6097                 }
6098         }
6099
6100         return received;
6101 }
6102
6103 static void tg3_poll_link(struct tg3 *tp)
6104 {
6105         /* handle link change and other phy events */
6106         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6107                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6108
6109                 if (sblk->status & SD_STATUS_LINK_CHG) {
6110                         sblk->status = SD_STATUS_UPDATED |
6111                                        (sblk->status & ~SD_STATUS_LINK_CHG);
6112                         spin_lock(&tp->lock);
6113                         if (tg3_flag(tp, USE_PHYLIB)) {
6114                                 tw32_f(MAC_STATUS,
6115                                      (MAC_STATUS_SYNC_CHANGED |
6116                                       MAC_STATUS_CFG_CHANGED |
6117                                       MAC_STATUS_MI_COMPLETION |
6118                                       MAC_STATUS_LNKSTATE_CHANGED));
6119                                 udelay(40);
6120                         } else
6121                                 tg3_setup_phy(tp, 0);
6122                         spin_unlock(&tp->lock);
6123                 }
6124         }
6125 }
6126
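/* With RSS, each vector refills its own per-vector producer ring in
 * tg3_rx().  This helper moves those refilled buffers from such a ring
 * (@spr) into @dpr, the tp->napi[0] ring that is actually posted to
 * the hardware, returning -ENOSPC when the destination lacks room.
 */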
6127 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6128                                 struct tg3_rx_prodring_set *dpr,
6129                                 struct tg3_rx_prodring_set *spr)
6130 {
6131         u32 si, di, cpycnt, src_prod_idx;
6132         int i, err = 0;
6133
6134         while (1) {
6135                 src_prod_idx = spr->rx_std_prod_idx;
6136
6137                 /* Make sure updates to the rx_std_buffers[] entries and the
6138                  * standard producer index are seen in the correct order.
6139                  */
6140                 smp_rmb();
6141
6142                 if (spr->rx_std_cons_idx == src_prod_idx)
6143                         break;
6144
6145                 if (spr->rx_std_cons_idx < src_prod_idx)
6146                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6147                 else
6148                         cpycnt = tp->rx_std_ring_mask + 1 -
6149                                  spr->rx_std_cons_idx;
6150
6151                 cpycnt = min(cpycnt,
6152                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
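                /* Each pass copies only up to the point where one of the
                 * rings wraps.  E.g. with a 512-entry ring (mask 511),
                 * cons_idx == 500 and prod_idx == 20 yield cpycnt == 12
                 * here; the remaining 20 entries are handled on the next
                 * trip through the while loop, after the wrap.
                 */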
6153
6154                 si = spr->rx_std_cons_idx;
6155                 di = dpr->rx_std_prod_idx;
6156
6157                 for (i = di; i < di + cpycnt; i++) {
6158                         if (dpr->rx_std_buffers[i].data) {
6159                                 cpycnt = i - di;
6160                                 err = -ENOSPC;
6161                                 break;
6162                         }
6163                 }
6164
6165                 if (!cpycnt)
6166                         break;
6167
6168                 /* Ensure that updates to the rx_std_buffers ring and the
6169                  * shadowed hardware producer ring from tg3_recycle_skb() are
6170                  * ordered correctly WRT the skb check above.
6171                  */
6172                 smp_rmb();
6173
6174                 memcpy(&dpr->rx_std_buffers[di],
6175                        &spr->rx_std_buffers[si],
6176                        cpycnt * sizeof(struct ring_info));
6177
6178                 for (i = 0; i < cpycnt; i++, di++, si++) {
6179                         struct tg3_rx_buffer_desc *sbd, *dbd;
6180                         sbd = &spr->rx_std[si];
6181                         dbd = &dpr->rx_std[di];
6182                         dbd->addr_hi = sbd->addr_hi;
6183                         dbd->addr_lo = sbd->addr_lo;
6184                 }
6185
6186                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6187                                        tp->rx_std_ring_mask;
6188                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6189                                        tp->rx_std_ring_mask;
6190         }
6191
6192         while (1) {
6193                 src_prod_idx = spr->rx_jmb_prod_idx;
6194
6195                 /* Make sure updates to the rx_jmb_buffers[] entries and
6196                  * the jumbo producer index are seen in the correct order.
6197                  */
6198                 smp_rmb();
6199
6200                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6201                         break;
6202
6203                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6204                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6205                 else
6206                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6207                                  spr->rx_jmb_cons_idx;
6208
6209                 cpycnt = min(cpycnt,
6210                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6211
6212                 si = spr->rx_jmb_cons_idx;
6213                 di = dpr->rx_jmb_prod_idx;
6214
6215                 for (i = di; i < di + cpycnt; i++) {
6216                         if (dpr->rx_jmb_buffers[i].data) {
6217                                 cpycnt = i - di;
6218                                 err = -ENOSPC;
6219                                 break;
6220                         }
6221                 }
6222
6223                 if (!cpycnt)
6224                         break;
6225
6226                 /* Ensure that updates to the rx_jmb_buffers ring and the
6227                  * shadowed hardware producer ring from tg3_recycle_skb() are
6228                  * ordered correctly WRT the skb check above.
6229                  */
6230                 smp_rmb();
6231
6232                 memcpy(&dpr->rx_jmb_buffers[di],
6233                        &spr->rx_jmb_buffers[si],
6234                        cpycnt * sizeof(struct ring_info));
6235
6236                 for (i = 0; i < cpycnt; i++, di++, si++) {
6237                         struct tg3_rx_buffer_desc *sbd, *dbd;
6238                         sbd = &spr->rx_jmb[si].std;
6239                         dbd = &dpr->rx_jmb[di].std;
6240                         dbd->addr_hi = sbd->addr_hi;
6241                         dbd->addr_lo = sbd->addr_lo;
6242                 }
6243
6244                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6245                                        tp->rx_jmb_ring_mask;
6246                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6247                                        tp->rx_jmb_ring_mask;
6248         }
6249
6250         return err;
6251 }
6252
6253 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6254 {
6255         struct tg3 *tp = tnapi->tp;
6256
6257         /* run TX completion thread */
6258         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6259                 tg3_tx(tnapi);
6260                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6261                         return work_done;
6262         }
6263
6264         if (!tnapi->rx_rcb_prod_idx)
6265                 return work_done;
6266
6267         /* run RX thread, within the bounds set by NAPI.
6268          * All RX "locking" is done by ensuring outside
6269          * code synchronizes with tg3->napi.poll()
6270          */
6271         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6272                 work_done += tg3_rx(tnapi, budget - work_done);
6273
6274         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6275                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6276                 int i, err = 0;
6277                 u32 std_prod_idx = dpr->rx_std_prod_idx;
6278                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6279
6280                 tp->rx_refill = false;
6281                 for (i = 1; i <= tp->rxq_cnt; i++)
6282                         err |= tg3_rx_prodring_xfer(tp, dpr,
6283                                                     &tp->napi[i].prodring);
6284
6285                 wmb();
6286
6287                 if (std_prod_idx != dpr->rx_std_prod_idx)
6288                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6289                                      dpr->rx_std_prod_idx);
6290
6291                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6292                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6293                                      dpr->rx_jmb_prod_idx);
6294
6295                 mmiowb();
6296
6297                 if (err)
6298                         tw32_f(HOSTCC_MODE, tp->coal_now);
6299         }
6300
6301         return work_done;
6302 }
6303
6304 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6305 {
6306         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6307                 schedule_work(&tp->reset_task);
6308 }
6309
6310 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6311 {
6312         cancel_work_sync(&tp->reset_task);
6313         tg3_flag_clear(tp, RESET_TASK_PENDING);
6314         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6315 }
6316
6317 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6318 {
6319         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6320         struct tg3 *tp = tnapi->tp;
6321         int work_done = 0;
6322         struct tg3_hw_status *sblk = tnapi->hw_status;
6323
6324         while (1) {
6325                 work_done = tg3_poll_work(tnapi, work_done, budget);
6326
6327                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6328                         goto tx_recovery;
6329
6330                 if (unlikely(work_done >= budget))
6331                         break;
6332
6333                 /* tnapi->last_tag is used when re-enabling interrupts below
6334                  * to tell the hw how much work has been processed,
6335                  * so we must read it before checking for more work.
6336                  */
6337                 tnapi->last_tag = sblk->status_tag;
6338                 tnapi->last_irq_tag = tnapi->last_tag;
6339                 rmb();
6340
6341                 /* check for RX/TX work to do */
6342                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6343                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6344
6345                         /* This test here is not race-free, but will reduce
6346                          * the number of interrupts by looping again.
6347                          */
6348                         if (tnapi == &tp->napi[1] && tp->rx_refill)
6349                                 continue;
6350
6351                         napi_complete(napi);
6352                         /* Reenable interrupts. */
6353                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6354
6355                         /* This test here is synchronized by napi_schedule()
6356                          * and napi_complete() to close the race condition.
6357                          */
6358                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6359                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
6360                                                   HOSTCC_MODE_ENABLE |
6361                                                   tnapi->coal_now);
6362                         }
6363                         mmiowb();
6364                         break;
6365                 }
6366         }
6367
6368         return work_done;
6369
6370 tx_recovery:
6371         /* work_done is guaranteed to be less than budget. */
6372         napi_complete(napi);
6373         tg3_reset_task_schedule(tp);
6374         return work_done;
6375 }
6376
6377 static void tg3_process_error(struct tg3 *tp)
6378 {
6379         u32 val;
6380         bool real_error = false;
6381
6382         if (tg3_flag(tp, ERROR_PROCESSED))
6383                 return;
6384
6385         /* Check Flow Attention register */
6386         val = tr32(HOSTCC_FLOW_ATTN);
6387         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6388                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6389                 real_error = true;
6390         }
6391
6392         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6393                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6394                 real_error = true;
6395         }
6396
6397         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6398                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6399                 real_error = true;
6400         }
6401
6402         if (!real_error)
6403                 return;
6404
6405         tg3_dump_state(tp);
6406
6407         tg3_flag_set(tp, ERROR_PROCESSED);
6408         tg3_reset_task_schedule(tp);
6409 }
6410
6411 static int tg3_poll(struct napi_struct *napi, int budget)
6412 {
6413         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6414         struct tg3 *tp = tnapi->tp;
6415         int work_done = 0;
6416         struct tg3_hw_status *sblk = tnapi->hw_status;
6417
6418         while (1) {
6419                 if (sblk->status & SD_STATUS_ERROR)
6420                         tg3_process_error(tp);
6421
6422                 tg3_poll_link(tp);
6423
6424                 work_done = tg3_poll_work(tnapi, work_done, budget);
6425
6426                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6427                         goto tx_recovery;
6428
6429                 if (unlikely(work_done >= budget))
6430                         break;
6431
6432                 if (tg3_flag(tp, TAGGED_STATUS)) {
6433                         /* tp->last_tag is used in tg3_int_reenable() below
6434                          * to tell the hw how much work has been processed,
6435                          * so we must read it before checking for more work.
6436                          */
6437                         tnapi->last_tag = sblk->status_tag;
6438                         tnapi->last_irq_tag = tnapi->last_tag;
6439                         rmb();
6440                 } else
6441                         sblk->status &= ~SD_STATUS_UPDATED;
6442
6443                 if (likely(!tg3_has_work(tnapi))) {
6444                         napi_complete(napi);
6445                         tg3_int_reenable(tnapi);
6446                         break;
6447                 }
6448         }
6449
6450         return work_done;
6451
6452 tx_recovery:
6453         /* work_done is guaranteed to be less than budget. */
6454         napi_complete(napi);
6455         tg3_reset_task_schedule(tp);
6456         return work_done;
6457 }
6458
6459 static void tg3_napi_disable(struct tg3 *tp)
6460 {
6461         int i;
6462
6463         for (i = tp->irq_cnt - 1; i >= 0; i--)
6464                 napi_disable(&tp->napi[i].napi);
6465 }
6466
6467 static void tg3_napi_enable(struct tg3 *tp)
6468 {
6469         int i;
6470
6471         for (i = 0; i < tp->irq_cnt; i++)
6472                 napi_enable(&tp->napi[i].napi);
6473 }
6474
6475 static void tg3_napi_init(struct tg3 *tp)
6476 {
6477         int i;
6478
6479         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6480         for (i = 1; i < tp->irq_cnt; i++)
6481                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6482 }
6483
6484 static void tg3_napi_fini(struct tg3 *tp)
6485 {
6486         int i;
6487
6488         for (i = 0; i < tp->irq_cnt; i++)
6489                 netif_napi_del(&tp->napi[i].napi);
6490 }
6491
6492 static inline void tg3_netif_stop(struct tg3 *tp)
6493 {
6494         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6495         tg3_napi_disable(tp);
6496         netif_tx_disable(tp->dev);
6497 }
6498
6499 static inline void tg3_netif_start(struct tg3 *tp)
6500 {
6501         /* NOTE: unconditional netif_tx_wake_all_queues is only
6502          * appropriate so long as all callers are assured to
6503          * have free tx slots (such as after tg3_init_hw)
6504          */
6505         netif_tx_wake_all_queues(tp->dev);
6506
6507         tg3_napi_enable(tp);
6508         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6509         tg3_enable_ints(tp);
6510 }
6511
6512 static void tg3_irq_quiesce(struct tg3 *tp)
6513 {
6514         int i;
6515
6516         BUG_ON(tp->irq_sync);
6517
6518         tp->irq_sync = 1;
6519         smp_mb();
6520
6521         for (i = 0; i < tp->irq_cnt; i++)
6522                 synchronize_irq(tp->napi[i].irq_vec);
6523 }
6524
6525 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6526  * If irq_sync is non-zero, then the IRQ handler must be synchronized
6527  * with as well.  Most of the time, this is not necessary except when
6528  * shutting down the device.
6529  */
6530 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6531 {
6532         spin_lock_bh(&tp->lock);
6533         if (irq_sync)
6534                 tg3_irq_quiesce(tp);
6535 }
6536
6537 static inline void tg3_full_unlock(struct tg3 *tp)
6538 {
6539         spin_unlock_bh(&tp->lock);
6540 }
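/* Illustrative usage, following the pattern the callers use (this is
 * not a new API):
 *
 *      tg3_full_lock(tp, 1);      (irq_sync, e.g. when shutting down)
 *      ...reconfigure the chip...
 *      tg3_full_unlock(tp);
 */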
6541
6542 /* One-shot MSI handler - Chip automatically disables interrupt
6543  * after sending MSI so driver doesn't have to do it.
6544  */
6545 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6546 {
6547         struct tg3_napi *tnapi = dev_id;
6548         struct tg3 *tp = tnapi->tp;
6549
6550         prefetch(tnapi->hw_status);
6551         if (tnapi->rx_rcb)
6552                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6553
6554         if (likely(!tg3_irq_sync(tp)))
6555                 napi_schedule(&tnapi->napi);
6556
6557         return IRQ_HANDLED;
6558 }
6559
6560 /* MSI ISR - No need to check for interrupt sharing and no need to
6561  * flush status block and interrupt mailbox. PCI ordering rules
6562  * guarantee that MSI will arrive after the status block.
6563  */
6564 static irqreturn_t tg3_msi(int irq, void *dev_id)
6565 {
6566         struct tg3_napi *tnapi = dev_id;
6567         struct tg3 *tp = tnapi->tp;
6568
6569         prefetch(tnapi->hw_status);
6570         if (tnapi->rx_rcb)
6571                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6572         /*
6573          * Writing any value to intr-mbox-0 clears PCI INTA# and
6574          * chip-internal interrupt pending events.
6575          * Writing non-zero to intr-mbox-0 additionally tells the
6576          * NIC to stop sending us irqs, engaging "in-intr-handler"
6577          * event coalescing.
6578          */
6579         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6580         if (likely(!tg3_irq_sync(tp)))
6581                 napi_schedule(&tnapi->napi);
6582
6583         return IRQ_RETVAL(1);
6584 }
6585
6586 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6587 {
6588         struct tg3_napi *tnapi = dev_id;
6589         struct tg3 *tp = tnapi->tp;
6590         struct tg3_hw_status *sblk = tnapi->hw_status;
6591         unsigned int handled = 1;
6592
6593         /* In INTx mode, the interrupt can arrive at the CPU before the
6594          * status block posted just prior to it has reached host memory.
6595          * Reading the PCI State register will confirm whether the
6596          * interrupt is ours and will flush the status block.
6597          */
6598         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6599                 if (tg3_flag(tp, CHIP_RESETTING) ||
6600                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6601                         handled = 0;
6602                         goto out;
6603                 }
6604         }
6605
6606         /*
6607          * Writing any value to intr-mbox-0 clears PCI INTA# and
6608          * chip-internal interrupt pending events.
6609          * Writing non-zero to intr-mbox-0 additionally tells the
6610          * NIC to stop sending us irqs, engaging "in-intr-handler"
6611          * event coalescing.
6612          *
6613          * Flush the mailbox to de-assert the IRQ immediately to prevent
6614          * spurious interrupts.  The flush impacts performance but
6615          * excessive spurious interrupts can be worse in some cases.
6616          */
6617         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6618         if (tg3_irq_sync(tp))
6619                 goto out;
6620         sblk->status &= ~SD_STATUS_UPDATED;
6621         if (likely(tg3_has_work(tnapi))) {
6622                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6623                 napi_schedule(&tnapi->napi);
6624         } else {
6625                 /* No work, shared interrupt perhaps?  re-enable
6626                  * interrupts, and flush that PCI write
6627                  */
6628                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6629                                0x00000000);
6630         }
6631 out:
6632         return IRQ_RETVAL(handled);
6633 }
6634
6635 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6636 {
6637         struct tg3_napi *tnapi = dev_id;
6638         struct tg3 *tp = tnapi->tp;
6639         struct tg3_hw_status *sblk = tnapi->hw_status;
6640         unsigned int handled = 1;
6641
6642         /* In INTx mode, the interrupt can arrive at the CPU before the
6643          * status block posted just prior to it has reached host memory.
6644          * Reading the PCI State register will confirm whether the
6645          * interrupt is ours and will flush the status block.
6646          */
6647         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6648                 if (tg3_flag(tp, CHIP_RESETTING) ||
6649                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6650                         handled = 0;
6651                         goto out;
6652                 }
6653         }
6654
6655         /*
6656          * Writing any value to intr-mbox-0 clears PCI INTA# and
6657          * chip-internal interrupt pending events.
6658          * Writing non-zero to intr-mbox-0 additionally tells the
6659          * NIC to stop sending us irqs, engaging "in-intr-handler"
6660          * event coalescing.
6661          *
6662          * Flush the mailbox to de-assert the IRQ immediately to prevent
6663          * spurious interrupts.  The flush impacts performance but
6664          * excessive spurious interrupts can be worse in some cases.
6665          */
6666         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6667
6668         /*
6669          * In a shared interrupt configuration, sometimes other devices'
6670          * interrupts will scream.  We record the current status tag here
6671          * so that the above check can report that the screaming interrupts
6672          * are unhandled.  Eventually they will be silenced.
6673          */
6674         tnapi->last_irq_tag = sblk->status_tag;
6675
6676         if (tg3_irq_sync(tp))
6677                 goto out;
6678
6679         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6680
6681         napi_schedule(&tnapi->napi);
6682
6683 out:
6684         return IRQ_RETVAL(handled);
6685 }
6686
6687 /* ISR for interrupt test */
6688 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6689 {
6690         struct tg3_napi *tnapi = dev_id;
6691         struct tg3 *tp = tnapi->tp;
6692         struct tg3_hw_status *sblk = tnapi->hw_status;
6693
6694         if ((sblk->status & SD_STATUS_UPDATED) ||
6695             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6696                 tg3_disable_ints(tp);
6697                 return IRQ_RETVAL(1);
6698         }
6699         return IRQ_RETVAL(0);
6700 }
6701
6702 #ifdef CONFIG_NET_POLL_CONTROLLER
6703 static void tg3_poll_controller(struct net_device *dev)
6704 {
6705         int i;
6706         struct tg3 *tp = netdev_priv(dev);
6707
6708         for (i = 0; i < tp->irq_cnt; i++)
6709                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6710 }
6711 #endif
6712
6713 static void tg3_tx_timeout(struct net_device *dev)
6714 {
6715         struct tg3 *tp = netdev_priv(dev);
6716
6717         if (netif_msg_tx_err(tp)) {
6718                 netdev_err(dev, "transmit timed out, resetting\n");
6719                 tg3_dump_state(tp);
6720         }
6721
6722         tg3_reset_task_schedule(tp);
6723 }
6724
6725 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6726 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6727 {
6728         u32 base = (u32) mapping & 0xffffffff;
6729
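        /* The second clause detects 32-bit wraparound; the first is a
         * cheap filter: 0x100000000 - 0xffffdcc0 == 9024 bytes, so a
         * buffer starting at or below 0xffffdcc0 cannot wrap even at
         * jumbo-frame size.  E.g. base == 0xffffe000, len == 0x2000:
         * base + len + 8 truncates to 0x2008 < base, so it is caught.
         */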
6730         return (base > 0xffffdcc0) && (base + len + 8 < base);
6731 }
6732
6733 /* Test for DMA addresses > 40-bit */
6734 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6735                                           int len)
6736 {
6737 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6738         if (tg3_flag(tp, 40BIT_DMA_BUG))
6739                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6740         return 0;
6741 #else
6742         return 0;
6743 #endif
6744 }
6745
6746 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6747                                  dma_addr_t mapping, u32 len, u32 flags,
6748                                  u32 mss, u32 vlan)
6749 {
6750         txbd->addr_hi = ((u64) mapping >> 32);
6751         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6752         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6753         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6754 }
6755
6756 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6757                             dma_addr_t map, u32 len, u32 flags,
6758                             u32 mss, u32 vlan)
6759 {
6760         struct tg3 *tp = tnapi->tp;
6761         bool hwbug = false;
6762
6763         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6764                 hwbug = true;
6765
6766         if (tg3_4g_overflow_test(map, len))
6767                 hwbug = true;
6768
6769         if (tg3_40bit_overflow_test(tp, map, len))
6770                 hwbug = true;
6771
6772         if (tp->dma_limit) {
6773                 u32 prvidx = *entry;
6774                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6775                 while (len > tp->dma_limit && *budget) {
6776                         u32 frag_len = tp->dma_limit;
6777                         len -= tp->dma_limit;
6778
6779                         /* Avoid the 8-byte DMA problem */
6780                         if (len <= 8) {
6781                                 len += tp->dma_limit / 2;
6782                                 frag_len = tp->dma_limit / 2;
6783                         }
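                        /* E.g. with a 4 KiB dma_limit, a 4100-byte
                         * fragment is emitted as 2048 + 2052 instead of
                         * 4096 + 4, so no descriptor is left with a
                         * tail of 8 bytes or less.
                         */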
6784
6785                         tnapi->tx_buffers[*entry].fragmented = true;
6786
6787                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6788                                       frag_len, tmp_flag, mss, vlan);
6789                         *budget -= 1;
6790                         prvidx = *entry;
6791                         *entry = NEXT_TX(*entry);
6792
6793                         map += frag_len;
6794                 }
6795
6796                 if (len) {
6797                         if (*budget) {
6798                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6799                                               len, flags, mss, vlan);
6800                                 *budget -= 1;
6801                                 *entry = NEXT_TX(*entry);
6802                         } else {
6803                                 hwbug = true;
6804                                 tnapi->tx_buffers[prvidx].fragmented = false;
6805                         }
6806                 }
6807         } else {
6808                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6809                               len, flags, mss, vlan);
6810                 *entry = NEXT_TX(*entry);
6811         }
6812
6813         return hwbug;
6814 }
6815
6816 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6817 {
6818         int i;
6819         struct sk_buff *skb;
6820         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6821
6822         skb = txb->skb;
6823         txb->skb = NULL;
6824
6825         pci_unmap_single(tnapi->tp->pdev,
6826                          dma_unmap_addr(txb, mapping),
6827                          skb_headlen(skb),
6828                          PCI_DMA_TODEVICE);
6829
6830         while (txb->fragmented) {
6831                 txb->fragmented = false;
6832                 entry = NEXT_TX(entry);
6833                 txb = &tnapi->tx_buffers[entry];
6834         }
6835
6836         for (i = 0; i <= last; i++) {
6837                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6838
6839                 entry = NEXT_TX(entry);
6840                 txb = &tnapi->tx_buffers[entry];
6841
6842                 pci_unmap_page(tnapi->tp->pdev,
6843                                dma_unmap_addr(txb, mapping),
6844                                skb_frag_size(frag), PCI_DMA_TODEVICE);
6845
6846                 while (txb->fragmented) {
6847                         txb->fragmented = false;
6848                         entry = NEXT_TX(entry);
6849                         txb = &tnapi->tx_buffers[entry];
6850                 }
6851         }
6852 }
6853
6854 /* Work around 4GB and 40-bit hardware DMA bugs. */
6855 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6856                                        struct sk_buff **pskb,
6857                                        u32 *entry, u32 *budget,
6858                                        u32 base_flags, u32 mss, u32 vlan)
6859 {
6860         struct tg3 *tp = tnapi->tp;
6861         struct sk_buff *new_skb, *skb = *pskb;
6862         dma_addr_t new_addr = 0;
6863         int ret = 0;
6864
6865         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6866                 new_skb = skb_copy(skb, GFP_ATOMIC);
6867         else {
6868                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6869
6870                 new_skb = skb_copy_expand(skb,
6871                                           skb_headroom(skb) + more_headroom,
6872                                           skb_tailroom(skb), GFP_ATOMIC);
6873         }
6874
6875         if (!new_skb) {
6876                 ret = -1;
6877         } else {
6878                 /* New SKB is guaranteed to be linear. */
6879                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6880                                           PCI_DMA_TODEVICE);
6881                 /* Make sure the mapping succeeded */
6882                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6883                         dev_kfree_skb(new_skb);
6884                         ret = -1;
6885                 } else {
6886                         u32 save_entry = *entry;
6887
6888                         base_flags |= TXD_FLAG_END;
6889
6890                         tnapi->tx_buffers[*entry].skb = new_skb;
6891                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6892                                            mapping, new_addr);
6893
6894                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6895                                             new_skb->len, base_flags,
6896                                             mss, vlan)) {
6897                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6898                                 dev_kfree_skb(new_skb);
6899                                 ret = -1;
6900                         }
6901                 }
6902         }
6903
6904         dev_kfree_skb(skb);
6905         *pskb = new_skb;
6906         return ret;
6907 }
6908
6909 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6910
6911 /* Use GSO to work around a rare TSO bug that may be triggered when the
6912  * TSO header is greater than 80 bytes.
6913  */
6914 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6915 {
6916         struct sk_buff *segs, *nskb;
6917         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
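        /* Three BDs per emitted segment is a deliberately pessimistic
         * guess: one for the copied headers plus payload that may be
         * split across page boundaries.
         */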
6918
6919         /* Estimate the number of fragments in the worst case */
6920         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6921                 netif_stop_queue(tp->dev);
6922
6923                 /* netif_tx_stop_queue() must be done before checking
6924                  * the tx index in tg3_tx_avail() below, because in
6925                  * tg3_tx(), we update tx index before checking for
6926                  * netif_tx_queue_stopped().
6927                  */
6928                 smp_mb();
6929                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6930                         return NETDEV_TX_BUSY;
6931
6932                 netif_wake_queue(tp->dev);
6933         }
6934
6935         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6936         if (IS_ERR(segs))
6937                 goto tg3_tso_bug_end;
6938
6939         do {
6940                 nskb = segs;
6941                 segs = segs->next;
6942                 nskb->next = NULL;
6943                 tg3_start_xmit(nskb, tp->dev);
6944         } while (segs);
6945
6946 tg3_tso_bug_end:
6947         dev_kfree_skb(skb);
6948
6949         return NETDEV_TX_OK;
6950 }
6951
6952 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6953  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6954  */
6955 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6956 {
6957         struct tg3 *tp = netdev_priv(dev);
6958         u32 len, entry, base_flags, mss, vlan = 0;
6959         u32 budget;
6960         int i = -1, would_hit_hwbug;
6961         dma_addr_t mapping;
6962         struct tg3_napi *tnapi;
6963         struct netdev_queue *txq;
6964         unsigned int last;
6965
6966         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6967         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6968         if (tg3_flag(tp, ENABLE_TSS))
6969                 tnapi++;
6970
6971         budget = tg3_tx_avail(tnapi);
6972
6973         /* We are running in BH disabled context with netif_tx_lock
6974          * and TX reclaim runs via tp->napi.poll inside of a software
6975          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6976          * no IRQ context deadlocks to worry about either.  Rejoice!
6977          */
6978         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6979                 if (!netif_tx_queue_stopped(txq)) {
6980                         netif_tx_stop_queue(txq);
6981
6982                         /* This is a hard error, log it. */
6983                         netdev_err(dev,
6984                                    "BUG! Tx Ring full when queue awake!\n");
6985                 }
6986                 return NETDEV_TX_BUSY;
6987         }
6988
6989         entry = tnapi->tx_prod;
6990         base_flags = 0;
6991         if (skb->ip_summed == CHECKSUM_PARTIAL)
6992                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6993
6994         mss = skb_shinfo(skb)->gso_size;
6995         if (mss) {
6996                 struct iphdr *iph;
6997                 u32 tcp_opt_len, hdr_len;
6998
6999                 if (skb_header_cloned(skb) &&
7000                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7001                         goto drop;
7002
7003                 iph = ip_hdr(skb);
7004                 tcp_opt_len = tcp_optlen(skb);
7005
7006                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7007
7008                 if (!skb_is_gso_v6(skb)) {
7009                         iph->check = 0;
7010                         iph->tot_len = htons(mss + hdr_len);
7011                 }
7012
7013                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7014                     tg3_flag(tp, TSO_BUG))
7015                         return tg3_tso_bug(tp, skb);
7016
7017                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7018                                TXD_FLAG_CPU_POST_DMA);
7019
7020                 if (tg3_flag(tp, HW_TSO_1) ||
7021                     tg3_flag(tp, HW_TSO_2) ||
7022                     tg3_flag(tp, HW_TSO_3)) {
7023                         tcp_hdr(skb)->check = 0;
7024                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7025                 } else
7026                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7027                                                                  iph->daddr, 0,
7028                                                                  IPPROTO_TCP,
7029                                                                  0);
7030
7031                 if (tg3_flag(tp, HW_TSO_3)) {
7032                         mss |= (hdr_len & 0xc) << 12;
7033                         if (hdr_len & 0x10)
7034                                 base_flags |= 0x00000010;
7035                         base_flags |= (hdr_len & 0x3e0) << 5;
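                        /* hdr_len is scattered across spare descriptor
                         * bits here.  For a plain 40-byte IPv4+TCP header
                         * (hdr_len == 0x28): bit 3 lands in mss as 0x8000,
                         * bit 4 is clear, and bit 5 lands in base_flags
                         * as 0x400.
                         */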
7036                 } else if (tg3_flag(tp, HW_TSO_2))
7037                         mss |= hdr_len << 9;
7038                 else if (tg3_flag(tp, HW_TSO_1) ||
7039                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7040                         if (tcp_opt_len || iph->ihl > 5) {
7041                                 int tsflags;
7042
7043                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7044                                 mss |= (tsflags << 11);
7045                         }
7046                 } else {
7047                         if (tcp_opt_len || iph->ihl > 5) {
7048                                 int tsflags;
7049
7050                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7051                                 base_flags |= tsflags << 12;
7052                         }
7053                 }
7054         }
7055
7056         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7057             !mss && skb->len > VLAN_ETH_FRAME_LEN)
7058                 base_flags |= TXD_FLAG_JMB_PKT;
7059
7060         if (vlan_tx_tag_present(skb)) {
7061                 base_flags |= TXD_FLAG_VLAN;
7062                 vlan = vlan_tx_tag_get(skb);
7063         }
7064
7065         len = skb_headlen(skb);
7066
7067         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7068         if (pci_dma_mapping_error(tp->pdev, mapping))
7069                 goto drop;
7070
7071
7072         tnapi->tx_buffers[entry].skb = skb;
7073         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7074
7075         would_hit_hwbug = 0;
7076
7077         if (tg3_flag(tp, 5701_DMA_BUG))
7078                 would_hit_hwbug = 1;
7079
7080         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7081                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7082                             mss, vlan)) {
7083                 would_hit_hwbug = 1;
7084         } else if (skb_shinfo(skb)->nr_frags > 0) {
7085                 u32 tmp_mss = mss;
7086
7087                 if (!tg3_flag(tp, HW_TSO_1) &&
7088                     !tg3_flag(tp, HW_TSO_2) &&
7089                     !tg3_flag(tp, HW_TSO_3))
7090                         tmp_mss = 0;
7091
7092                 /* Now loop through additional data
7093                  * fragments, and queue them.
7094                  */
7095                 last = skb_shinfo(skb)->nr_frags - 1;
7096                 for (i = 0; i <= last; i++) {
7097                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7098
7099                         len = skb_frag_size(frag);
7100                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7101                                                    len, DMA_TO_DEVICE);
7102
7103                         tnapi->tx_buffers[entry].skb = NULL;
7104                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7105                                            mapping);
7106                         if (dma_mapping_error(&tp->pdev->dev, mapping))
7107                                 goto dma_error;
7108
7109                         if (!budget ||
7110                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7111                                             len, base_flags |
7112                                             ((i == last) ? TXD_FLAG_END : 0),
7113                                             tmp_mss, vlan)) {
7114                                 would_hit_hwbug = 1;
7115                                 break;
7116                         }
7117                 }
7118         }
7119
7120         if (would_hit_hwbug) {
7121                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7122
7123                 /* If the workaround fails due to memory/mapping
7124                  * failure, silently drop this packet.
7125                  */
7126                 entry = tnapi->tx_prod;
7127                 budget = tg3_tx_avail(tnapi);
7128                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7129                                                 base_flags, mss, vlan))
7130                         goto drop_nofree;
7131         }
7132
7133         skb_tx_timestamp(skb);
7134         netdev_tx_sent_queue(txq, skb->len);
7135
7136         /* Sync BD data before updating mailbox */
7137         wmb();
7138
7139         /* Packets are ready, update Tx producer idx local and on card. */
7140         tw32_tx_mbox(tnapi->prodmbox, entry);
7141
7142         tnapi->tx_prod = entry;
7143         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7144                 netif_tx_stop_queue(txq);
7145
7146                 /* netif_tx_stop_queue() must be done before checking
7147                  * tx index in tg3_tx_avail() below, because in
7148                  * tg3_tx(), we update tx index before checking for
7149                  * netif_tx_queue_stopped().
7150                  */
7151                 smp_mb();
7152                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7153                         netif_tx_wake_queue(txq);
7154         }
7155
7156         mmiowb();
7157         return NETDEV_TX_OK;
7158
7159 dma_error:
7160         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7161         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7162 drop:
7163         dev_kfree_skb(skb);
7164 drop_nofree:
7165         tp->tx_dropped++;
7166         return NETDEV_TX_OK;
7167 }
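
/*
 * Editorial sketch, not driver code: the netif_tx_stop_queue() /
 * smp_mb() / tg3_tx_avail() sequence above is one half of a
 * lockless stop/wake handshake.  The matching half lives in the
 * tx completion path (tg3_tx(), earlier in this file), which
 * publishes the new consumer index, issues its own barrier, and
 * only then tests netif_tx_queue_stopped().  Roughly:
 */
#if 0	/* illustration only; assumes the tg3_tx() shape described above */
static void example_tx_complete(struct tg3_napi *tnapi,
				struct netdev_queue *txq, u32 sw_idx)
{
	tnapi->tx_cons = sw_idx;	/* publish consumer index */
	smp_mb();			/* pairs with smp_mb() in tg3_start_xmit() */
	if (netif_tx_queue_stopped(txq) &&
	    tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
		netif_tx_wake_queue(txq);
}
#endif
/*
 * Either side may lose the race, but the paired barriers guarantee
 * that at least one side sees the other's update, so a stopped
 * queue is always woken eventually.
 */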
7168
7169 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7170 {
7171         if (enable) {
7172                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7173                                   MAC_MODE_PORT_MODE_MASK);
7174
7175                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7176
7177                 if (!tg3_flag(tp, 5705_PLUS))
7178                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7179
7180                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7181                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7182                 else
7183                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7184         } else {
7185                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7186
7187                 if (tg3_flag(tp, 5705_PLUS) ||
7188                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7189                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7190                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7191         }
7192
7193         tw32(MAC_MODE, tp->mac_mode);
7194         udelay(40);
7195 }
7196
7197 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7198 {
7199         u32 val, bmcr, mac_mode, ptest = 0;
7200
7201         tg3_phy_toggle_apd(tp, false);
7202         tg3_phy_toggle_automdix(tp, 0);
7203
7204         if (extlpbk && tg3_phy_set_extloopbk(tp))
7205                 return -EIO;
7206
7207         bmcr = BMCR_FULLDPLX;
7208         switch (speed) {
7209         case SPEED_10:
7210                 break;
7211         case SPEED_100:
7212                 bmcr |= BMCR_SPEED100;
7213                 break;
7214         case SPEED_1000:
7215         default:
7216                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7217                         speed = SPEED_100;
7218                         bmcr |= BMCR_SPEED100;
7219                 } else {
7220                         speed = SPEED_1000;
7221                         bmcr |= BMCR_SPEED1000;
7222                 }
7223         }
7224
7225         if (extlpbk) {
7226                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7227                         tg3_readphy(tp, MII_CTRL1000, &val);
7228                         val |= CTL1000_AS_MASTER |
7229                                CTL1000_ENABLE_MASTER;
7230                         tg3_writephy(tp, MII_CTRL1000, val);
7231                 } else {
7232                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7233                                 MII_TG3_FET_PTEST_TRIM_2;
7234                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7235                 }
7236         } else
7237                 bmcr |= BMCR_LOOPBACK;
7238
7239         tg3_writephy(tp, MII_BMCR, bmcr);
7240
7241         /* The write needs to be flushed for the FETs */
7242         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7243                 tg3_readphy(tp, MII_BMCR, &bmcr);
7244
7245         udelay(40);
7246
7247         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7248             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7249                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7250                              MII_TG3_FET_PTEST_FRC_TX_LINK |
7251                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
7252
7253                 /* The write needs to be flushed for the AC131 */
7254                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7255         }
7256
7257         /* Reset to prevent losing 1st rx packet intermittently */
7258         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7259             tg3_flag(tp, 5780_CLASS)) {
7260                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7261                 udelay(10);
7262                 tw32_f(MAC_RX_MODE, tp->rx_mode);
7263         }
7264
7265         mac_mode = tp->mac_mode &
7266                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7267         if (speed == SPEED_1000)
7268                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7269         else
7270                 mac_mode |= MAC_MODE_PORT_MODE_MII;
7271
7272         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7273                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7274
7275                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7276                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7277                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7278                         mac_mode |= MAC_MODE_LINK_POLARITY;
7279
7280                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7281                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7282         }
7283
7284         tw32(MAC_MODE, mac_mode);
7285         udelay(40);
7286
7287         return 0;
7288 }
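
/*
 * Editorial sketch: the switch above composes a forced-speed,
 * full-duplex BMCR value from the standard <linux/mii.h> bits;
 * internal (non-extended) loopback just ORs in BMCR_LOOPBACK.
 * Illustration only:
 */
#if 0
	u32 bmcr_10   = BMCR_FULLDPLX;			/* 10 Mb/s   */
	u32 bmcr_100  = BMCR_FULLDPLX | BMCR_SPEED100;	/* 100 Mb/s  */
	u32 bmcr_1000 = BMCR_FULLDPLX | BMCR_SPEED1000;	/* 1000 Mb/s */
	u32 bmcr_lpbk = bmcr_1000 | BMCR_LOOPBACK;	/* internal loopback */
#endif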
7289
7290 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7291 {
7292         struct tg3 *tp = netdev_priv(dev);
7293
7294         if (features & NETIF_F_LOOPBACK) {
7295                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7296                         return;
7297
7298                 spin_lock_bh(&tp->lock);
7299                 tg3_mac_loopback(tp, true);
7300                 netif_carrier_on(tp->dev);
7301                 spin_unlock_bh(&tp->lock);
7302                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7303         } else {
7304                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7305                         return;
7306
7307                 spin_lock_bh(&tp->lock);
7308                 tg3_mac_loopback(tp, false);
7309                 /* Force link status check */
7310                 tg3_setup_phy(tp, 1);
7311                 spin_unlock_bh(&tp->lock);
7312                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7313         }
7314 }
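
/*
 * Editorial note: NETIF_F_LOOPBACK is normally toggled from
 * userspace through the ethtool features interface, e.g.
 * "ethtool -K eth0 loopback on"; tg3_set_features() below then
 * routes the change into tg3_set_loopback().
 */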
7315
7316 static netdev_features_t tg3_fix_features(struct net_device *dev,
7317         netdev_features_t features)
7318 {
7319         struct tg3 *tp = netdev_priv(dev);
7320
7321         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7322                 features &= ~NETIF_F_ALL_TSO;
7323
7324         return features;
7325 }
7326
7327 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7328 {
7329         netdev_features_t changed = dev->features ^ features;
7330
7331         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7332                 tg3_set_loopback(dev, features);
7333
7334         return 0;
7335 }
7336
7337 static void tg3_rx_prodring_free(struct tg3 *tp,
7338                                  struct tg3_rx_prodring_set *tpr)
7339 {
7340         int i;
7341
7342         if (tpr != &tp->napi[0].prodring) {
7343                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7344                      i = (i + 1) & tp->rx_std_ring_mask)
7345                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7346                                         tp->rx_pkt_map_sz);
7347
7348                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7349                         for (i = tpr->rx_jmb_cons_idx;
7350                              i != tpr->rx_jmb_prod_idx;
7351                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7352                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7353                                                 TG3_RX_JMB_MAP_SZ);
7354                         }
7355                 }
7356
7357                 return;
7358         }
7359
7360         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7361                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7362                                 tp->rx_pkt_map_sz);
7363
7364         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7365                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7366                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7367                                         TG3_RX_JMB_MAP_SZ);
7368         }
7369 }
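
/*
 * Editorial note: the cons-to-prod walks above rely on the ring
 * sizes being powers of two, so "(i + 1) & mask" is a branch-free
 * modulo increment.  Minimal sketch (names hypothetical):
 */
#if 0
	u32 idx = cons_idx;
	while (idx != prod_idx) {
		free_rx_buffer(idx);		/* hypothetical helper */
		idx = (idx + 1) & ring_mask;	/* ring_mask == size - 1 */
	}
#endif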
7370
7371 /* Initialize rx rings for packet processing.
7372  *
7373  * The chip has been shut down and the driver detached from
7374  * the networking core, so no interrupts or new tx packets will
7375  * end up in the driver.  tp->{tx,}lock are held and thus
7376  * we may not sleep.
7377  */
7378 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7379                                  struct tg3_rx_prodring_set *tpr)
7380 {
7381         u32 i, rx_pkt_dma_sz;
7382
7383         tpr->rx_std_cons_idx = 0;
7384         tpr->rx_std_prod_idx = 0;
7385         tpr->rx_jmb_cons_idx = 0;
7386         tpr->rx_jmb_prod_idx = 0;
7387
7388         if (tpr != &tp->napi[0].prodring) {
7389                 memset(&tpr->rx_std_buffers[0], 0,
7390                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7391                 if (tpr->rx_jmb_buffers)
7392                         memset(&tpr->rx_jmb_buffers[0], 0,
7393                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7394                 goto done;
7395         }
7396
7397         /* Zero out all descriptors. */
7398         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7399
7400         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7401         if (tg3_flag(tp, 5780_CLASS) &&
7402             tp->dev->mtu > ETH_DATA_LEN)
7403                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7404         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7405
7406         /* Initialize invariants of the rings; we only set this
7407          * stuff once.  This works because the card does not
7408          * write into the rx buffer posting rings.
7409          */
7410         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7411                 struct tg3_rx_buffer_desc *rxd;
7412
7413                 rxd = &tpr->rx_std[i];
7414                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7415                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7416                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7417                                (i << RXD_OPAQUE_INDEX_SHIFT));
7418         }
7419
7420         /* Now allocate fresh SKBs for each rx ring. */
7421         for (i = 0; i < tp->rx_pending; i++) {
7422                 unsigned int frag_size;
7423
7424                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7425                                       &frag_size) < 0) {
7426                         netdev_warn(tp->dev,
7427                                     "Using a smaller RX standard ring. Only "
7428                                     "%d out of %d buffers were allocated "
7429                                     "successfully\n", i, tp->rx_pending);
7430                         if (i == 0)
7431                                 goto initfail;
7432                         tp->rx_pending = i;
7433                         break;
7434                 }
7435         }
7436
7437         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7438                 goto done;
7439
7440         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7441
7442         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7443                 goto done;
7444
7445         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7446                 struct tg3_rx_buffer_desc *rxd;
7447
7448                 rxd = &tpr->rx_jmb[i].std;
7449                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7450                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7451                                   RXD_FLAG_JUMBO;
7452                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7453                        (i << RXD_OPAQUE_INDEX_SHIFT));
7454         }
7455
7456         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7457                 unsigned int frag_size;
7458
7459                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7460                                       &frag_size) < 0) {
7461                         netdev_warn(tp->dev,
7462                                     "Using a smaller RX jumbo ring. Only %d "
7463                                     "out of %d buffers were allocated "
7464                                     "successfully\n", i, tp->rx_jumbo_pending);
7465                         if (i == 0)
7466                                 goto initfail;
7467                         tp->rx_jumbo_pending = i;
7468                         break;
7469                 }
7470         }
7471
7472 done:
7473         return 0;
7474
7475 initfail:
7476         tg3_rx_prodring_free(tp, tpr);
7477         return -ENOMEM;
7478 }
7479
7480 static void tg3_rx_prodring_fini(struct tg3 *tp,
7481                                  struct tg3_rx_prodring_set *tpr)
7482 {
7483         kfree(tpr->rx_std_buffers);
7484         tpr->rx_std_buffers = NULL;
7485         kfree(tpr->rx_jmb_buffers);
7486         tpr->rx_jmb_buffers = NULL;
7487         if (tpr->rx_std) {
7488                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7489                                   tpr->rx_std, tpr->rx_std_mapping);
7490                 tpr->rx_std = NULL;
7491         }
7492         if (tpr->rx_jmb) {
7493                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7494                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7495                 tpr->rx_jmb = NULL;
7496         }
7497 }
7498
7499 static int tg3_rx_prodring_init(struct tg3 *tp,
7500                                 struct tg3_rx_prodring_set *tpr)
7501 {
7502         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7503                                       GFP_KERNEL);
7504         if (!tpr->rx_std_buffers)
7505                 return -ENOMEM;
7506
7507         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7508                                          TG3_RX_STD_RING_BYTES(tp),
7509                                          &tpr->rx_std_mapping,
7510                                          GFP_KERNEL);
7511         if (!tpr->rx_std)
7512                 goto err_out;
7513
7514         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7515                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7516                                               GFP_KERNEL);
7517                 if (!tpr->rx_jmb_buffers)
7518                         goto err_out;
7519
7520                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7521                                                  TG3_RX_JMB_RING_BYTES(tp),
7522                                                  &tpr->rx_jmb_mapping,
7523                                                  GFP_KERNEL);
7524                 if (!tpr->rx_jmb)
7525                         goto err_out;
7526         }
7527
7528         return 0;
7529
7530 err_out:
7531         tg3_rx_prodring_fini(tp, tpr);
7532         return -ENOMEM;
7533 }
7534
7535 /* Free up pending packets in all rx/tx rings.
7536  *
7537  * The chip has been shut down and the driver detached from
7538  * the networking core, so no interrupts or new tx packets will
7539  * end up in the driver.  tp->{tx,}lock is not held and we are not
7540  * in an interrupt context and thus may sleep.
7541  */
7542 static void tg3_free_rings(struct tg3 *tp)
7543 {
7544         int i, j;
7545
7546         for (j = 0; j < tp->irq_cnt; j++) {
7547                 struct tg3_napi *tnapi = &tp->napi[j];
7548
7549                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7550
7551                 if (!tnapi->tx_buffers)
7552                         continue;
7553
7554                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7555                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7556
7557                         if (!skb)
7558                                 continue;
7559
7560                         tg3_tx_skb_unmap(tnapi, i,
7561                                          skb_shinfo(skb)->nr_frags - 1);
7562
7563                         dev_kfree_skb_any(skb);
7564                 }
7565                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7566         }
7567 }
7568
7569 /* Initialize tx/rx rings for packet processing.
7570  *
7571  * The chip has been shut down and the driver detached from
7572  * the networking, so no interrupts or new tx packets will
7573  * the networking core, so no interrupts or new tx packets will
7574  * we may not sleep.
7575  */
7576 static int tg3_init_rings(struct tg3 *tp)
7577 {
7578         int i;
7579
7580         /* Free up all the SKBs. */
7581         tg3_free_rings(tp);
7582
7583         for (i = 0; i < tp->irq_cnt; i++) {
7584                 struct tg3_napi *tnapi = &tp->napi[i];
7585
7586                 tnapi->last_tag = 0;
7587                 tnapi->last_irq_tag = 0;
7588                 tnapi->hw_status->status = 0;
7589                 tnapi->hw_status->status_tag = 0;
7590                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7591
7592                 tnapi->tx_prod = 0;
7593                 tnapi->tx_cons = 0;
7594                 if (tnapi->tx_ring)
7595                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7596
7597                 tnapi->rx_rcb_ptr = 0;
7598                 if (tnapi->rx_rcb)
7599                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7600
7601                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7602                         tg3_free_rings(tp);
7603                         return -ENOMEM;
7604                 }
7605         }
7606
7607         return 0;
7608 }
7609
7610 static void tg3_mem_tx_release(struct tg3 *tp)
7611 {
7612         int i;
7613
7614         for (i = 0; i < tp->irq_max; i++) {
7615                 struct tg3_napi *tnapi = &tp->napi[i];
7616
7617                 if (tnapi->tx_ring) {
7618                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7619                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7620                         tnapi->tx_ring = NULL;
7621                 }
7622
7623                 kfree(tnapi->tx_buffers);
7624                 tnapi->tx_buffers = NULL;
7625         }
7626 }
7627
7628 static int tg3_mem_tx_acquire(struct tg3 *tp)
7629 {
7630         int i;
7631         struct tg3_napi *tnapi = &tp->napi[0];
7632
7633         /* If multivector TSS is enabled, vector 0 does not handle
7634          * tx interrupts.  Don't allocate any resources for it.
7635          */
7636         if (tg3_flag(tp, ENABLE_TSS))
7637                 tnapi++;
7638
7639         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7640                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7641                                             TG3_TX_RING_SIZE, GFP_KERNEL);
7642                 if (!tnapi->tx_buffers)
7643                         goto err_out;
7644
7645                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7646                                                     TG3_TX_RING_BYTES,
7647                                                     &tnapi->tx_desc_mapping,
7648                                                     GFP_KERNEL);
7649                 if (!tnapi->tx_ring)
7650                         goto err_out;
7651         }
7652
7653         return 0;
7654
7655 err_out:
7656         tg3_mem_tx_release(tp);
7657         return -ENOMEM;
7658 }
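
/*
 * Editorial note: the kzalloc(sizeof(s) * n, ...) spelling above
 * performs an unchecked multiplication.  Both factors here are
 * compile-time constants, so this is purely stylistic, but the
 * overflow-checked equivalent would be (sketch only):
 */
#if 0
	tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
				    sizeof(struct tg3_tx_ring_info),
				    GFP_KERNEL);
#endif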
7659
7660 static void tg3_mem_rx_release(struct tg3 *tp)
7661 {
7662         int i;
7663
7664         for (i = 0; i < tp->irq_max; i++) {
7665                 struct tg3_napi *tnapi = &tp->napi[i];
7666
7667                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7668
7669                 if (!tnapi->rx_rcb)
7670                         continue;
7671
7672                 dma_free_coherent(&tp->pdev->dev,
7673                                   TG3_RX_RCB_RING_BYTES(tp),
7674                                   tnapi->rx_rcb,
7675                                   tnapi->rx_rcb_mapping);
7676                 tnapi->rx_rcb = NULL;
7677         }
7678 }
7679
7680 static int tg3_mem_rx_acquire(struct tg3 *tp)
7681 {
7682         unsigned int i, limit;
7683
7684         limit = tp->rxq_cnt;
7685
7686         /* If RSS is enabled, vector zero still needs a producer
7687          * ring set; this is the true hw prodring.
7688          */
7689         if (tg3_flag(tp, ENABLE_RSS))
7690                 limit++;
7691
7692         for (i = 0; i < limit; i++) {
7693                 struct tg3_napi *tnapi = &tp->napi[i];
7694
7695                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7696                         goto err_out;
7697
7698                 /* If multivector RSS is enabled, vector 0
7699                  * does not handle rx or tx interrupts.
7700                  * Don't allocate any resources for it.
7701                  */
7702                 if (!i && tg3_flag(tp, ENABLE_RSS))
7703                         continue;
7704
7705                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7706                                                    TG3_RX_RCB_RING_BYTES(tp),
7707                                                    &tnapi->rx_rcb_mapping,
7708                                                    GFP_KERNEL);
7709                 if (!tnapi->rx_rcb)
7710                         goto err_out;
7711
7712                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7713         }
7714
7715         return 0;
7716
7717 err_out:
7718         tg3_mem_rx_release(tp);
7719         return -ENOMEM;
7720 }
7721
7722 /*
7723  * Must not be invoked with interrupt sources disabled and
7724  * the hardware shut down.
7725  */
7726 static void tg3_free_consistent(struct tg3 *tp)
7727 {
7728         int i;
7729
7730         for (i = 0; i < tp->irq_cnt; i++) {
7731                 struct tg3_napi *tnapi = &tp->napi[i];
7732
7733                 if (tnapi->hw_status) {
7734                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7735                                           tnapi->hw_status,
7736                                           tnapi->status_mapping);
7737                         tnapi->hw_status = NULL;
7738                 }
7739         }
7740
7741         tg3_mem_rx_release(tp);
7742         tg3_mem_tx_release(tp);
7743
7744         if (tp->hw_stats) {
7745                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7746                                   tp->hw_stats, tp->stats_mapping);
7747                 tp->hw_stats = NULL;
7748         }
7749 }
7750
7751 /*
7752  * Must not be invoked with interrupt sources disabled and
7753  * the hardware shut down.  Can sleep.
7754  */
7755 static int tg3_alloc_consistent(struct tg3 *tp)
7756 {
7757         int i;
7758
7759         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7760                                           sizeof(struct tg3_hw_stats),
7761                                           &tp->stats_mapping,
7762                                           GFP_KERNEL);
7763         if (!tp->hw_stats)
7764                 goto err_out;
7765
7766         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7767
7768         for (i = 0; i < tp->irq_cnt; i++) {
7769                 struct tg3_napi *tnapi = &tp->napi[i];
7770                 struct tg3_hw_status *sblk;
7771
7772                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7773                                                       TG3_HW_STATUS_SIZE,
7774                                                       &tnapi->status_mapping,
7775                                                       GFP_KERNEL);
7776                 if (!tnapi->hw_status)
7777                         goto err_out;
7778
7779                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7780                 sblk = tnapi->hw_status;
7781
7782                 if (tg3_flag(tp, ENABLE_RSS)) {
7783                         u16 *prodptr = NULL;
7784
7785                         /*
7786                          * When RSS is enabled, the status block format changes
7787                          * slightly.  The "rx_jumbo_consumer", "reserved",
7788                          * and "rx_mini_consumer" members get mapped to the
7789                          * other three rx return ring producer indexes.
7790                          */
7791                         switch (i) {
7792                         case 1:
7793                                 prodptr = &sblk->idx[0].rx_producer;
7794                                 break;
7795                         case 2:
7796                                 prodptr = &sblk->rx_jumbo_consumer;
7797                                 break;
7798                         case 3:
7799                                 prodptr = &sblk->reserved;
7800                                 break;
7801                         case 4:
7802                                 prodptr = &sblk->rx_mini_consumer;
7803                                 break;
7804                         }
7805                         tnapi->rx_rcb_prod_idx = prodptr;
7806                 } else {
7807                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7808                 }
7809         }
7810
7811         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
7812                 goto err_out;
7813
7814         return 0;
7815
7816 err_out:
7817         tg3_free_consistent(tp);
7818         return -ENOMEM;
7819 }
7820
7821 #define MAX_WAIT_CNT 1000
7822
7823 /* To stop a block, clear the enable bit and poll till it
7824  * clears.  tp->lock is held.
7825  */
7826 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7827 {
7828         unsigned int i;
7829         u32 val;
7830
7831         if (tg3_flag(tp, 5705_PLUS)) {
7832                 switch (ofs) {
7833                 case RCVLSC_MODE:
7834                 case DMAC_MODE:
7835                 case MBFREE_MODE:
7836                 case BUFMGR_MODE:
7837                 case MEMARB_MODE:
7838                         /* We can't enable/disable these bits of the
7839                          * 5705/5750, so just say success.
7840                          */
7841                         return 0;
7842
7843                 default:
7844                         break;
7845                 }
7846         }
7847
7848         val = tr32(ofs);
7849         val &= ~enable_bit;
7850         tw32_f(ofs, val);
7851
7852         for (i = 0; i < MAX_WAIT_CNT; i++) {
7853                 udelay(100);
7854                 val = tr32(ofs);
7855                 if ((val & enable_bit) == 0)
7856                         break;
7857         }
7858
7859         if (i == MAX_WAIT_CNT && !silent) {
7860                 dev_err(&tp->pdev->dev,
7861                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7862                         ofs, enable_bit);
7863                 return -ENODEV;
7864         }
7865
7866         return 0;
7867 }
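
/*
 * Editorial sketch: tg3_stop_block() is the classic bounded
 * poll-for-bit-clear loop (at most MAX_WAIT_CNT * 100 usec).
 * Later kernels usually spell this with the <linux/iopoll.h>
 * helpers; shown only as an equivalent shape, not as code this
 * driver uses (example_tr32 is an assumed wrapper around tr32()):
 */
#if 0
	u32 val;
	int err = read_poll_timeout(example_tr32, val,
				    !(val & enable_bit),
				    100, 100 * MAX_WAIT_CNT, false,
				    tp, ofs);
#endif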
7868
7869 /* tp->lock is held. */
7870 static int tg3_abort_hw(struct tg3 *tp, int silent)
7871 {
7872         int i, err;
7873
7874         tg3_disable_ints(tp);
7875
7876         tp->rx_mode &= ~RX_MODE_ENABLE;
7877         tw32_f(MAC_RX_MODE, tp->rx_mode);
7878         udelay(10);
7879
7880         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7881         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7882         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7883         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7884         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7885         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7886
7887         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7888         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7889         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7890         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7891         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7892         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7893         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7894
7895         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7896         tw32_f(MAC_MODE, tp->mac_mode);
7897         udelay(40);
7898
7899         tp->tx_mode &= ~TX_MODE_ENABLE;
7900         tw32_f(MAC_TX_MODE, tp->tx_mode);
7901
7902         for (i = 0; i < MAX_WAIT_CNT; i++) {
7903                 udelay(100);
7904                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7905                         break;
7906         }
7907         if (i >= MAX_WAIT_CNT) {
7908                 dev_err(&tp->pdev->dev,
7909                         "%s timed out, TX_MODE_ENABLE will not clear "
7910                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7911                 err |= -ENODEV;
7912         }
7913
7914         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7915         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7916         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7917
7918         tw32(FTQ_RESET, 0xffffffff);
7919         tw32(FTQ_RESET, 0x00000000);
7920
7921         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7922         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7923
7924         for (i = 0; i < tp->irq_cnt; i++) {
7925                 struct tg3_napi *tnapi = &tp->napi[i];
7926                 if (tnapi->hw_status)
7927                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7928         }
7929
7930         return err;
7931 }
7932
7933 /* Save PCI command register before chip reset */
7934 static void tg3_save_pci_state(struct tg3 *tp)
7935 {
7936         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7937 }
7938
7939 /* Restore PCI state after chip reset */
7940 static void tg3_restore_pci_state(struct tg3 *tp)
7941 {
7942         u32 val;
7943
7944         /* Re-enable indirect register accesses. */
7945         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7946                                tp->misc_host_ctrl);
7947
7948         /* Set MAX PCI retry to zero. */
7949         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7950         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7951             tg3_flag(tp, PCIX_MODE))
7952                 val |= PCISTATE_RETRY_SAME_DMA;
7953         /* Allow reads and writes to the APE register and memory space. */
7954         if (tg3_flag(tp, ENABLE_APE))
7955                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7956                        PCISTATE_ALLOW_APE_SHMEM_WR |
7957                        PCISTATE_ALLOW_APE_PSPACE_WR;
7958         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7959
7960         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7961
7962         if (!tg3_flag(tp, PCI_EXPRESS)) {
7963                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7964                                       tp->pci_cacheline_sz);
7965                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7966                                       tp->pci_lat_timer);
7967         }
7968
7969         /* Make sure PCI-X relaxed ordering bit is clear. */
7970         if (tg3_flag(tp, PCIX_MODE)) {
7971                 u16 pcix_cmd;
7972
7973                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7974                                      &pcix_cmd);
7975                 pcix_cmd &= ~PCI_X_CMD_ERO;
7976                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7977                                       pcix_cmd);
7978         }
7979
7980         if (tg3_flag(tp, 5780_CLASS)) {
7981
7982                 /* Chip reset on 5780 will reset MSI enable bit,
7983                  * so we need to restore it.
7984                  */
7985                 if (tg3_flag(tp, USING_MSI)) {
7986                         u16 ctrl;
7987
7988                         pci_read_config_word(tp->pdev,
7989                                              tp->msi_cap + PCI_MSI_FLAGS,
7990                                              &ctrl);
7991                         pci_write_config_word(tp->pdev,
7992                                               tp->msi_cap + PCI_MSI_FLAGS,
7993                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7994                         val = tr32(MSGINT_MODE);
7995                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7996                 }
7997         }
7998 }
7999
8000 /* tp->lock is held. */
8001 static int tg3_chip_reset(struct tg3 *tp)
8002 {
8003         u32 val;
8004         void (*write_op)(struct tg3 *, u32, u32);
8005         int i, err;
8006
8007         tg3_nvram_lock(tp);
8008
8009         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8010
8011         /* No matching tg3_nvram_unlock() after this because
8012          * the chip reset below will undo the nvram lock.
8013          */
8014         tp->nvram_lock_cnt = 0;
8015
8016         /* GRC_MISC_CFG core clock reset will clear the memory
8017          * enable bit in PCI register 4 and the MSI enable bit
8018          * on some chips, so we save relevant registers here.
8019          */
8020         tg3_save_pci_state(tp);
8021
8022         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8023             tg3_flag(tp, 5755_PLUS))
8024                 tw32(GRC_FASTBOOT_PC, 0);
8025
8026         /*
8027          * We must avoid the readl() that normally takes place.
8028          * It locks machines, causes machine checks, and other
8029          * fun things.  So, temporarily disable the 5701
8030          * hardware workaround, while we do the reset.
8031          * hardware workaround while we do the reset.
8032         write_op = tp->write32;
8033         if (write_op == tg3_write_flush_reg32)
8034                 tp->write32 = tg3_write32;
8035
8036         /* Prevent the irq handler from reading or writing PCI registers
8037          * during chip reset when the memory enable bit in the PCI command
8038          * register may be cleared.  The chip does not generate interrupts
8039          * at this time, but the irq handler may still be called due to irq
8040          * sharing or irqpoll.
8041          */
8042         tg3_flag_set(tp, CHIP_RESETTING);
8043         for (i = 0; i < tp->irq_cnt; i++) {
8044                 struct tg3_napi *tnapi = &tp->napi[i];
8045                 if (tnapi->hw_status) {
8046                         tnapi->hw_status->status = 0;
8047                         tnapi->hw_status->status_tag = 0;
8048                 }
8049                 tnapi->last_tag = 0;
8050                 tnapi->last_irq_tag = 0;
8051         }
8052         smp_mb();
8053
8054         for (i = 0; i < tp->irq_cnt; i++)
8055                 synchronize_irq(tp->napi[i].irq_vec);
8056
8057         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8058                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8059                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8060         }
8061
8062         /* do the reset */
8063         val = GRC_MISC_CFG_CORECLK_RESET;
8064
8065         if (tg3_flag(tp, PCI_EXPRESS)) {
8066                 /* Force PCIe 1.0a mode */
8067                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8068                     !tg3_flag(tp, 57765_PLUS) &&
8069                     tr32(TG3_PCIE_PHY_TSTCTL) ==
8070                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8071                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8072
8073                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
8074                         tw32(GRC_MISC_CFG, (1 << 29));
8075                         val |= (1 << 29);
8076                 }
8077         }
8078
8079         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8080                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8081                 tw32(GRC_VCPU_EXT_CTRL,
8082                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8083         }
8084
8085         /* Manage gphy power for all CPMU absent PCIe devices. */
8086         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8087                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8088
8089         tw32(GRC_MISC_CFG, val);
8090
8091         /* restore 5701 hardware bug workaround write method */
8092         tp->write32 = write_op;
8093
8094         /* Unfortunately, we have to delay before the PCI read back.
8095          * Some 575X chips will not even respond to a PCI cfg access
8096          * when the reset command is given to the chip.
8097          *
8098          * How do these hardware designers expect things to work
8099          * properly if the PCI write is posted for a long period
8100          * of time?  It is always necessary to have some method by
8101          * which a register read back can occur to push out the
8102          * write that does the reset.
8103          *
8104          * For most tg3 variants the trick below was working.
8105          * Ho hum...
8106          */
8107         udelay(120);
8108
8109         /* Flush PCI posted writes.  The normal MMIO registers
8110          * are inaccessible at this time so this is the only
8111          * way to do this reliably (actually, this is no longer
8112          * the case, see above).  I tried to use indirect
8113          * register read/write but this upset some 5701 variants.
8114          */
8115         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8116
8117         udelay(120);
8118
8119         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
8120                 u16 val16;
8121
8122                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
8123                         int i;
8124                         u32 cfg_val;
8125
8126                         /* Wait for link training to complete.  */
8127                         for (i = 0; i < 5000; i++)
8128                                 udelay(100);
8129
8130                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8131                         pci_write_config_dword(tp->pdev, 0xc4,
8132                                                cfg_val | (1 << 15));
8133                 }
8134
8135                 /* Clear the "no snoop" and "relaxed ordering" bits. */
8136                 pci_read_config_word(tp->pdev,
8137                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
8138                                      &val16);
8139                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
8140                            PCI_EXP_DEVCTL_NOSNOOP_EN);
8141                 /*
8142                  * Older PCIe devices only support the 128 byte
8143                  * MPS setting.  Enforce the restriction.
8144                  */
8145                 if (!tg3_flag(tp, CPMU_PRESENT))
8146                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
8147                 pci_write_config_word(tp->pdev,
8148                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
8149                                       val16);
8150
8151                 /* Clear error status */
8152                 pci_write_config_word(tp->pdev,
8153                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
8154                                       PCI_EXP_DEVSTA_CED |
8155                                       PCI_EXP_DEVSTA_NFED |
8156                                       PCI_EXP_DEVSTA_FED |
8157                                       PCI_EXP_DEVSTA_URD);
8158         }
8159
8160         tg3_restore_pci_state(tp);
8161
8162         tg3_flag_clear(tp, CHIP_RESETTING);
8163         tg3_flag_clear(tp, ERROR_PROCESSED);
8164
8165         val = 0;
8166         if (tg3_flag(tp, 5780_CLASS))
8167                 val = tr32(MEMARB_MODE);
8168         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8169
8170         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
8171                 tg3_stop_fw(tp);
8172                 tw32(0x5000, 0x400);
8173         }
8174
8175         tw32(GRC_MODE, tp->grc_mode);
8176
8177         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
8178                 val = tr32(0xc4);
8179
8180                 tw32(0xc4, val | (1 << 15));
8181         }
8182
8183         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8184             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8185                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8186                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8187                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8188                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8189         }
8190
8191         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8192                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8193                 val = tp->mac_mode;
8194         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8195                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8196                 val = tp->mac_mode;
8197         } else
8198                 val = 0;
8199
8200         tw32_f(MAC_MODE, val);
8201         udelay(40);
8202
8203         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8204
8205         err = tg3_poll_fw(tp);
8206         if (err)
8207                 return err;
8208
8209         tg3_mdio_start(tp);
8210
8211         if (tg3_flag(tp, PCI_EXPRESS) &&
8212             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8213             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8214             !tg3_flag(tp, 57765_PLUS)) {
8215                 val = tr32(0x7c00);
8216
8217                 tw32(0x7c00, val | (1 << 25));
8218         }
8219
8220         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8221                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8222                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8223         }
8224
8225         /* Reprobe ASF enable state.  */
8226         tg3_flag_clear(tp, ENABLE_ASF);
8227         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8228         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8229         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8230                 u32 nic_cfg;
8231
8232                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8233                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8234                         tg3_flag_set(tp, ENABLE_ASF);
8235                         tp->last_event_jiffies = jiffies;
8236                         if (tg3_flag(tp, 5750_PLUS))
8237                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8238                 }
8239         }
8240
8241         return 0;
8242 }
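
/*
 * Editorial sketch: the pci_read_config_dword() in tg3_chip_reset()
 * acts as a posted-write flush.  The same idiom for MMIO is a write
 * followed by a read back of any register on the same device
 * (base/REG_OFFSET are hypothetical):
 */
#if 0
	writel(val, base + REG_OFFSET);	/* write may sit in a posting buffer */
	(void)readl(base + REG_OFFSET);	/* read forces it out to the device */
#endif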
8243
8244 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8245 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8246
8247 /* tp->lock is held. */
8248 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8249 {
8250         int err;
8251
8252         tg3_stop_fw(tp);
8253
8254         tg3_write_sig_pre_reset(tp, kind);
8255
8256         tg3_abort_hw(tp, silent);
8257         err = tg3_chip_reset(tp);
8258
8259         __tg3_set_mac_addr(tp, 0);
8260
8261         tg3_write_sig_legacy(tp, kind);
8262         tg3_write_sig_post_reset(tp, kind);
8263
8264         if (tp->hw_stats) {
8265                 /* Save the stats across chip resets... */
8266                 tg3_get_nstats(tp, &tp->net_stats_prev);
8267                 tg3_get_estats(tp, &tp->estats_prev);
8268
8269                 /* And make sure the next sample is new data */
8270                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8271         }
8272
8273         if (err)
8274                 return err;
8275
8276         return 0;
8277 }
8278
8279 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8280 {
8281         struct tg3 *tp = netdev_priv(dev);
8282         struct sockaddr *addr = p;
8283         int err = 0, skip_mac_1 = 0;
8284
8285         if (!is_valid_ether_addr(addr->sa_data))
8286                 return -EADDRNOTAVAIL;
8287
8288         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8289
8290         if (!netif_running(dev))
8291                 return 0;
8292
8293         if (tg3_flag(tp, ENABLE_ASF)) {
8294                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8295
8296                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8297                 addr0_low = tr32(MAC_ADDR_0_LOW);
8298                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8299                 addr1_low = tr32(MAC_ADDR_1_LOW);
8300
8301                 /* Skip MAC addr 1 if ASF is using it. */
8302                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8303                     !(addr1_high == 0 && addr1_low == 0))
8304                         skip_mac_1 = 1;
8305         }
8306         spin_lock_bh(&tp->lock);
8307         __tg3_set_mac_addr(tp, skip_mac_1);
8308         spin_unlock_bh(&tp->lock);
8309
8310         return err;
8311 }
8312
8313 /* tp->lock is held. */
8314 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8315                            dma_addr_t mapping, u32 maxlen_flags,
8316                            u32 nic_addr)
8317 {
8318         tg3_write_mem(tp,
8319                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8320                       ((u64) mapping >> 32));
8321         tg3_write_mem(tp,
8322                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8323                       ((u64) mapping & 0xffffffff));
8324         tg3_write_mem(tp,
8325                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8326                        maxlen_flags);
8327
8328         if (!tg3_flag(tp, 5705_PLUS))
8329                 tg3_write_mem(tp,
8330                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8331                               nic_addr);
8332 }
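
/*
 * Editorial note: the explicit ">> 32" and "& 0xffffffff" split of
 * the DMA address above is equivalent to the upper_32_bits() /
 * lower_32_bits() helpers from <linux/kernel.h>; sketch only:
 */
#if 0
	tg3_write_mem(tp, bdinfo_addr + TG3_BDINFO_HOST_ADDR +
			  TG3_64BIT_REG_HIGH, upper_32_bits(mapping));
	tg3_write_mem(tp, bdinfo_addr + TG3_BDINFO_HOST_ADDR +
			  TG3_64BIT_REG_LOW, lower_32_bits(mapping));
#endif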
8333
8334 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8335 {
8336         int i;
8337
8338         if (!tg3_flag(tp, ENABLE_TSS)) {
8339                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8340                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8341                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8342         } else {
8343                 tw32(HOSTCC_TXCOL_TICKS, 0);
8344                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8345                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8346         }
8347
8348         if (!tg3_flag(tp, ENABLE_RSS)) {
8349                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8350                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8351                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8352         } else {
8353                 tw32(HOSTCC_RXCOL_TICKS, 0);
8354                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8355                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8356         }
8357
8358         if (!tg3_flag(tp, 5705_PLUS)) {
8359                 u32 val = ec->stats_block_coalesce_usecs;
8360
8361                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8362                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8363
8364                 if (!netif_carrier_ok(tp->dev))
8365                         val = 0;
8366
8367                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8368         }
8369
8370         for (i = 0; i < tp->irq_cnt - 1; i++) {
8371                 u32 reg;
8372
8373                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8374                 tw32(reg, ec->rx_coalesce_usecs);
8375                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8376                 tw32(reg, ec->rx_max_coalesced_frames);
8377                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8378                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8379
8380                 if (tg3_flag(tp, ENABLE_TSS)) {
8381                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8382                         tw32(reg, ec->tx_coalesce_usecs);
8383                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8384                         tw32(reg, ec->tx_max_coalesced_frames);
8385                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8386                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8387                 }
8388         }
8389
8390         for (; i < tp->irq_max - 1; i++) {
8391                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8392                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8393                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8394
8395                 if (tg3_flag(tp, ENABLE_TSS)) {
8396                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8397                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8398                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8399                 }
8400         }
8401 }
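
/*
 * Editorial note: the "+ i * 0x18" arithmetic above steps through
 * the per-vector host coalescing register blocks; each extra MSI-X
 * vector owns an 0x18-byte window starting at the *_VEC1 offsets.
 * Addressing sketch (vec is a hypothetical 1-based vector number):
 */
#if 0
	u32 rx_ticks  = HOSTCC_RXCOL_TICKS_VEC1  + (vec - 1) * 0x18;
	u32 rx_frames = HOSTCC_RXMAX_FRAMES_VEC1 + (vec - 1) * 0x18;
#endif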
8402
8403 /* tp->lock is held. */
8404 static void tg3_rings_reset(struct tg3 *tp)
8405 {
8406         int i;
8407         u32 stblk, txrcb, rxrcb, limit;
8408         struct tg3_napi *tnapi = &tp->napi[0];
8409
8410         /* Disable all transmit rings but the first. */
8411         if (!tg3_flag(tp, 5705_PLUS))
8412                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8413         else if (tg3_flag(tp, 5717_PLUS))
8414                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8415         else if (tg3_flag(tp, 57765_CLASS))
8416                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8417         else
8418                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8419
8420         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8421              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8422                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8423                               BDINFO_FLAGS_DISABLED);
8424
8425
8426         /* Disable all receive return rings but the first. */
8427         if (tg3_flag(tp, 5717_PLUS))
8428                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8429         else if (!tg3_flag(tp, 5705_PLUS))
8430                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8431         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8432                  tg3_flag(tp, 57765_CLASS))
8433                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8434         else
8435                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8436
8437         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8438              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8439                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8440                               BDINFO_FLAGS_DISABLED);
8441
8442         /* Disable interrupts */
8443         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8444         tp->napi[0].chk_msi_cnt = 0;
8445         tp->napi[0].last_rx_cons = 0;
8446         tp->napi[0].last_tx_cons = 0;
8447
8448         /* Zero mailbox registers. */
8449         if (tg3_flag(tp, SUPPORT_MSIX)) {
8450                 for (i = 1; i < tp->irq_max; i++) {
8451                         tp->napi[i].tx_prod = 0;
8452                         tp->napi[i].tx_cons = 0;
8453                         if (tg3_flag(tp, ENABLE_TSS))
8454                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8455                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8456                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8457                         tp->napi[i].chk_msi_cnt = 0;
8458                         tp->napi[i].last_rx_cons = 0;
8459                         tp->napi[i].last_tx_cons = 0;
8460                 }
8461                 if (!tg3_flag(tp, ENABLE_TSS))
8462                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8463         } else {
8464                 tp->napi[0].tx_prod = 0;
8465                 tp->napi[0].tx_cons = 0;
8466                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8467                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8468         }
8469
8470         /* Make sure the NIC-based send BD rings are disabled. */
8471         if (!tg3_flag(tp, 5705_PLUS)) {
8472                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8473                 for (i = 0; i < 16; i++)
8474                         tw32_tx_mbox(mbox + i * 8, 0);
8475         }
8476
8477         txrcb = NIC_SRAM_SEND_RCB;
8478         rxrcb = NIC_SRAM_RCV_RET_RCB;
8479
8480         /* Clear status block in ram. */
8481         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8482
8483         /* Set status block DMA address */
8484         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8485              ((u64) tnapi->status_mapping >> 32));
8486         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8487              ((u64) tnapi->status_mapping & 0xffffffff));
8488
8489         if (tnapi->tx_ring) {
8490                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8491                                (TG3_TX_RING_SIZE <<
8492                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8493                                NIC_SRAM_TX_BUFFER_DESC);
8494                 txrcb += TG3_BDINFO_SIZE;
8495         }
8496
8497         if (tnapi->rx_rcb) {
8498                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8499                                (tp->rx_ret_ring_mask + 1) <<
8500                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8501                 rxrcb += TG3_BDINFO_SIZE;
8502         }
8503
8504         stblk = HOSTCC_STATBLCK_RING1;
8505
8506         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8507                 u64 mapping = (u64)tnapi->status_mapping;
8508                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8509                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8510
8511                 /* Clear status block in ram. */
8512                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8513
8514                 if (tnapi->tx_ring) {
8515                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8516                                        (TG3_TX_RING_SIZE <<
8517                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8518                                        NIC_SRAM_TX_BUFFER_DESC);
8519                         txrcb += TG3_BDINFO_SIZE;
8520                 }
8521
8522                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8523                                ((tp->rx_ret_ring_mask + 1) <<
8524                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8525
8526                 stblk += 8;
8527                 rxrcb += TG3_BDINFO_SIZE;
8528         }
8529 }
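/* Status-block plumbing in the function above: vector 0's status block
 * address is written to HOSTCC_STATUS_BLK_HOST_ADDR, while vectors
 * 1..irq_cnt-1 get consecutive high/low register pairs starting at
 * HOSTCC_STATBLCK_RING1 (hence the "stblk += 8" per iteration), each
 * pointing at that vector's own DMA-mapped status block.
 */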
8530
8531 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8532 {
8533         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8534
8535         if (!tg3_flag(tp, 5750_PLUS) ||
8536             tg3_flag(tp, 5780_CLASS) ||
8537             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8538             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8539             tg3_flag(tp, 57765_PLUS))
8540                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8541         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8542                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8543                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8544         else
8545                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8546
8547         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8548         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8549
8550         val = min(nic_rep_thresh, host_rep_thresh);
8551         tw32(RCVBDI_STD_THRESH, val);
8552
8553         if (tg3_flag(tp, 57765_PLUS))
8554                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8555
8556         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8557                 return;
8558
8559         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8560
8561         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8562
8563         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8564         tw32(RCVBDI_JUMBO_THRESH, val);
8565
8566         if (tg3_flag(tp, 57765_PLUS))
8567                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8568 }
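/* Worked example of the threshold math above, with hypothetical values:
 * if rx_pending = 200, then host_rep_thresh = max(200 / 8, 1) = 25, and
 * RCVBDI_STD_THRESH is programmed with the smallest of bdcache_maxcnt / 2,
 * rx_std_max_post, and 25.  The jumbo path repeats the same calculation
 * with rx_jumbo_pending and the jumbo BD cache size.
 */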
8569
8570 static inline u32 calc_crc(unsigned char *buf, int len)
8571 {
8572         u32 reg;
8573         u32 tmp;
8574         int j, k;
8575
8576         reg = 0xffffffff;
8577
8578         for (j = 0; j < len; j++) {
8579                 reg ^= buf[j];
8580
8581                 for (k = 0; k < 8; k++) {
8582                         tmp = reg & 0x01;
8583
8584                         reg >>= 1;
8585
8586                         if (tmp)
8587                                 reg ^= 0xedb88320;
8588                 }
8589         }
8590
8591         return ~reg;
8592 }
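/* calc_crc() above is the standard bit-reflected CRC-32 used by
 * IEEE 802.3: polynomial 0xedb88320 (the reflected form of 0x04c11db7),
 * initial value 0xffffffff, data fed LSB first, result inverted.  The
 * multicast filter below derives its hash bucket from this value.
 */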
8593
8594 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8595 {
8596         /* accept or reject all multicast frames */
8597         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8598         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8599         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8600         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8601 }
8602
8603 static void __tg3_set_rx_mode(struct net_device *dev)
8604 {
8605         struct tg3 *tp = netdev_priv(dev);
8606         u32 rx_mode;
8607
8608         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8609                                   RX_MODE_KEEP_VLAN_TAG);
8610
8611 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8612         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8613          * flag clear.
8614          */
8615         if (!tg3_flag(tp, ENABLE_ASF))
8616                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8617 #endif
8618
8619         if (dev->flags & IFF_PROMISC) {
8620                 /* Promiscuous mode. */
8621                 rx_mode |= RX_MODE_PROMISC;
8622         } else if (dev->flags & IFF_ALLMULTI) {
8623                 /* Accept all multicast. */
8624                 tg3_set_multi(tp, 1);
8625         } else if (netdev_mc_empty(dev)) {
8626                 /* Reject all multicast. */
8627                 tg3_set_multi(tp, 0);
8628         } else {
8629                 /* Accept one or more multicast(s). */
8630                 struct netdev_hw_addr *ha;
8631                 u32 mc_filter[4] = { 0, };
8632                 u32 regidx;
8633                 u32 bit;
8634                 u32 crc;
8635
8636                 netdev_for_each_mc_addr(ha, dev) {
8637                         crc = calc_crc(ha->addr, ETH_ALEN);
8638                         bit = ~crc & 0x7f;
8639                         regidx = (bit & 0x60) >> 5;
8640                         bit &= 0x1f;
8641                         mc_filter[regidx] |= (1 << bit);
8642                 }
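                /* The inverted CRC yields a 7-bit hash index: bits 6:5
                 * pick one of the four 32-bit MAC_HASH_REG_n registers,
                 * bits 4:0 pick the bit within it.  For example, a
                 * (hypothetical) index of 0x6f sets bit 15 of register 3.
                 */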
8643
8644                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8645                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8646                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8647                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8648         }
8649
8650         if (rx_mode != tp->rx_mode) {
8651                 tp->rx_mode = rx_mode;
8652                 tw32_f(MAC_RX_MODE, rx_mode);
8653                 udelay(10);
8654         }
8655 }
8656
8657 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
8658 {
8659         int i;
8660
8661         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8662                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
8663 }
8664
8665 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8666 {
8667         int i;
8668
8669         if (!tg3_flag(tp, SUPPORT_MSIX))
8670                 return;
8671
8672         if (tp->irq_cnt <= 2) {
8673                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8674                 return;
8675         }
8676
8677         /* Validate table against current IRQ count */
8678         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8679                 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8680                         break;
8681         }
8682
8683         if (i != TG3_RSS_INDIR_TBL_SIZE)
8684                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
8685 }
8686
8687 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8688 {
8689         int i = 0;
8690         u32 reg = MAC_RSS_INDIR_TBL_0;
8691
8692         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8693                 u32 val = tp->rss_ind_tbl[i];
8694                 i++;
8695                 for (; i % 8; i++) {
8696                         val <<= 4;
8697                         val |= tp->rss_ind_tbl[i];
8698                 }
8699                 tw32(reg, val);
8700                 reg += 4;
8701         }
8702 }
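/* Packing note: rss_ind_tbl[] holds TG3_RSS_INDIR_TBL_SIZE 4-bit queue
 * indices.  The loop above packs eight of them per 32-bit write, first
 * entry in the most significant nibble, so the table occupies
 * TG3_RSS_INDIR_TBL_SIZE / 8 consecutive registers starting at
 * MAC_RSS_INDIR_TBL_0.
 */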
8703
8704 /* tp->lock is held. */
8705 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8706 {
8707         u32 val, rdmac_mode;
8708         int i, err, limit;
8709         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8710
8711         tg3_disable_ints(tp);
8712
8713         tg3_stop_fw(tp);
8714
8715         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8716
8717         if (tg3_flag(tp, INIT_COMPLETE))
8718                 tg3_abort_hw(tp, 1);
8719
8720         /* Enable MAC control of LPI */
8721         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8722                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8723                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8724                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8725
8726                 tw32_f(TG3_CPMU_EEE_CTRL,
8727                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8728
8729                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8730                       TG3_CPMU_EEEMD_LPI_IN_TX |
8731                       TG3_CPMU_EEEMD_LPI_IN_RX |
8732                       TG3_CPMU_EEEMD_EEE_ENABLE;
8733
8734                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8735                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8736
8737                 if (tg3_flag(tp, ENABLE_APE))
8738                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8739
8740                 tw32_f(TG3_CPMU_EEE_MODE, val);
8741
8742                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8743                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8744                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8745
8746                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8747                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8748                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8749         }
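        /* Background: EEE (IEEE 802.3az) lets the MAC/PHY drop into Low
         * Power Idle between packets.  The CPMU debounce timers written
         * above (the *_2047US constants suggest 2047 us budgets) govern
         * entering and exiting that state around PCIe and link-idle
         * events.
         */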
8750
8751         if (reset_phy)
8752                 tg3_phy_reset(tp);
8753
8754         err = tg3_chip_reset(tp);
8755         if (err)
8756                 return err;
8757
8758         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8759
8760         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8761                 val = tr32(TG3_CPMU_CTRL);
8762                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8763                 tw32(TG3_CPMU_CTRL, val);
8764
8765                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8766                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8767                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8768                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8769
8770                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8771                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8772                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8773                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8774
8775                 val = tr32(TG3_CPMU_HST_ACC);
8776                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8777                 val |= CPMU_HST_ACC_MACCLK_6_25;
8778                 tw32(TG3_CPMU_HST_ACC, val);
8779         }
8780
8781         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8782                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8783                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8784                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8785                 tw32(PCIE_PWR_MGMT_THRESH, val);
8786
8787                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8788                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8789
8790                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8791
8792                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8793                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8794         }
8795
8796         if (tg3_flag(tp, L1PLLPD_EN)) {
8797                 u32 grc_mode = tr32(GRC_MODE);
8798
8799                 /* Access the lower 1K of PL PCIE block registers. */
8800                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8801                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8802
8803                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8804                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8805                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8806
8807                 tw32(GRC_MODE, grc_mode);
8808         }
8809
8810         if (tg3_flag(tp, 57765_CLASS)) {
8811                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8812                         u32 grc_mode = tr32(GRC_MODE);
8813
8814                         /* Access the lower 1K of PL PCIE block registers. */
8815                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8816                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8817
8818                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8819                                    TG3_PCIE_PL_LO_PHYCTL5);
8820                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8821                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8822
8823                         tw32(GRC_MODE, grc_mode);
8824                 }
8825
8826                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8827                         u32 grc_mode = tr32(GRC_MODE);
8828
8829                         /* Access the lower 1K of DL PCIE block registers. */
8830                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8831                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8832
8833                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8834                                    TG3_PCIE_DL_LO_FTSMAX);
8835                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8836                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8837                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8838
8839                         tw32(GRC_MODE, grc_mode);
8840                 }
8841
8842                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8843                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8844                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8845                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8846         }
8847
8848         /* This works around an issue with Athlon chipsets on
8849          * B3 tigon3 silicon.  This bit has no effect on any
8850          * other revision.  But do not set this on PCI Express
8851          * chips and don't even touch the clocks if the CPMU is present.
8852          */
8853         if (!tg3_flag(tp, CPMU_PRESENT)) {
8854                 if (!tg3_flag(tp, PCI_EXPRESS))
8855                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8856                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8857         }
8858
8859         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8860             tg3_flag(tp, PCIX_MODE)) {
8861                 val = tr32(TG3PCI_PCISTATE);
8862                 val |= PCISTATE_RETRY_SAME_DMA;
8863                 tw32(TG3PCI_PCISTATE, val);
8864         }
8865
8866         if (tg3_flag(tp, ENABLE_APE)) {
8867                 /* Allow reads and writes to the
8868                  * APE register and memory space.
8869                  */
8870                 val = tr32(TG3PCI_PCISTATE);
8871                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8872                        PCISTATE_ALLOW_APE_SHMEM_WR |
8873                        PCISTATE_ALLOW_APE_PSPACE_WR;
8874                 tw32(TG3PCI_PCISTATE, val);
8875         }
8876
8877         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8878                 /* Enable some hw fixes.  */
8879                 val = tr32(TG3PCI_MSI_DATA);
8880                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8881                 tw32(TG3PCI_MSI_DATA, val);
8882         }
8883
8884         /* Descriptor ring init may access the NIC SRAM
8885          * area to set up the TX descriptors, so we
8886          * can only do this after the hardware has been
8887          * successfully reset.
8888          */
8889         err = tg3_init_rings(tp);
8890         if (err)
8891                 return err;
8892
8893         if (tg3_flag(tp, 57765_PLUS)) {
8894                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8895                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8896                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8897                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8898                 if (!tg3_flag(tp, 57765_CLASS) &&
8899                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8900                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8901                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8902         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8903                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8904                 /* This value is determined during the probe time DMA
8905                  * engine test, tg3_test_dma.
8906                  */
8907                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8908         }
8909
8910         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8911                           GRC_MODE_4X_NIC_SEND_RINGS |
8912                           GRC_MODE_NO_TX_PHDR_CSUM |
8913                           GRC_MODE_NO_RX_PHDR_CSUM);
8914         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8915
8916         /* Pseudo-header checksum is done by hardware logic and not
8917          * the offload processors, so make the chip do the pseudo-
8918          * header checksums on receive.  For transmit it is more
8919          * convenient to do the pseudo-header checksum in software
8920          * as Linux does that on transmit for us in all cases.
8921          */
8922         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8923
8924         tw32(GRC_MODE,
8925              tp->grc_mode |
8926              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8927
8928         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
8929         val = tr32(GRC_MISC_CFG);
8930         val &= ~0xff;
8931         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8932         tw32(GRC_MISC_CFG, val);
8933
8934         /* Initialize MBUF/DESC pool. */
8935         if (tg3_flag(tp, 5750_PLUS)) {
8936                 /* Do nothing.  */
8937         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8938                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8939                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8940                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8941                 else
8942                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8943                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8944                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8945         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8946                 int fw_len;
8947
8948                 fw_len = tp->fw_len;
8949                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8950                 tw32(BUFMGR_MB_POOL_ADDR,
8951                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8952                 tw32(BUFMGR_MB_POOL_SIZE,
8953                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8954         }
8955
8956         if (tp->dev->mtu <= ETH_DATA_LEN) {
8957                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8958                      tp->bufmgr_config.mbuf_read_dma_low_water);
8959                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8960                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8961                 tw32(BUFMGR_MB_HIGH_WATER,
8962                      tp->bufmgr_config.mbuf_high_water);
8963         } else {
8964                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8965                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8966                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8967                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8968                 tw32(BUFMGR_MB_HIGH_WATER,
8969                      tp->bufmgr_config.mbuf_high_water_jumbo);
8970         }
8971         tw32(BUFMGR_DMA_LOW_WATER,
8972              tp->bufmgr_config.dma_low_water);
8973         tw32(BUFMGR_DMA_HIGH_WATER,
8974              tp->bufmgr_config.dma_high_water);
8975
8976         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8977         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8978                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8979         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8980             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8981             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8982                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8983         tw32(BUFMGR_MODE, val);
8984         for (i = 0; i < 2000; i++) {
8985                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8986                         break;
8987                 udelay(10);
8988         }
8989         if (i >= 2000) {
8990                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8991                 return -ENODEV;
8992         }
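        /* The poll above waits up to 2000 * 10 us = 20 ms for the buffer
         * manager's enable bit to latch before declaring the device dead.
         */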
8993
8994         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8995                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8996
8997         tg3_setup_rxbd_thresholds(tp);
8998
8999         /* Initialize the TG3_BDINFOs at:
9000          *  RCVDBDI_STD_BD:     standard eth size rx ring
9001          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9002          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
9003          *
9004          * like so:
9005          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9006          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9007          *                              ring attribute flags
9008          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9009          *
9010          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9011          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9012          *
9013          * The size of each ring is fixed in the firmware, but the location is
9014          * configurable.
9015          */
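        /* Viewed from the host, each BDINFO slot is four consecutive
         * 32-bit words in NIC SRAM (a conceptual sketch, not a struct
         * the driver actually declares):
         *
         *      struct tg3_bdinfo_layout {
         *              u32 host_addr_hi;  TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH
         *              u32 host_addr_lo;  TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW
         *              u32 maxlen_flags;  TG3_BDINFO_MAXLEN_FLAGS
         *              u32 nic_addr;      TG3_BDINFO_NIC_ADDR
         *      };
         */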
9016         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9017              ((u64) tpr->rx_std_mapping >> 32));
9018         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9019              ((u64) tpr->rx_std_mapping & 0xffffffff));
9020         if (!tg3_flag(tp, 5717_PLUS))
9021                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9022                      NIC_SRAM_RX_BUFFER_DESC);
9023
9024         /* Disable the mini ring */
9025         if (!tg3_flag(tp, 5705_PLUS))
9026                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9027                      BDINFO_FLAGS_DISABLED);
9028
9029         /* Program the jumbo buffer descriptor ring control
9030          * blocks on those devices that have them.
9031          */
9032         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9033             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9034
9035                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9036                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9037                              ((u64) tpr->rx_jmb_mapping >> 32));
9038                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9039                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9040                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9041                               BDINFO_FLAGS_MAXLEN_SHIFT;
9042                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9043                              val | BDINFO_FLAGS_USE_EXT_RECV);
9044                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9045                             tg3_flag(tp, 57765_CLASS))
9046                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9047                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9048                 } else {
9049                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9050                              BDINFO_FLAGS_DISABLED);
9051                 }
9052
9053                 if (tg3_flag(tp, 57765_PLUS)) {
9054                         val = TG3_RX_STD_RING_SIZE(tp);
9055                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9056                         val |= (TG3_RX_STD_DMA_SZ << 2);
9057                 } else
9058                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9059         } else
9060                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9061
9062         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9063
9064         tpr->rx_std_prod_idx = tp->rx_pending;
9065         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9066
9067         tpr->rx_jmb_prod_idx =
9068                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9069         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9070
9071         tg3_rings_reset(tp);
9072
9073         /* Initialize MAC address and backoff seed. */
9074         __tg3_set_mac_addr(tp, 0);
9075
9076         /* MTU + ethernet header + FCS + optional VLAN tag */
9077         tw32(MAC_RX_MTU_SIZE,
9078              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9079
9080         /* The slot time is changed by tg3_setup_phy if we
9081          * run at gigabit with half duplex.
9082          */
9083         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9084               (6 << TX_LENGTHS_IPG_SHIFT) |
9085               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9086
9087         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9088                 val |= tr32(MAC_TX_LENGTHS) &
9089                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9090                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9091
9092         tw32(MAC_TX_LENGTHS, val);
9093
9094         /* Receive rules. */
9095         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9096         tw32(RCVLPC_CONFIG, 0x0181);
9097
9098         /* Calculate RDMAC_MODE setting early; we need it to determine
9099          * the RCVLPC_STATE_ENABLE mask.
9100          */
9101         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9102                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9103                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9104                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9105                       RDMAC_MODE_LNGREAD_ENAB);
9106
9107         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9108                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9109
9110         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9111             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9112             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9113                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9114                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9115                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9116
9117         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9118             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9119                 if (tg3_flag(tp, TSO_CAPABLE) &&
9120                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
9121                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9122                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9123                            !tg3_flag(tp, IS_5788)) {
9124                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9125                 }
9126         }
9127
9128         if (tg3_flag(tp, PCI_EXPRESS))
9129                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9130
9131         if (tg3_flag(tp, HW_TSO_1) ||
9132             tg3_flag(tp, HW_TSO_2) ||
9133             tg3_flag(tp, HW_TSO_3))
9134                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9135
9136         if (tg3_flag(tp, 57765_PLUS) ||
9137             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9138             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9139                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9140
9141         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9142                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9143
9144         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9145             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9146             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9147             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9148             tg3_flag(tp, 57765_PLUS)) {
9149                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
9150                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
9151                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9152                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9153                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9154                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9155                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9156                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9157                 }
9158                 tw32(TG3_RDMA_RSRVCTRL_REG,
9159                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9160         }
9161
9162         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9163             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9164                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9165                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
9166                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9167                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9168         }
9169
9170         /* Receive/send statistics. */
9171         if (tg3_flag(tp, 5750_PLUS)) {
9172                 val = tr32(RCVLPC_STATS_ENABLE);
9173                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9174                 tw32(RCVLPC_STATS_ENABLE, val);
9175         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9176                    tg3_flag(tp, TSO_CAPABLE)) {
9177                 val = tr32(RCVLPC_STATS_ENABLE);
9178                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9179                 tw32(RCVLPC_STATS_ENABLE, val);
9180         } else {
9181                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9182         }
9183         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9184         tw32(SNDDATAI_STATSENAB, 0xffffff);
9185         tw32(SNDDATAI_STATSCTRL,
9186              (SNDDATAI_SCTRL_ENABLE |
9187               SNDDATAI_SCTRL_FASTUPD));
9188
9189         /* Set up the host coalescing engine. */
9190         tw32(HOSTCC_MODE, 0);
9191         for (i = 0; i < 2000; i++) {
9192                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9193                         break;
9194                 udelay(10);
9195         }
9196
9197         __tg3_set_coalesce(tp, &tp->coal);
9198
9199         if (!tg3_flag(tp, 5705_PLUS)) {
9200                 /* Status/statistics block address.  See tg3_timer,
9201                  * the tg3_periodic_fetch_stats call there, and
9202                  * tg3_get_stats to see how this works for 5705/5750 chips.
9203                  */
9204                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9205                      ((u64) tp->stats_mapping >> 32));
9206                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9207                      ((u64) tp->stats_mapping & 0xffffffff));
9208                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9209
9210                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9211
9212                 /* Clear statistics and status block memory areas */
9213                 for (i = NIC_SRAM_STATS_BLK;
9214                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9215                      i += sizeof(u32)) {
9216                         tg3_write_mem(tp, i, 0);
9217                         udelay(40);
9218                 }
9219         }
9220
9221         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9222
9223         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9224         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9225         if (!tg3_flag(tp, 5705_PLUS))
9226                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9227
9228         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9229                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9230                 /* reset to prevent losing 1st rx packet intermittently */
9231                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9232                 udelay(10);
9233         }
9234
9235         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9236                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9237                         MAC_MODE_FHDE_ENABLE;
9238         if (tg3_flag(tp, ENABLE_APE))
9239                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9240         if (!tg3_flag(tp, 5705_PLUS) &&
9241             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9242             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9243                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9244         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9245         udelay(40);
9246
9247         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9248          * If TG3_FLAG_IS_NIC is zero, we should read the
9249          * register to preserve the GPIO settings for LOMs. The GPIOs,
9250          * whether used as inputs or outputs, are set by boot code after
9251          * reset.
9252          */
9253         if (!tg3_flag(tp, IS_NIC)) {
9254                 u32 gpio_mask;
9255
9256                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9257                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9258                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9259
9260                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9261                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9262                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9263
9264                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9265                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9266
9267                 tp->grc_local_ctrl &= ~gpio_mask;
9268                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9269
9270                 /* GPIO1 must be driven high for eeprom write protect */
9271                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9272                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9273                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9274         }
9275         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9276         udelay(100);
9277
9278         if (tg3_flag(tp, USING_MSIX)) {
9279                 val = tr32(MSGINT_MODE);
9280                 val |= MSGINT_MODE_ENABLE;
9281                 if (tp->irq_cnt > 1)
9282                         val |= MSGINT_MODE_MULTIVEC_EN;
9283                 if (!tg3_flag(tp, 1SHOT_MSI))
9284                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9285                 tw32(MSGINT_MODE, val);
9286         }
9287
9288         if (!tg3_flag(tp, 5705_PLUS)) {
9289                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9290                 udelay(40);
9291         }
9292
9293         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9294                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9295                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9296                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9297                WDMAC_MODE_LNGREAD_ENAB);
9298
9299         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9300             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9301                 if (tg3_flag(tp, TSO_CAPABLE) &&
9302                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9303                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9304                         /* nothing */
9305                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9306                            !tg3_flag(tp, IS_5788)) {
9307                         val |= WDMAC_MODE_RX_ACCEL;
9308                 }
9309         }
9310
9311         /* Enable host coalescing bug fix */
9312         if (tg3_flag(tp, 5755_PLUS))
9313                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9314
9315         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9316                 val |= WDMAC_MODE_BURST_ALL_DATA;
9317
9318         tw32_f(WDMAC_MODE, val);
9319         udelay(40);
9320
9321         if (tg3_flag(tp, PCIX_MODE)) {
9322                 u16 pcix_cmd;
9323
9324                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9325                                      &pcix_cmd);
9326                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9327                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9328                         pcix_cmd |= PCI_X_CMD_READ_2K;
9329                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9330                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9331                         pcix_cmd |= PCI_X_CMD_READ_2K;
9332                 }
9333                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9334                                       pcix_cmd);
9335         }
9336
9337         tw32_f(RDMAC_MODE, rdmac_mode);
9338         udelay(40);
9339
9340         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9341                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9342                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9343                                 break;
9344                 }
9345                 if (i < TG3_NUM_RDMA_CHANNELS) {
9346                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9347                         val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9348                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9349                         tg3_flag_set(tp, 5719_RDMA_BUG);
9350                 }
9351         }
9352
9353         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9354         if (!tg3_flag(tp, 5705_PLUS))
9355                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9356
9357         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9358                 tw32(SNDDATAC_MODE,
9359                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9360         else
9361                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9362
9363         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9364         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9365         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9366         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9367                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9368         tw32(RCVDBDI_MODE, val);
9369         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9370         if (tg3_flag(tp, HW_TSO_1) ||
9371             tg3_flag(tp, HW_TSO_2) ||
9372             tg3_flag(tp, HW_TSO_3))
9373                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9374         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9375         if (tg3_flag(tp, ENABLE_TSS))
9376                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9377         tw32(SNDBDI_MODE, val);
9378         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9379
9380         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9381                 err = tg3_load_5701_a0_firmware_fix(tp);
9382                 if (err)
9383                         return err;
9384         }
9385
9386         if (tg3_flag(tp, TSO_CAPABLE)) {
9387                 err = tg3_load_tso_firmware(tp);
9388                 if (err)
9389                         return err;
9390         }
9391
9392         tp->tx_mode = TX_MODE_ENABLE;
9393
9394         if (tg3_flag(tp, 5755_PLUS) ||
9395             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9396                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9397
9398         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9399                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9400                 tp->tx_mode &= ~val;
9401                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9402         }
9403
9404         tw32_f(MAC_TX_MODE, tp->tx_mode);
9405         udelay(100);
9406
9407         if (tg3_flag(tp, ENABLE_RSS)) {
9408                 tg3_rss_write_indir_tbl(tp);
9409
9410                 /* Set up the "secret" hash key. */
9411                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9412                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9413                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9414                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9415                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9416                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9417                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9418                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9419                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9420                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9421         }
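        /* The ten writes above install the fixed 40-byte RSS hash key
         * (ten 32-bit words), the size the Toeplitz-style RSS hash expects
         * for IPv4 and IPv6 tuples; the indirection table written earlier
         * then spreads hash results across the RX return rings.
         */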
9422
9423         tp->rx_mode = RX_MODE_ENABLE;
9424         if (tg3_flag(tp, 5755_PLUS))
9425                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9426
9427         if (tg3_flag(tp, ENABLE_RSS))
9428                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9429                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9430                                RX_MODE_RSS_IPV6_HASH_EN |
9431                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9432                                RX_MODE_RSS_IPV4_HASH_EN |
9433                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9434
9435         tw32_f(MAC_RX_MODE, tp->rx_mode);
9436         udelay(10);
9437
9438         tw32(MAC_LED_CTRL, tp->led_ctrl);
9439
9440         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9441         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9442                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9443                 udelay(10);
9444         }
9445         tw32_f(MAC_RX_MODE, tp->rx_mode);
9446         udelay(10);
9447
9448         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9449                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9450                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9451                         /* Set drive transmission level to 1.2V, but only
9452                          * if the signal pre-emphasis bit is not set. */
9453                         val = tr32(MAC_SERDES_CFG);
9454                         val &= 0xfffff000;
9455                         val |= 0x880;
9456                         tw32(MAC_SERDES_CFG, val);
9457                 }
9458                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9459                         tw32(MAC_SERDES_CFG, 0x616000);
9460         }
9461
9462         /* Prevent chip from dropping frames when flow control
9463          * is enabled.
9464          */
9465         if (tg3_flag(tp, 57765_CLASS))
9466                 val = 1;
9467         else
9468                 val = 2;
9469         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9470
9471         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9472             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9473                 /* Use hardware link auto-negotiation */
9474                 tg3_flag_set(tp, HW_AUTONEG);
9475         }
9476
9477         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9478             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9479                 u32 tmp;
9480
9481                 tmp = tr32(SERDES_RX_CTRL);
9482                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9483                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9484                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9485                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9486         }
9487
9488         if (!tg3_flag(tp, USE_PHYLIB)) {
9489                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9490                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9491
9492                 err = tg3_setup_phy(tp, 0);
9493                 if (err)
9494                         return err;
9495
9496                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9497                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9498                         u32 tmp;
9499
9500                         /* Clear CRC stats. */
9501                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9502                                 tg3_writephy(tp, MII_TG3_TEST1,
9503                                              tmp | MII_TG3_TEST1_CRC_EN);
9504                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9505                         }
9506                 }
9507         }
9508
9509         __tg3_set_rx_mode(tp->dev);
9510
9511         /* Initialize receive rules. */
9512         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9513         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9514         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9515         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9516
9517         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9518                 limit = 8;
9519         else
9520                 limit = 16;
9521         if (tg3_flag(tp, ENABLE_ASF))
9522                 limit -= 4;
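        /* Deliberate fall-through: each case below clears one rule/value
         * pair and drops into the next, so entering at "limit" wipes
         * rules limit - 1 down to 4 (rules 3 and 2 stay with the
         * commented-out writes).
         */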
9523         switch (limit) {
9524         case 16:
9525                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9526         case 15:
9527                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9528         case 14:
9529                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9530         case 13:
9531                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9532         case 12:
9533                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9534         case 11:
9535                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9536         case 10:
9537                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9538         case 9:
9539                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9540         case 8:
9541                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9542         case 7:
9543                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9544         case 6:
9545                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9546         case 5:
9547                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9548         case 4:
9549                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9550         case 3:
9551                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9552         case 2:
9553         case 1:
9554
9555         default:
9556                 break;
9557         }
9558
9559         if (tg3_flag(tp, ENABLE_APE))
9560                 /* Write our heartbeat update interval to APE. */
9561                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9562                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9563
9564         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9565
9566         return 0;
9567 }
9568
9569 /* Called at device open time to get the chip ready for
9570  * packet processing.  Invoked with tp->lock held.
9571  */
9572 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9573 {
9574         tg3_switch_clocks(tp);
9575
9576         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9577
9578         return tg3_reset_hw(tp, reset_phy);
9579 }
9580
9581 #if IS_ENABLED(CONFIG_HWMON)
9582 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9583 {
9584         int i;
9585
9586         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9587                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9588
9589                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9590                 off += len;
9591
9592                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9593                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9594                         memset(ocir, 0, TG3_OCIR_LEN);
9595         }
9596 }
9597
9598 /* sysfs attributes for hwmon */
9599 static ssize_t tg3_show_temp(struct device *dev,
9600                              struct device_attribute *devattr, char *buf)
9601 {
9602         struct pci_dev *pdev = to_pci_dev(dev);
9603         struct net_device *netdev = pci_get_drvdata(pdev);
9604         struct tg3 *tp = netdev_priv(netdev);
9605         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9606         u32 temperature;
9607
9608         spin_lock_bh(&tp->lock);
9609         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9610                                 sizeof(temperature));
9611         spin_unlock_bh(&tp->lock);
9612         /* The hwmon ABI defines temp*_input in millidegrees Celsius;
              * the APE is assumed here to report whole degrees C, so scale
              * the reading by 1000.
              */
             return sprintf(buf, "%u\n", temperature * 1000);
9613 }
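/* These readings surface through the hwmon sysfs group registered in
 * tg3_hwmon_open() below, e.g. (the exact path is illustrative):
 *
 *      cat /sys/bus/pci/devices/0000:01:00.0/temp1_input
 */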
9614
9616 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
9617                           TG3_TEMP_SENSOR_OFFSET);
9618 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
9619                           TG3_TEMP_CAUTION_OFFSET);
9620 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
9621                           TG3_TEMP_MAX_OFFSET);
9622
9623 static struct attribute *tg3_attributes[] = {
9624         &sensor_dev_attr_temp1_input.dev_attr.attr,
9625         &sensor_dev_attr_temp1_crit.dev_attr.attr,
9626         &sensor_dev_attr_temp1_max.dev_attr.attr,
9627         NULL
9628 };
9629
9630 static const struct attribute_group tg3_group = {
9631         .attrs = tg3_attributes,
9632 };
9633
9634 #endif
9635
9636 static void tg3_hwmon_close(struct tg3 *tp)
9637 {
9638 #if IS_ENABLED(CONFIG_HWMON)
9639         if (tp->hwmon_dev) {
9640                 hwmon_device_unregister(tp->hwmon_dev);
9641                 tp->hwmon_dev = NULL;
9642                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9643         }
9644 #endif
9645 }
9646
9647 static void tg3_hwmon_open(struct tg3 *tp)
9648 {
9649 #if IS_ENABLED(CONFIG_HWMON)
9650         int i, err;
9651         u32 size = 0;
9652         struct pci_dev *pdev = tp->pdev;
9653         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9654
9655         tg3_sd_scan_scratchpad(tp, ocirs);
9656
9657         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9658                 if (!ocirs[i].src_data_length)
9659                         continue;
9660
9661                 size += ocirs[i].src_hdr_length;
9662                 size += ocirs[i].src_data_length;
9663         }
9664
9665         if (!size)
9666                 return;
9667
9668         /* Register hwmon sysfs hooks */
9669         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9670         if (err) {
9671                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9672                 return;
9673         }
9674
9675         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9676         if (IS_ERR(tp->hwmon_dev)) {
9677                 tp->hwmon_dev = NULL;
9678                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9679                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9680         }
9681 #endif
9682 }
9683
9685 #define TG3_STAT_ADD32(PSTAT, REG) \
9686 do {    u32 __val = tr32(REG); \
9687         (PSTAT)->low += __val; \
9688         if ((PSTAT)->low < __val) \
9689                 (PSTAT)->high += 1; \
9690 } while (0)
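/* TG3_STAT_ADD32 folds a 32-bit hardware counter into a 64-bit software
 * one: the unsigned comparison "(PSTAT)->low < __val" is true exactly
 * when the 32-bit addition wrapped, in which case a carry is propagated
 * into the high word.
 */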
9691
9692 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9693 {
9694         struct tg3_hw_stats *sp = tp->hw_stats;
9695
9696         if (!netif_carrier_ok(tp->dev))
9697                 return;
9698
9699         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9700         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9701         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9702         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9703         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9704         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9705         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9706         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9707         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9708         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9709         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9710         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9711         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9712         if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
9713                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
9714                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
9715                 u32 val;
9716
9717                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9718                 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
9719                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9720                 tg3_flag_clear(tp, 5719_RDMA_BUG);
9721         }
9722
9723         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9724         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9725         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9726         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9727         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9728         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9729         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9730         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9731         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9732         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9733         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9734         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9735         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9736         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9737
9738         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9739         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9740             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9741             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9742                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9743         } else {
9744                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9745                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9746                 if (val) {
9747                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9748                         sp->rx_discards.low += val;
9749                         if (sp->rx_discards.low < val)
9750                                 sp->rx_discards.high += 1;
9751                 }
9752                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9753         }
9754         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9755 }
9756
9757 static void tg3_chk_missed_msi(struct tg3 *tp)
9758 {
9759         u32 i;
9760
9761         for (i = 0; i < tp->irq_cnt; i++) {
9762                 struct tg3_napi *tnapi = &tp->napi[i];
9763
9764                 if (tg3_has_work(tnapi)) {
9765                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9766                             tnapi->last_tx_cons == tnapi->tx_cons) {
9767                                 if (tnapi->chk_msi_cnt < 1) {
9768                                         tnapi->chk_msi_cnt++;
9769                                         return;
9770                                 }
9771                                 tg3_msi(0, tnapi);
9772                         }
9773                 }
9774                 tnapi->chk_msi_cnt = 0;
9775                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9776                 tnapi->last_tx_cons = tnapi->tx_cons;
9777         }
9778 }
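/* Timeline sketch of the check above: tick N finds pending work but the
 * rx/tx consumer indices have not moved since the previous tick, so it
 * arms chk_msi_cnt and waits one more period; if tick N+1 still sees no
 * progress, the MSI is assumed to have been missed and tg3_msi(0, tnapi)
 * is called directly to kick NAPI processing back into motion.
 */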
9779
9780 static void tg3_timer(unsigned long __opaque)
9781 {
9782         struct tg3 *tp = (struct tg3 *) __opaque;
9783
9784         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9785                 goto restart_timer;
9786
9787         spin_lock(&tp->lock);
9788
9789         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9790             tg3_flag(tp, 57765_CLASS))
9791                 tg3_chk_missed_msi(tp);
9792
9793         if (!tg3_flag(tp, TAGGED_STATUS)) {
9794                 /* All of this garbage is needed because, when using
9795                  * non-tagged IRQ status, the mailbox/status_block
9796                  * protocol the chip uses with the CPU is race prone.
9797                  */
9798                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9799                         tw32(GRC_LOCAL_CTRL,
9800                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9801                 } else {
9802                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9803                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9804                 }
9805
9806                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9807                         spin_unlock(&tp->lock);
9808                         tg3_reset_task_schedule(tp);
9809                         goto restart_timer;
9810                 }
9811         }
9812
9813         /* This part only runs once per second. */
9814         if (!--tp->timer_counter) {
9815                 if (tg3_flag(tp, 5705_PLUS))
9816                         tg3_periodic_fetch_stats(tp);
9817
9818                 if (tp->setlpicnt && !--tp->setlpicnt)
9819                         tg3_phy_eee_enable(tp);
9820
9821                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9822                         u32 mac_stat;
9823                         int phy_event;
9824
9825                         mac_stat = tr32(MAC_STATUS);
9826
9827                         phy_event = 0;
9828                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9829                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9830                                         phy_event = 1;
9831                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9832                                 phy_event = 1;
9833
9834                         if (phy_event)
9835                                 tg3_setup_phy(tp, 0);
9836                 } else if (tg3_flag(tp, POLL_SERDES)) {
9837                         u32 mac_stat = tr32(MAC_STATUS);
9838                         int need_setup = 0;
9839
9840                         if (netif_carrier_ok(tp->dev) &&
9841                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9842                                 need_setup = 1;
9843                         }
9844                         if (!netif_carrier_ok(tp->dev) &&
9845                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9846                                          MAC_STATUS_SIGNAL_DET))) {
9847                                 need_setup = 1;
9848                         }
9849                         if (need_setup) {
9850                                 if (!tp->serdes_counter) {
9851                                         tw32_f(MAC_MODE,
9852                                              (tp->mac_mode &
9853                                               ~MAC_MODE_PORT_MODE_MASK));
9854                                         udelay(40);
9855                                         tw32_f(MAC_MODE, tp->mac_mode);
9856                                         udelay(40);
9857                                 }
9858                                 tg3_setup_phy(tp, 0);
9859                         }
9860                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9861                            tg3_flag(tp, 5780_CLASS)) {
9862                         tg3_serdes_parallel_detect(tp);
9863                 }
9864
9865                 tp->timer_counter = tp->timer_multiplier;
9866         }
9867
9868         /* Heartbeat is only sent once every 2 seconds.
9869          *
9870          * The heartbeat is to tell the ASF firmware that the host
9871          * driver is still alive.  In the event that the OS crashes,
9872          * ASF needs to reset the hardware to free up the FIFO space
9873          * that may be filled with rx packets destined for the host.
9874          * If the FIFO is full, ASF will no longer function properly.
9875          *
9876          * Unintended resets have been reported on real time kernels
9877          * where the timer doesn't run on time.  Netpoll will have the
9878          * same problem.
9879          *
9880          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9881          * to check the ring condition when the heartbeat is expiring
9882          * before doing the reset.  This will prevent most unintended
9883          * resets.
9884          */
9885         if (!--tp->asf_counter) {
9886                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9887                         tg3_wait_for_event_ack(tp);
9888
9889                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9890                                       FWCMD_NICDRV_ALIVE3);
9891                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9892                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9893                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9894
9895                         tg3_generate_fw_event(tp);
9896                 }
9897                 tp->asf_counter = tp->asf_multiplier;
9898         }
9899
9900         spin_unlock(&tp->lock);
9901
9902 restart_timer:
9903         tp->timer.expires = jiffies + tp->timer_offset;
9904         add_timer(&tp->timer);
9905 }
9906
9907 static void __devinit tg3_timer_init(struct tg3 *tp)
9908 {
9909         if (tg3_flag(tp, TAGGED_STATUS) &&
9910             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9911             !tg3_flag(tp, 57765_CLASS))
9912                 tp->timer_offset = HZ;
9913         else
9914                 tp->timer_offset = HZ / 10;
9915
9916         BUG_ON(tp->timer_offset > HZ);
9917
9918         tp->timer_multiplier = (HZ / tp->timer_offset);
9919         tp->asf_multiplier = (HZ / tp->timer_offset) *
9920                              TG3_FW_UPDATE_FREQ_SEC;
9921
9922         init_timer(&tp->timer);
9923         tp->timer.data = (unsigned long) tp;
9924         tp->timer.function = tg3_timer;
9925 }
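/* Worked example (assuming HZ == 1000): with tagged status on a chip
 * that is neither 5717 nor 57765-class, timer_offset = HZ, so tg3_timer()
 * fires once per second and timer_multiplier = 1.  Otherwise
 * timer_offset = HZ / 10 = 100 jiffies, the timer fires ten times per
 * second and timer_multiplier = 10, so the "once per second" block in
 * tg3_timer() still runs at the same real-time rate.  asf_multiplier
 * scales the same tick rate by TG3_FW_UPDATE_FREQ_SEC to space out the
 * firmware heartbeat.
 */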
9926
9927 static void tg3_timer_start(struct tg3 *tp)
9928 {
9929         tp->asf_counter   = tp->asf_multiplier;
9930         tp->timer_counter = tp->timer_multiplier;
9931
9932         tp->timer.expires = jiffies + tp->timer_offset;
9933         add_timer(&tp->timer);
9934 }
9935
9936 static void tg3_timer_stop(struct tg3 *tp)
9937 {
9938         del_timer_sync(&tp->timer);
9939 }
9940
9941 /* Restart hardware after configuration changes, self-test, etc.
9942  * Invoked with tp->lock held.
9943  */
9944 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9945         __releases(tp->lock)
9946         __acquires(tp->lock)
9947 {
9948         int err;
9949
9950         err = tg3_init_hw(tp, reset_phy);
9951         if (err) {
9952                 netdev_err(tp->dev,
9953                            "Failed to re-initialize device, aborting\n");
9954                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9955                 tg3_full_unlock(tp);
9956                 tg3_timer_stop(tp);
9957                 tp->irq_sync = 0;
9958                 tg3_napi_enable(tp);
9959                 dev_close(tp->dev);
9960                 tg3_full_lock(tp, 0);
9961         }
9962         return err;
9963 }
9964
9965 static void tg3_reset_task(struct work_struct *work)
9966 {
9967         struct tg3 *tp = container_of(work, struct tg3, reset_task);
9968         int err;
9969
9970         tg3_full_lock(tp, 0);
9971
9972         if (!netif_running(tp->dev)) {
9973                 tg3_flag_clear(tp, RESET_TASK_PENDING);
9974                 tg3_full_unlock(tp);
9975                 return;
9976         }
9977
9978         tg3_full_unlock(tp);
9979
9980         tg3_phy_stop(tp);
9981
9982         tg3_netif_stop(tp);
9983
9984         tg3_full_lock(tp, 1);
9985
9986         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9987                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9988                 tp->write32_rx_mbox = tg3_write_flush_reg32;
9989                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9990                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9991         }
9992
9993         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9994         err = tg3_init_hw(tp, 1);
9995         if (err)
9996                 goto out;
9997
9998         tg3_netif_start(tp);
9999
10000 out:
10001         tg3_full_unlock(tp);
10002
10003         if (!err)
10004                 tg3_phy_start(tp);
10005
10006         tg3_flag_clear(tp, RESET_TASK_PENDING);
10007 }
10008
10009 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10010 {
10011         irq_handler_t fn;
10012         unsigned long flags;
10013         char *name;
10014         struct tg3_napi *tnapi = &tp->napi[irq_num];
10015
10016         if (tp->irq_cnt == 1)
10017                 name = tp->dev->name;
10018         else {
10019                 name = &tnapi->irq_lbl[0];
10020                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10021                 name[IFNAMSIZ-1] = 0;
10022         }
10023
10024         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10025                 fn = tg3_msi;
10026                 if (tg3_flag(tp, 1SHOT_MSI))
10027                         fn = tg3_msi_1shot;
10028                 flags = 0;
10029         } else {
10030                 fn = tg3_interrupt;
10031                 if (tg3_flag(tp, TAGGED_STATUS))
10032                         fn = tg3_interrupt_tagged;
10033                 flags = IRQF_SHARED;
10034         }
10035
10036         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10037 }
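/* Example of the naming above: with three vectors on a device named
 * "eth0", the per-vector handlers register as "eth0-0", "eth0-1" and
 * "eth0-2" (as seen in /proc/interrupts); with a single vector the bare
 * device name is used instead.
 */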
10038
10039 static int tg3_test_interrupt(struct tg3 *tp)
10040 {
10041         struct tg3_napi *tnapi = &tp->napi[0];
10042         struct net_device *dev = tp->dev;
10043         int err, i, intr_ok = 0;
10044         u32 val;
10045
10046         if (!netif_running(dev))
10047                 return -ENODEV;
10048
10049         tg3_disable_ints(tp);
10050
10051         free_irq(tnapi->irq_vec, tnapi);
10052
10053         /*
10054          * Turn off MSI one shot mode.  Otherwise this test has no
10055          * way to observe whether the interrupt was delivered.
10056          */
10057         if (tg3_flag(tp, 57765_PLUS)) {
10058                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10059                 tw32(MSGINT_MODE, val);
10060         }
10061
10062         err = request_irq(tnapi->irq_vec, tg3_test_isr,
10063                           IRQF_SHARED, dev->name, tnapi);
10064         if (err)
10065                 return err;
10066
10067         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10068         tg3_enable_ints(tp);
10069
10070         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10071                tnapi->coal_now);
10072
10073         for (i = 0; i < 5; i++) {
10074                 u32 int_mbox, misc_host_ctrl;
10075
10076                 int_mbox = tr32_mailbox(tnapi->int_mbox);
10077                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10078
10079                 if ((int_mbox != 0) ||
10080                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10081                         intr_ok = 1;
10082                         break;
10083                 }
10084
10085                 if (tg3_flag(tp, 57765_PLUS) &&
10086                     tnapi->hw_status->status_tag != tnapi->last_tag)
10087                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10088
10089                 msleep(10);
10090         }
10091
10092         tg3_disable_ints(tp);
10093
10094         free_irq(tnapi->irq_vec, tnapi);
10095
10096         err = tg3_request_irq(tp, 0);
10097
10098         if (err)
10099                 return err;
10100
10101         if (intr_ok) {
10102                 /* Reenable MSI one shot mode. */
10103                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10104                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10105                         tw32(MSGINT_MODE, val);
10106                 }
10107                 return 0;
10108         }
10109
10110         return -EIO;
10111 }
10112
10113 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
10114  * successfully restored.
10115  */
10116 static int tg3_test_msi(struct tg3 *tp)
10117 {
10118         int err;
10119         u16 pci_cmd;
10120
10121         if (!tg3_flag(tp, USING_MSI))
10122                 return 0;
10123
10124         /* Turn off SERR reporting in case MSI terminates with Master
10125          * Abort.
10126          */
10127         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10128         pci_write_config_word(tp->pdev, PCI_COMMAND,
10129                               pci_cmd & ~PCI_COMMAND_SERR);
10130
10131         err = tg3_test_interrupt(tp);
10132
10133         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10134
10135         if (!err)
10136                 return 0;
10137
10138         /* other failures */
10139         if (err != -EIO)
10140                 return err;
10141
10142         /* MSI test failed, go back to INTx mode */
10143         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10144                     "to INTx mode. Please report this failure to the PCI "
10145                     "maintainer and include system chipset information\n");
10146
10147         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10148
10149         pci_disable_msi(tp->pdev);
10150
10151         tg3_flag_clear(tp, USING_MSI);
10152         tp->napi[0].irq_vec = tp->pdev->irq;
10153
10154         err = tg3_request_irq(tp, 0);
10155         if (err)
10156                 return err;
10157
10158         /* Need to reset the chip because the MSI cycle may have terminated
10159          * with Master Abort.
10160          */
10161         tg3_full_lock(tp, 1);
10162
10163         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10164         err = tg3_init_hw(tp, 1);
10165
10166         tg3_full_unlock(tp);
10167
10168         if (err)
10169                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10170
10171         return err;
10172 }
10173
10174 static int tg3_request_firmware(struct tg3 *tp)
10175 {
10176         const __be32 *fw_data;
10177
10178         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10179                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10180                            tp->fw_needed);
10181                 return -ENOENT;
10182         }
10183
10184         fw_data = (void *)tp->fw->data;
10185
10186         /* Firmware blob starts with version numbers, followed by
10187          * start address and _full_ length including BSS sections
10188          * (which must be longer than the actual data, of course).
10189          */
10190
10191         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
10192         if (tp->fw_len < (tp->fw->size - 12)) {
10193                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10194                            tp->fw_len, tp->fw_needed);
10195                 release_firmware(tp->fw);
10196                 tp->fw = NULL;
10197                 return -EINVAL;
10198         }
10199
10200         /* We no longer need firmware; we have it. */
10201         tp->fw_needed = NULL;
10202         return 0;
10203 }
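/* Illustrative layout of the header parsed above, inferred from this
 * function and its comment rather than an authoritative format
 * definition (the struct name is hypothetical):
 *
 *      struct tg3_fw_hdr_sketch {
 *              __be32 version;    // fw_data[0]
 *              __be32 base_addr;  // fw_data[1], load/start address
 *              __be32 len;        // fw_data[2], full length incl. BSS
 *              __be32 data[];     // tp->fw->size - 12 bytes of payload
 *      };
 */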
10204
10205 static u32 tg3_irq_count(struct tg3 *tp)
10206 {
10207         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10208
10209         if (irq_cnt > 1) {
10210                 /* We want as many rx rings enabled as there are cpus.
10211                  * In multiqueue MSI-X mode, the first MSI-X vector
10212                  * only deals with link interrupts, etc, so we add
10213                  * one to the number of vectors we are requesting.
10214                  */
10215                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10216         }
10217
10218         return irq_cnt;
10219 }
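/* Worked example: rxq_cnt = 4 and txq_cnt = 1 give max(4, 1) = 4; since
 * that is more than one, a vector is added for the link/status interrupt
 * and irq_cnt = min(5, tp->irq_max).  With one rx and one tx queue the
 * count stays at 1 and no extra vector is requested.
 */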
10220
10221 static bool tg3_enable_msix(struct tg3 *tp)
10222 {
10223         int i, rc;
10224         struct msix_entry msix_ent[tp->irq_max];
10225
10226         tp->rxq_cnt = netif_get_num_default_rss_queues();
10227         if (tp->rxq_cnt > tp->rxq_max)
10228                 tp->rxq_cnt = tp->rxq_max;
10229         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
10230             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
10231                 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10232
10233         tp->irq_cnt = tg3_irq_count(tp);
10234
10235         for (i = 0; i < tp->irq_max; i++) {
10236                 msix_ent[i].entry  = i;
10237                 msix_ent[i].vector = 0;
10238         }
10239
10240         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10241         if (rc < 0) {
10242                 return false;
10243         } else if (rc != 0) {
10244                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10245                         return false;
10246                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10247                               tp->irq_cnt, rc);
10248                 tp->irq_cnt = rc;
10249                 tp->rxq_cnt = max(rc - 1, 1);
10250                 if (tp->txq_cnt)
10251                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10252         }
10253
10254         for (i = 0; i < tp->irq_max; i++)
10255                 tp->napi[i].irq_vec = msix_ent[i].vector;
10256
10257         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10258                 pci_disable_msix(tp->pdev);
10259                 return false;
10260         }
10261
10262         if (tp->irq_cnt == 1)
10263                 return true;
10264
10265         tg3_flag_set(tp, ENABLE_RSS);
10266
10267         if (tp->txq_cnt > 1)
10268                 tg3_flag_set(tp, ENABLE_TSS);
10269
10270         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10271
10272         return true;
10273 }
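/* Note on the retry above: pci_enable_msix() returns 0 on success, a
 * negative errno on hard failure, or a positive count when fewer vectors
 * are available than requested.  Example: asking for 5 when only 3 can
 * be granted returns 3; the second call then requests exactly 3, and the
 * queue counts are trimmed to fit (rxq_cnt = max(3 - 1, 1) = 2).
 */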
10274
10275 static void tg3_ints_init(struct tg3 *tp)
10276 {
10277         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10278             !tg3_flag(tp, TAGGED_STATUS)) {
10279                 /* All MSI supporting chips should support tagged
10280                  * status.  Fall back to INTx if that is not the case.
10281                  */
10282                 netdev_warn(tp->dev,
10283                             "MSI without TAGGED_STATUS? Not using MSI\n");
10284                 goto defcfg;
10285         }
10286
10287         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10288                 tg3_flag_set(tp, USING_MSIX);
10289         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10290                 tg3_flag_set(tp, USING_MSI);
10291
10292         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10293                 u32 msi_mode = tr32(MSGINT_MODE);
10294                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10295                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10296                 if (!tg3_flag(tp, 1SHOT_MSI))
10297                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10298                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10299         }
10300 defcfg:
10301         if (!tg3_flag(tp, USING_MSIX)) {
10302                 tp->irq_cnt = 1;
10303                 tp->napi[0].irq_vec = tp->pdev->irq;
10304         }
10305
10306         if (tp->irq_cnt == 1) {
10307                 tp->txq_cnt = 1;
10308                 tp->rxq_cnt = 1;
10309                 netif_set_real_num_tx_queues(tp->dev, 1);
10310                 netif_set_real_num_rx_queues(tp->dev, 1);
10311         }
10312 }
10313
10314 static void tg3_ints_fini(struct tg3 *tp)
10315 {
10316         if (tg3_flag(tp, USING_MSIX))
10317                 pci_disable_msix(tp->pdev);
10318         else if (tg3_flag(tp, USING_MSI))
10319                 pci_disable_msi(tp->pdev);
10320         tg3_flag_clear(tp, USING_MSI);
10321         tg3_flag_clear(tp, USING_MSIX);
10322         tg3_flag_clear(tp, ENABLE_RSS);
10323         tg3_flag_clear(tp, ENABLE_TSS);
10324 }
10325
10326 static int tg3_open(struct net_device *dev)
10327 {
10328         struct tg3 *tp = netdev_priv(dev);
10329         int i, err;
10330
10331         if (tp->fw_needed) {
10332                 err = tg3_request_firmware(tp);
10333                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10334                         if (err)
10335                                 return err;
10336                 } else if (err) {
10337                         netdev_warn(tp->dev, "TSO capability disabled\n");
10338                         tg3_flag_clear(tp, TSO_CAPABLE);
10339                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10340                         netdev_notice(tp->dev, "TSO capability restored\n");
10341                         tg3_flag_set(tp, TSO_CAPABLE);
10342                 }
10343         }
10344
10345         netif_carrier_off(tp->dev);
10346
10347         err = tg3_power_up(tp);
10348         if (err)
10349                 return err;
10350
10351         tg3_full_lock(tp, 0);
10352
10353         tg3_disable_ints(tp);
10354         tg3_flag_clear(tp, INIT_COMPLETE);
10355
10356         tg3_full_unlock(tp);
10357
10358         /*
10359          * Setup interrupts first so we know how
10360          * many NAPI resources to allocate
10361          */
10362         tg3_ints_init(tp);
10363
10364         tg3_rss_check_indir_tbl(tp);
10365
10366         /* The placement of this call is tied
10367          * to the setup and use of Host TX descriptors.
10368          */
10369         err = tg3_alloc_consistent(tp);
10370         if (err)
10371                 goto err_out1;
10372
10373         tg3_napi_init(tp);
10374
10375         tg3_napi_enable(tp);
10376
10377         for (i = 0; i < tp->irq_cnt; i++) {
10378                 struct tg3_napi *tnapi = &tp->napi[i];
10379                 err = tg3_request_irq(tp, i);
10380                 if (err) {
10381                         for (i--; i >= 0; i--) {
10382                                 tnapi = &tp->napi[i];
10383                                 free_irq(tnapi->irq_vec, tnapi);
10384                         }
10385                         goto err_out2;
10386                 }
10387         }
10388
10389         tg3_full_lock(tp, 0);
10390
10391         err = tg3_init_hw(tp, 1);
10392         if (err) {
10393                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10394                 tg3_free_rings(tp);
10395         }
10396
10397         tg3_full_unlock(tp);
10398
10399         if (err)
10400                 goto err_out3;
10401
10402         if (tg3_flag(tp, USING_MSI)) {
10403                 err = tg3_test_msi(tp);
10404
10405                 if (err) {
10406                         tg3_full_lock(tp, 0);
10407                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10408                         tg3_free_rings(tp);
10409                         tg3_full_unlock(tp);
10410
10411                         goto err_out2;
10412                 }
10413
10414                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10415                         u32 val = tr32(PCIE_TRANSACTION_CFG);
10416
10417                         tw32(PCIE_TRANSACTION_CFG,
10418                              val | PCIE_TRANS_CFG_1SHOT_MSI);
10419                 }
10420         }
10421
10422         tg3_phy_start(tp);
10423
10424         tg3_hwmon_open(tp);
10425
10426         tg3_full_lock(tp, 0);
10427
10428         tg3_timer_start(tp);
10429         tg3_flag_set(tp, INIT_COMPLETE);
10430         tg3_enable_ints(tp);
10431
10432         tg3_full_unlock(tp);
10433
10434         netif_tx_start_all_queues(dev);
10435
10436         /*
10437          * Reset the loopback feature if it was turned on while the
10438          * device was down; make sure that it's installed properly now.
10439          */
10440         if (dev->features & NETIF_F_LOOPBACK)
10441                 tg3_set_loopback(dev, dev->features);
10442
10443         return 0;
10444
10445 err_out3:
10446         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10447                 struct tg3_napi *tnapi = &tp->napi[i];
10448                 free_irq(tnapi->irq_vec, tnapi);
10449         }
10450
10451 err_out2:
10452         tg3_napi_disable(tp);
10453         tg3_napi_fini(tp);
10454         tg3_free_consistent(tp);
10455
10456 err_out1:
10457         tg3_ints_fini(tp);
10458         tg3_frob_aux_power(tp, false);
10459         pci_set_power_state(tp->pdev, PCI_D3hot);
10460         return err;
10461 }
10462
10463 static int tg3_close(struct net_device *dev)
10464 {
10465         int i;
10466         struct tg3 *tp = netdev_priv(dev);
10467
10468         tg3_napi_disable(tp);
10469         tg3_reset_task_cancel(tp);
10470
10471         netif_tx_stop_all_queues(dev);
10472
10473         tg3_timer_stop(tp);
10474
10475         tg3_hwmon_close(tp);
10476
10477         tg3_phy_stop(tp);
10478
10479         tg3_full_lock(tp, 1);
10480
10481         tg3_disable_ints(tp);
10482
10483         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10484         tg3_free_rings(tp);
10485         tg3_flag_clear(tp, INIT_COMPLETE);
10486
10487         tg3_full_unlock(tp);
10488
10489         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10490                 struct tg3_napi *tnapi = &tp->napi[i];
10491                 free_irq(tnapi->irq_vec, tnapi);
10492         }
10493
10494         tg3_ints_fini(tp);
10495
10496         /* Clear stats across close / open calls */
10497         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10498         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10499
10500         tg3_napi_fini(tp);
10501
10502         tg3_free_consistent(tp);
10503
10504         tg3_power_down(tp);
10505
10506         netif_carrier_off(tp->dev);
10507
10508         return 0;
10509 }
10510
10511 static inline u64 get_stat64(tg3_stat64_t *val)
10512 {
10513         return ((u64)val->high << 32) | ((u64)val->low);
10514 }
10515
10516 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10517 {
10518         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10519
10520         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10521             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10522              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10523                 u32 val;
10524
10525                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10526                         tg3_writephy(tp, MII_TG3_TEST1,
10527                                      val | MII_TG3_TEST1_CRC_EN);
10528                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10529                 } else
10530                         val = 0;
10531
10532                 tp->phy_crc_errors += val;
10533
10534                 return tp->phy_crc_errors;
10535         }
10536
10537         return get_stat64(&hw_stats->rx_fcs_errors);
10538 }
10539
10540 #define ESTAT_ADD(member) \
10541         estats->member =        old_estats->member + \
10542                                 get_stat64(&hw_stats->member)
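/* The hardware statistics block is cleared whenever the chip is reset,
 * so each ethtool counter is reported as the total saved before the last
 * reset plus the live hardware value, i.e. roughly:
 *
 *      estats->member = tp->estats_prev.member + current hw count;
 *
 * tg3_close() zeroes the *_prev copies, so totals survive chip resets
 * while the interface is up but restart from zero on each open.
 */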
10543
10544 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10545 {
10546         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10547         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10548
10549         ESTAT_ADD(rx_octets);
10550         ESTAT_ADD(rx_fragments);
10551         ESTAT_ADD(rx_ucast_packets);
10552         ESTAT_ADD(rx_mcast_packets);
10553         ESTAT_ADD(rx_bcast_packets);
10554         ESTAT_ADD(rx_fcs_errors);
10555         ESTAT_ADD(rx_align_errors);
10556         ESTAT_ADD(rx_xon_pause_rcvd);
10557         ESTAT_ADD(rx_xoff_pause_rcvd);
10558         ESTAT_ADD(rx_mac_ctrl_rcvd);
10559         ESTAT_ADD(rx_xoff_entered);
10560         ESTAT_ADD(rx_frame_too_long_errors);
10561         ESTAT_ADD(rx_jabbers);
10562         ESTAT_ADD(rx_undersize_packets);
10563         ESTAT_ADD(rx_in_length_errors);
10564         ESTAT_ADD(rx_out_length_errors);
10565         ESTAT_ADD(rx_64_or_less_octet_packets);
10566         ESTAT_ADD(rx_65_to_127_octet_packets);
10567         ESTAT_ADD(rx_128_to_255_octet_packets);
10568         ESTAT_ADD(rx_256_to_511_octet_packets);
10569         ESTAT_ADD(rx_512_to_1023_octet_packets);
10570         ESTAT_ADD(rx_1024_to_1522_octet_packets);
10571         ESTAT_ADD(rx_1523_to_2047_octet_packets);
10572         ESTAT_ADD(rx_2048_to_4095_octet_packets);
10573         ESTAT_ADD(rx_4096_to_8191_octet_packets);
10574         ESTAT_ADD(rx_8192_to_9022_octet_packets);
10575
10576         ESTAT_ADD(tx_octets);
10577         ESTAT_ADD(tx_collisions);
10578         ESTAT_ADD(tx_xon_sent);
10579         ESTAT_ADD(tx_xoff_sent);
10580         ESTAT_ADD(tx_flow_control);
10581         ESTAT_ADD(tx_mac_errors);
10582         ESTAT_ADD(tx_single_collisions);
10583         ESTAT_ADD(tx_mult_collisions);
10584         ESTAT_ADD(tx_deferred);
10585         ESTAT_ADD(tx_excessive_collisions);
10586         ESTAT_ADD(tx_late_collisions);
10587         ESTAT_ADD(tx_collide_2times);
10588         ESTAT_ADD(tx_collide_3times);
10589         ESTAT_ADD(tx_collide_4times);
10590         ESTAT_ADD(tx_collide_5times);
10591         ESTAT_ADD(tx_collide_6times);
10592         ESTAT_ADD(tx_collide_7times);
10593         ESTAT_ADD(tx_collide_8times);
10594         ESTAT_ADD(tx_collide_9times);
10595         ESTAT_ADD(tx_collide_10times);
10596         ESTAT_ADD(tx_collide_11times);
10597         ESTAT_ADD(tx_collide_12times);
10598         ESTAT_ADD(tx_collide_13times);
10599         ESTAT_ADD(tx_collide_14times);
10600         ESTAT_ADD(tx_collide_15times);
10601         ESTAT_ADD(tx_ucast_packets);
10602         ESTAT_ADD(tx_mcast_packets);
10603         ESTAT_ADD(tx_bcast_packets);
10604         ESTAT_ADD(tx_carrier_sense_errors);
10605         ESTAT_ADD(tx_discards);
10606         ESTAT_ADD(tx_errors);
10607
10608         ESTAT_ADD(dma_writeq_full);
10609         ESTAT_ADD(dma_write_prioq_full);
10610         ESTAT_ADD(rxbds_empty);
10611         ESTAT_ADD(rx_discards);
10612         ESTAT_ADD(rx_errors);
10613         ESTAT_ADD(rx_threshold_hit);
10614
10615         ESTAT_ADD(dma_readq_full);
10616         ESTAT_ADD(dma_read_prioq_full);
10617         ESTAT_ADD(tx_comp_queue_full);
10618
10619         ESTAT_ADD(ring_set_send_prod_index);
10620         ESTAT_ADD(ring_status_update);
10621         ESTAT_ADD(nic_irqs);
10622         ESTAT_ADD(nic_avoided_irqs);
10623         ESTAT_ADD(nic_tx_threshold_hit);
10624
10625         ESTAT_ADD(mbuf_lwm_thresh_hit);
10626 }
10627
10628 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10629 {
10630         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10631         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10632
10633         stats->rx_packets = old_stats->rx_packets +
10634                 get_stat64(&hw_stats->rx_ucast_packets) +
10635                 get_stat64(&hw_stats->rx_mcast_packets) +
10636                 get_stat64(&hw_stats->rx_bcast_packets);
10637
10638         stats->tx_packets = old_stats->tx_packets +
10639                 get_stat64(&hw_stats->tx_ucast_packets) +
10640                 get_stat64(&hw_stats->tx_mcast_packets) +
10641                 get_stat64(&hw_stats->tx_bcast_packets);
10642
10643         stats->rx_bytes = old_stats->rx_bytes +
10644                 get_stat64(&hw_stats->rx_octets);
10645         stats->tx_bytes = old_stats->tx_bytes +
10646                 get_stat64(&hw_stats->tx_octets);
10647
10648         stats->rx_errors = old_stats->rx_errors +
10649                 get_stat64(&hw_stats->rx_errors);
10650         stats->tx_errors = old_stats->tx_errors +
10651                 get_stat64(&hw_stats->tx_errors) +
10652                 get_stat64(&hw_stats->tx_mac_errors) +
10653                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10654                 get_stat64(&hw_stats->tx_discards);
10655
10656         stats->multicast = old_stats->multicast +
10657                 get_stat64(&hw_stats->rx_mcast_packets);
10658         stats->collisions = old_stats->collisions +
10659                 get_stat64(&hw_stats->tx_collisions);
10660
10661         stats->rx_length_errors = old_stats->rx_length_errors +
10662                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10663                 get_stat64(&hw_stats->rx_undersize_packets);
10664
10665         stats->rx_over_errors = old_stats->rx_over_errors +
10666                 get_stat64(&hw_stats->rxbds_empty);
10667         stats->rx_frame_errors = old_stats->rx_frame_errors +
10668                 get_stat64(&hw_stats->rx_align_errors);
10669         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10670                 get_stat64(&hw_stats->tx_discards);
10671         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10672                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10673
10674         stats->rx_crc_errors = old_stats->rx_crc_errors +
10675                 tg3_calc_crc_errors(tp);
10676
10677         stats->rx_missed_errors = old_stats->rx_missed_errors +
10678                 get_stat64(&hw_stats->rx_discards);
10679
10680         stats->rx_dropped = tp->rx_dropped;
10681         stats->tx_dropped = tp->tx_dropped;
10682 }
10683
10684 static int tg3_get_regs_len(struct net_device *dev)
10685 {
10686         return TG3_REG_BLK_SIZE;
10687 }
10688
10689 static void tg3_get_regs(struct net_device *dev,
10690                 struct ethtool_regs *regs, void *_p)
10691 {
10692         struct tg3 *tp = netdev_priv(dev);
10693
10694         regs->version = 0;
10695
10696         memset(_p, 0, TG3_REG_BLK_SIZE);
10697
10698         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10699                 return;
10700
10701         tg3_full_lock(tp, 0);
10702
10703         tg3_dump_legacy_regs(tp, (u32 *)_p);
10704
10705         tg3_full_unlock(tp);
10706 }
10707
10708 static int tg3_get_eeprom_len(struct net_device *dev)
10709 {
10710         struct tg3 *tp = netdev_priv(dev);
10711
10712         return tp->nvram_size;
10713 }
10714
10715 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10716 {
10717         struct tg3 *tp = netdev_priv(dev);
10718         int ret;
10719         u8  *pd;
10720         u32 i, offset, len, b_offset, b_count;
10721         __be32 val;
10722
10723         if (tg3_flag(tp, NO_NVRAM))
10724                 return -EINVAL;
10725
10726         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10727                 return -EAGAIN;
10728
10729         offset = eeprom->offset;
10730         len = eeprom->len;
10731         eeprom->len = 0;
10732
10733         eeprom->magic = TG3_EEPROM_MAGIC;
10734
10735         if (offset & 3) {
10736                 /* adjustments to start on required 4 byte boundary */
10737                 b_offset = offset & 3;
10738                 b_count = 4 - b_offset;
10739                 if (b_count > len) {
10740                         /* i.e. offset=1 len=2 */
10741                         b_count = len;
10742                 }
10743                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10744                 if (ret)
10745                         return ret;
10746                 memcpy(data, ((char *)&val) + b_offset, b_count);
10747                 len -= b_count;
10748                 offset += b_count;
10749                 eeprom->len += b_count;
10750         }
10751
10752         /* read bytes up to the last 4 byte boundary */
10753         pd = &data[eeprom->len];
10754         for (i = 0; i < (len - (len & 3)); i += 4) {
10755                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10756                 if (ret) {
10757                         eeprom->len += i;
10758                         return ret;
10759                 }
10760                 memcpy(pd + i, &val, 4);
10761         }
10762         eeprom->len += i;
10763
10764         if (len & 3) {
10765                 /* read last bytes not ending on 4 byte boundary */
10766                 pd = &data[eeprom->len];
10767                 b_count = len & 3;
10768                 b_offset = offset + len - b_count;
10769                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10770                 if (ret)
10771                         return ret;
10772                 memcpy(pd, &val, b_count);
10773                 eeprom->len += b_count;
10774         }
10775         return 0;
10776 }
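/* Worked example of the alignment handling above: offset = 1, len = 6.
 * Head: b_offset = 1, b_count = 3, so the word at offset 0 is read and
 * its last three bytes copied.  Middle: only 3 bytes remain, so the
 * aligned loop copies no full words.  Tail: the word at offset 4 is read
 * and its first three bytes copied, yielding the six requested bytes
 * starting at NVRAM offset 1.
 */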
10777
10778 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10779 {
10780         struct tg3 *tp = netdev_priv(dev);
10781         int ret;
10782         u32 offset, len, b_offset, odd_len;
10783         u8 *buf;
10784         __be32 start, end;
10785
10786         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10787                 return -EAGAIN;
10788
10789         if (tg3_flag(tp, NO_NVRAM) ||
10790             eeprom->magic != TG3_EEPROM_MAGIC)
10791                 return -EINVAL;
10792
10793         offset = eeprom->offset;
10794         len = eeprom->len;
10795
10796         if ((b_offset = (offset & 3))) {
10797                 /* adjustments to start on required 4 byte boundary */
10798                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10799                 if (ret)
10800                         return ret;
10801                 len += b_offset;
10802                 offset &= ~3;
10803                 if (len < 4)
10804                         len = 4;
10805         }
10806
10807         odd_len = 0;
10808         if (len & 3) {
10809                 /* adjustments to end on required 4 byte boundary */
10810                 odd_len = 1;
10811                 len = (len + 3) & ~3;
10812                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10813                 if (ret)
10814                         return ret;
10815         }
10816
10817         buf = data;
10818         if (b_offset || odd_len) {
10819                 buf = kmalloc(len, GFP_KERNEL);
10820                 if (!buf)
10821                         return -ENOMEM;
10822                 if (b_offset)
10823                         memcpy(buf, &start, 4);
10824                 if (odd_len)
10825                         memcpy(buf+len-4, &end, 4);
10826                 memcpy(buf + b_offset, data, eeprom->len);
10827         }
10828
10829         ret = tg3_nvram_write_block(tp, offset, len, buf);
10830
10831         if (buf != data)
10832                 kfree(buf);
10833
10834         return ret;
10835 }
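/* Worked example of the read-modify-write above: offset = 2, len = 3.
 * The head fixup reads the word at offset 0 into 'start' (len becomes 5,
 * offset becomes 0), and the tail fixup rounds len up to 8 and reads the
 * word at offset 4 into 'end'.  The bounce buffer is then assembled from
 * two preserved bytes of 'start', the three caller bytes, and three
 * preserved bytes of 'end', and written back as one aligned 8-byte block.
 */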
10836
10837 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10838 {
10839         struct tg3 *tp = netdev_priv(dev);
10840
10841         if (tg3_flag(tp, USE_PHYLIB)) {
10842                 struct phy_device *phydev;
10843                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10844                         return -EAGAIN;
10845                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10846                 return phy_ethtool_gset(phydev, cmd);
10847         }
10848
10849         cmd->supported = (SUPPORTED_Autoneg);
10850
10851         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10852                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10853                                    SUPPORTED_1000baseT_Full);
10854
10855         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10856                 cmd->supported |= (SUPPORTED_100baseT_Half |
10857                                   SUPPORTED_100baseT_Full |
10858                                   SUPPORTED_10baseT_Half |
10859                                   SUPPORTED_10baseT_Full |
10860                                   SUPPORTED_TP);
10861                 cmd->port = PORT_TP;
10862         } else {
10863                 cmd->supported |= SUPPORTED_FIBRE;
10864                 cmd->port = PORT_FIBRE;
10865         }
10866
10867         cmd->advertising = tp->link_config.advertising;
10868         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10869                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10870                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10871                                 cmd->advertising |= ADVERTISED_Pause;
10872                         } else {
10873                                 cmd->advertising |= ADVERTISED_Pause |
10874                                                     ADVERTISED_Asym_Pause;
10875                         }
10876                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10877                         cmd->advertising |= ADVERTISED_Asym_Pause;
10878                 }
10879         }
10880         if (netif_running(dev) && netif_carrier_ok(dev)) {
10881                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10882                 cmd->duplex = tp->link_config.active_duplex;
10883                 cmd->lp_advertising = tp->link_config.rmt_adv;
10884                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10885                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10886                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10887                         else
10888                                 cmd->eth_tp_mdix = ETH_TP_MDI;
10889                 }
10890         } else {
10891                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10892                 cmd->duplex = DUPLEX_UNKNOWN;
10893                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10894         }
10895         cmd->phy_address = tp->phy_addr;
10896         cmd->transceiver = XCVR_INTERNAL;
10897         cmd->autoneg = tp->link_config.autoneg;
10898         cmd->maxtxpkt = 0;
10899         cmd->maxrxpkt = 0;
10900         return 0;
10901 }
10902
10903 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10904 {
10905         struct tg3 *tp = netdev_priv(dev);
10906         u32 speed = ethtool_cmd_speed(cmd);
10907
10908         if (tg3_flag(tp, USE_PHYLIB)) {
10909                 struct phy_device *phydev;
10910                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10911                         return -EAGAIN;
10912                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10913                 return phy_ethtool_sset(phydev, cmd);
10914         }
10915
10916         if (cmd->autoneg != AUTONEG_ENABLE &&
10917             cmd->autoneg != AUTONEG_DISABLE)
10918                 return -EINVAL;
10919
10920         if (cmd->autoneg == AUTONEG_DISABLE &&
10921             cmd->duplex != DUPLEX_FULL &&
10922             cmd->duplex != DUPLEX_HALF)
10923                 return -EINVAL;
10924
10925         if (cmd->autoneg == AUTONEG_ENABLE) {
10926                 u32 mask = ADVERTISED_Autoneg |
10927                            ADVERTISED_Pause |
10928                            ADVERTISED_Asym_Pause;
10929
10930                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10931                         mask |= ADVERTISED_1000baseT_Half |
10932                                 ADVERTISED_1000baseT_Full;
10933
10934                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10935                         mask |= ADVERTISED_100baseT_Half |
10936                                 ADVERTISED_100baseT_Full |
10937                                 ADVERTISED_10baseT_Half |
10938                                 ADVERTISED_10baseT_Full |
10939                                 ADVERTISED_TP;
10940                 else
10941                         mask |= ADVERTISED_FIBRE;
10942
10943                 if (cmd->advertising & ~mask)
10944                         return -EINVAL;
10945
10946                 mask &= (ADVERTISED_1000baseT_Half |
10947                          ADVERTISED_1000baseT_Full |
10948                          ADVERTISED_100baseT_Half |
10949                          ADVERTISED_100baseT_Full |
10950                          ADVERTISED_10baseT_Half |
10951                          ADVERTISED_10baseT_Full);
10952
10953                 cmd->advertising &= mask;
10954         } else {
10955                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10956                         if (speed != SPEED_1000)
10957                                 return -EINVAL;
10958
10959                         if (cmd->duplex != DUPLEX_FULL)
10960                                 return -EINVAL;
10961                 } else {
10962                         if (speed != SPEED_100 &&
10963                             speed != SPEED_10)
10964                                 return -EINVAL;
10965                 }
10966         }
10967
10968         tg3_full_lock(tp, 0);
10969
10970         tp->link_config.autoneg = cmd->autoneg;
10971         if (cmd->autoneg == AUTONEG_ENABLE) {
10972                 tp->link_config.advertising = (cmd->advertising |
10973                                               ADVERTISED_Autoneg);
10974                 tp->link_config.speed = SPEED_UNKNOWN;
10975                 tp->link_config.duplex = DUPLEX_UNKNOWN;
10976         } else {
10977                 tp->link_config.advertising = 0;
10978                 tp->link_config.speed = speed;
10979                 tp->link_config.duplex = cmd->duplex;
10980         }
10981
10982         if (netif_running(dev))
10983                 tg3_setup_phy(tp, 1);
10984
10985         tg3_full_unlock(tp);
10986
10987         return 0;
10988 }
10989
10990 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10991 {
10992         struct tg3 *tp = netdev_priv(dev);
10993
10994         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10995         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10996         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10997         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10998 }
10999
11000 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11001 {
11002         struct tg3 *tp = netdev_priv(dev);
11003
11004         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11005                 wol->supported = WAKE_MAGIC;
11006         else
11007                 wol->supported = 0;
11008         wol->wolopts = 0;
11009         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11010                 wol->wolopts = WAKE_MAGIC;
11011         memset(&wol->sopass, 0, sizeof(wol->sopass));
11012 }
11013
11014 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11015 {
11016         struct tg3 *tp = netdev_priv(dev);
11017         struct device *dp = &tp->pdev->dev;
11018
11019         if (wol->wolopts & ~WAKE_MAGIC)
11020                 return -EINVAL;
11021         if ((wol->wolopts & WAKE_MAGIC) &&
11022             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11023                 return -EINVAL;
11024
11025         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11026
11027         spin_lock_bh(&tp->lock);
11028         if (device_may_wakeup(dp))
11029                 tg3_flag_set(tp, WOL_ENABLE);
11030         else
11031                 tg3_flag_clear(tp, WOL_ENABLE);
11032         spin_unlock_bh(&tp->lock);
11033
11034         return 0;
11035 }
11036
11037 static u32 tg3_get_msglevel(struct net_device *dev)
11038 {
11039         struct tg3 *tp = netdev_priv(dev);
11040         return tp->msg_enable;
11041 }
11042
11043 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11044 {
11045         struct tg3 *tp = netdev_priv(dev);
11046         tp->msg_enable = value;
11047 }
11048
11049 static int tg3_nway_reset(struct net_device *dev)
11050 {
11051         struct tg3 *tp = netdev_priv(dev);
11052         int r;
11053
11054         if (!netif_running(dev))
11055                 return -EAGAIN;
11056
11057         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11058                 return -EINVAL;
11059
11060         if (tg3_flag(tp, USE_PHYLIB)) {
11061                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11062                         return -EAGAIN;
11063                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11064         } else {
11065                 u32 bmcr;
11066
11067                 spin_lock_bh(&tp->lock);
11068                 r = -EINVAL;
11069                 tg3_readphy(tp, MII_BMCR, &bmcr);
11070                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11071                     ((bmcr & BMCR_ANENABLE) ||
11072                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11073                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11074                                                    BMCR_ANENABLE);
11075                         r = 0;
11076                 }
11077                 spin_unlock_bh(&tp->lock);
11078         }
11079
11080         return r;
11081 }
11082
11083 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11084 {
11085         struct tg3 *tp = netdev_priv(dev);
11086
11087         ering->rx_max_pending = tp->rx_std_ring_mask;
11088         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11089                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11090         else
11091                 ering->rx_jumbo_max_pending = 0;
11092
11093         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11094
11095         ering->rx_pending = tp->rx_pending;
11096         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11097                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11098         else
11099                 ering->rx_jumbo_pending = 0;
11100
11101         ering->tx_pending = tp->napi[0].tx_pending;
11102 }
11103
11104 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11105 {
11106         struct tg3 *tp = netdev_priv(dev);
11107         int i, irq_sync = 0, err = 0;
11108
11109         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11110             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11111             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11112             (ering->tx_pending <= MAX_SKB_FRAGS) ||
11113             (tg3_flag(tp, TSO_BUG) &&
11114              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11115                 return -EINVAL;
11116
11117         if (netif_running(dev)) {
11118                 tg3_phy_stop(tp);
11119                 tg3_netif_stop(tp);
11120                 irq_sync = 1;
11121         }
11122
11123         tg3_full_lock(tp, irq_sync);
11124
11125         tp->rx_pending = ering->rx_pending;
11126
11127         if (tg3_flag(tp, MAX_RXPEND_64) &&
11128             tp->rx_pending > 63)
11129                 tp->rx_pending = 63;
11130         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11131
11132         for (i = 0; i < tp->irq_max; i++)
11133                 tp->napi[i].tx_pending = ering->tx_pending;
11134
11135         if (netif_running(dev)) {
11136                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11137                 err = tg3_restart_hw(tp, 1);
11138                 if (!err)
11139                         tg3_netif_start(tp);
11140         }
11141
11142         tg3_full_unlock(tp);
11143
11144         if (irq_sync && !err)
11145                 tg3_phy_start(tp);
11146
11147         return err;
11148 }
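/* Example of the bounds check above (assuming MAX_SKB_FRAGS == 17, its
 * usual value with 4 KB pages): tx_pending = 17 is rejected because the
 * tx ring must hold more descriptors than one maximally fragmented
 * packet can consume, and on TSO_BUG hardware the floor rises to
 * 3 * MAX_SKB_FRAGS = 51 to leave headroom for the segmentation
 * workaround.
 */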
11149
11150 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11151 {
11152         struct tg3 *tp = netdev_priv(dev);
11153
11154         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11155
11156         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11157                 epause->rx_pause = 1;
11158         else
11159                 epause->rx_pause = 0;
11160
11161         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11162                 epause->tx_pause = 1;
11163         else
11164                 epause->tx_pause = 0;
11165 }
11166
11167 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11168 {
11169         struct tg3 *tp = netdev_priv(dev);
11170         int err = 0;
11171
11172         if (tg3_flag(tp, USE_PHYLIB)) {
11173                 u32 newadv;
11174                 struct phy_device *phydev;
11175
11176                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11177
11178                 if (!(phydev->supported & SUPPORTED_Pause) ||
11179                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11180                      (epause->rx_pause != epause->tx_pause)))
11181                         return -EINVAL;
11182
11183                 tp->link_config.flowctrl = 0;
11184                 if (epause->rx_pause) {
11185                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11186
11187                         if (epause->tx_pause) {
11188                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11189                                 newadv = ADVERTISED_Pause;
11190                         } else
11191                                 newadv = ADVERTISED_Pause |
11192                                          ADVERTISED_Asym_Pause;
11193                 } else if (epause->tx_pause) {
11194                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11195                         newadv = ADVERTISED_Asym_Pause;
11196                 } else
11197                         newadv = 0;
11198
11199                 if (epause->autoneg)
11200                         tg3_flag_set(tp, PAUSE_AUTONEG);
11201                 else
11202                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11203
11204                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11205                         u32 oldadv = phydev->advertising &
11206                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11207                         if (oldadv != newadv) {
11208                                 phydev->advertising &=
11209                                         ~(ADVERTISED_Pause |
11210                                           ADVERTISED_Asym_Pause);
11211                                 phydev->advertising |= newadv;
11212                                 if (phydev->autoneg) {
11213                                         /*
11214                                          * Always renegotiate the link to
11215                                          * inform our link partner of our
11216                                          * flow control settings, even if the
11217                                          * flow control is forced.  Let
11218                                          * tg3_adjust_link() do the final
11219                                          * flow control setup.
11220                                          */
11221                                         return phy_start_aneg(phydev);
11222                                 }
11223                         }
11224
11225                         if (!epause->autoneg)
11226                                 tg3_setup_flow_control(tp, 0, 0);
11227                 } else {
11228                         tp->link_config.advertising &=
11229                                         ~(ADVERTISED_Pause |
11230                                           ADVERTISED_Asym_Pause);
11231                         tp->link_config.advertising |= newadv;
11232                 }
11233         } else {
11234                 int irq_sync = 0;
11235
11236                 if (netif_running(dev)) {
11237                         tg3_netif_stop(tp);
11238                         irq_sync = 1;
11239                 }
11240
11241                 tg3_full_lock(tp, irq_sync);
11242
11243                 if (epause->autoneg)
11244                         tg3_flag_set(tp, PAUSE_AUTONEG);
11245                 else
11246                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11247                 if (epause->rx_pause)
11248                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11249                 else
11250                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11251                 if (epause->tx_pause)
11252                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11253                 else
11254                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11255
11256                 if (netif_running(dev)) {
11257                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11258                         err = tg3_restart_hw(tp, 1);
11259                         if (!err)
11260                                 tg3_netif_start(tp);
11261                 }
11262
11263                 tg3_full_unlock(tp);
11264         }
11265
11266         return err;
11267 }
11268
11269 static int tg3_get_sset_count(struct net_device *dev, int sset)
11270 {
11271         switch (sset) {
11272         case ETH_SS_TEST:
11273                 return TG3_NUM_TEST;
11274         case ETH_SS_STATS:
11275                 return TG3_NUM_STATS;
11276         default:
11277                 return -EOPNOTSUPP;
11278         }
11279 }
11280
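/* ethtool get_rxnfc.  Only ETHTOOL_GRXRINGS is supported; it reports
 * the number of RX rings usable for RSS (the first interrupt vector is
 * reserved for link interrupts, hence the "- 1" below).  The ethtool
 * core uses this, e.g., to size the RSS indirection table output.
 */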
11281 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11282                          u32 *rules __always_unused)
11283 {
11284         struct tg3 *tp = netdev_priv(dev);
11285
11286         if (!tg3_flag(tp, SUPPORT_MSIX))
11287                 return -EOPNOTSUPP;
11288
11289         switch (info->cmd) {
11290         case ETHTOOL_GRXRINGS:
11291                 if (netif_running(tp->dev)) {
11292                         info->data = tp->rxq_cnt;
11293                 } else {
11294                         info->data = num_online_cpus();
11295                         if (info->data > TG3_RSS_MAX_NUM_QS)
11296                                 info->data = TG3_RSS_MAX_NUM_QS;
11297                 }
11298
11299                 /* The first interrupt vector only
11300                  * handles link interrupts.
11301                  */
11302                 info->data -= 1;
11303                 return 0;
11304
11305         default:
11306                 return -EOPNOTSUPP;
11307         }
11308 }
11309
11310 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11311 {
11312         u32 size = 0;
11313         struct tg3 *tp = netdev_priv(dev);
11314
11315         if (tg3_flag(tp, SUPPORT_MSIX))
11316                 size = TG3_RSS_INDIR_TBL_SIZE;
11317
11318         return size;
11319 }
11320
11321 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11322 {
11323         struct tg3 *tp = netdev_priv(dev);
11324         int i;
11325
11326         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11327                 indir[i] = tp->rss_ind_tbl[i];
11328
11329         return 0;
11330 }
11331
11332 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11333 {
11334         struct tg3 *tp = netdev_priv(dev);
11335         size_t i;
11336
11337         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11338                 tp->rss_ind_tbl[i] = indir[i];
11339
11340         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11341                 return 0;
11342
11343         /* It is legal to write the indirection
11344          * table while the device is running.
11345          */
11346         tg3_full_lock(tp, 0);
11347         tg3_rss_write_indir_tbl(tp);
11348         tg3_full_unlock(tp);
11349
11350         return 0;
11351 }
11352
11353 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11354 {
11355         switch (stringset) {
11356         case ETH_SS_STATS:
11357                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11358                 break;
11359         case ETH_SS_TEST:
11360                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11361                 break;
11362         default:
11363                 WARN_ON(1);     /* unknown stringset; should never happen */
11364                 break;
11365         }
11366 }
11367
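/* ethtool LED identify.  Returning 1 for ETHTOOL_ID_ACTIVE asks the
 * ethtool core to alternate ID_ON/ID_OFF once per second; ID_ON forces
 * the link and traffic LEDs on through MAC_LED_CTRL overrides, and
 * ID_INACTIVE restores the saved LED configuration.
 */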
11368 static int tg3_set_phys_id(struct net_device *dev,
11369                             enum ethtool_phys_id_state state)
11370 {
11371         struct tg3 *tp = netdev_priv(dev);
11372
11373         if (!netif_running(tp->dev))
11374                 return -EAGAIN;
11375
11376         switch (state) {
11377         case ETHTOOL_ID_ACTIVE:
11378                 return 1;       /* cycle on/off once per second */
11379
11380         case ETHTOOL_ID_ON:
11381                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11382                      LED_CTRL_1000MBPS_ON |
11383                      LED_CTRL_100MBPS_ON |
11384                      LED_CTRL_10MBPS_ON |
11385                      LED_CTRL_TRAFFIC_OVERRIDE |
11386                      LED_CTRL_TRAFFIC_BLINK |
11387                      LED_CTRL_TRAFFIC_LED);
11388                 break;
11389
11390         case ETHTOOL_ID_OFF:
11391                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11392                      LED_CTRL_TRAFFIC_OVERRIDE);
11393                 break;
11394
11395         case ETHTOOL_ID_INACTIVE:
11396                 tw32(MAC_LED_CTRL, tp->led_ctrl);
11397                 break;
11398         }
11399
11400         return 0;
11401 }
11402
11403 static void tg3_get_ethtool_stats(struct net_device *dev,
11404                                    struct ethtool_stats *estats, u64 *tmp_stats)
11405 {
11406         struct tg3 *tp = netdev_priv(dev);
11407
11408         if (tp->hw_stats)
11409                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11410         else
11411                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11412 }
11413
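/* Read the device's VPD block into a freshly allocated buffer.  If the
 * NVRAM directory advertises an extended VPD area, that area is used;
 * otherwise the block is read from the default VPD offset, or through
 * the PCI VPD capability when the NVRAM magic is not recognized.
 * Returns the buffer (caller must kfree() it) and its length via
 * @vpdlen, or NULL on failure.
 */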
11414 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11415 {
11416         int i;
11417         __be32 *buf;
11418         u32 offset = 0, len = 0;
11419         u32 magic, val;
11420
11421         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11422                 return NULL;
11423
11424         if (magic == TG3_EEPROM_MAGIC) {
11425                 for (offset = TG3_NVM_DIR_START;
11426                      offset < TG3_NVM_DIR_END;
11427                      offset += TG3_NVM_DIRENT_SIZE) {
11428                         if (tg3_nvram_read(tp, offset, &val))
11429                                 return NULL;
11430
11431                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11432                             TG3_NVM_DIRTYPE_EXTVPD)
11433                                 break;
11434                 }
11435
11436                 if (offset != TG3_NVM_DIR_END) {
11437                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11438                         if (tg3_nvram_read(tp, offset + 4, &offset))
11439                                 return NULL;
11440
11441                         offset = tg3_nvram_logical_addr(tp, offset);
11442                 }
11443         }
11444
11445         if (!offset || !len) {
11446                 offset = TG3_NVM_VPD_OFF;
11447                 len = TG3_NVM_VPD_LEN;
11448         }
11449
11450         buf = kmalloc(len, GFP_KERNEL);
11451         if (buf == NULL)
11452                 return NULL;
11453
11454         if (magic == TG3_EEPROM_MAGIC) {
11455                 for (i = 0; i < len; i += 4) {
11456                         /* The data is in little-endian format in NVRAM.
11457                          * Use the big-endian read routines to preserve
11458                          * the byte order as it exists in NVRAM.
11459                          */
11460                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11461                                 goto error;
11462                 }
11463         } else {
11464                 u8 *ptr;
11465                 ssize_t cnt;
11466                 unsigned int pos = 0;
11467
11468                 ptr = (u8 *)&buf[0];
11469                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11470                         cnt = pci_read_vpd(tp->pdev, pos,
11471                                            len - pos, ptr);
11472                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
11473                                 cnt = 0;
11474                         else if (cnt < 0)
11475                                 goto error;
11476                 }
11477                 if (pos != len)
11478                         goto error;
11479         }
11480
11481         *vpdlen = len;
11482
11483         return buf;
11484
11485 error:
11486         kfree(buf);
11487         return NULL;
11488 }
11489
11490 #define NVRAM_TEST_SIZE 0x100
11491 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
11492 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
11493 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
11494 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
11495 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
11496 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
11497 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11498 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11499
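/* NVRAM self test.  The verification method depends on the image
 * format: selfboot firmware images carry a simple 8-bit checksum (or
 * per-byte parity bits for the hardware selfboot format), while legacy
 * images are verified with a CRC over the bootstrap and manufacturing
 * blocks plus, if present, the VPD checksum keyword.
 */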
11500 static int tg3_test_nvram(struct tg3 *tp)
11501 {
11502         u32 csum, magic, len;
11503         __be32 *buf;
11504         int i, j, k, err = 0, size;
11505
11506         if (tg3_flag(tp, NO_NVRAM))
11507                 return 0;
11508
11509         if (tg3_nvram_read(tp, 0, &magic) != 0)
11510                 return -EIO;
11511
11512         if (magic == TG3_EEPROM_MAGIC)
11513                 size = NVRAM_TEST_SIZE;
11514         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11515                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11516                     TG3_EEPROM_SB_FORMAT_1) {
11517                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11518                         case TG3_EEPROM_SB_REVISION_0:
11519                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11520                                 break;
11521                         case TG3_EEPROM_SB_REVISION_2:
11522                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11523                                 break;
11524                         case TG3_EEPROM_SB_REVISION_3:
11525                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11526                                 break;
11527                         case TG3_EEPROM_SB_REVISION_4:
11528                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11529                                 break;
11530                         case TG3_EEPROM_SB_REVISION_5:
11531                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11532                                 break;
11533                         case TG3_EEPROM_SB_REVISION_6:
11534                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11535                                 break;
11536                         default:
11537                                 return -EIO;
11538                         }
11539                 } else
11540                         return 0;
11541         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11542                 size = NVRAM_SELFBOOT_HW_SIZE;
11543         else
11544                 return -EIO;
11545
11546         buf = kmalloc(size, GFP_KERNEL);
11547         if (buf == NULL)
11548                 return -ENOMEM;
11549
11550         err = -EIO;
11551         for (i = 0, j = 0; i < size; i += 4, j++) {
11552                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11553                 if (err)
11554                         break;
11555         }
11556         if (i < size)
11557                 goto out;
11558
11559         /* Selfboot format */
11560         magic = be32_to_cpu(buf[0]);
11561         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11562             TG3_EEPROM_MAGIC_FW) {
11563                 u8 *buf8 = (u8 *) buf, csum8 = 0;
11564
11565                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11566                     TG3_EEPROM_SB_REVISION_2) {
11567                         /* For rev 2, the csum doesn't include the MBA. */
11568                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11569                                 csum8 += buf8[i];
11570                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11571                                 csum8 += buf8[i];
11572                 } else {
11573                         for (i = 0; i < size; i++)
11574                                 csum8 += buf8[i];
11575                 }
11576
11577                 if (csum8 == 0) {
11578                         err = 0;
11579                         goto out;
11580                 }
11581
11582                 err = -EIO;
11583                 goto out;
11584         }
11585
11586         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11587             TG3_EEPROM_MAGIC_HW) {
11588                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11589                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11590                 u8 *buf8 = (u8 *) buf;
11591
11592                 /* Separate the parity bits and the data bytes.  */
11593                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11594                         if ((i == 0) || (i == 8)) {
11595                                 int l;
11596                                 u8 msk;
11597
11598                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11599                                         parity[k++] = buf8[i] & msk;
11600                                 i++;
11601                         } else if (i == 16) {
11602                                 int l;
11603                                 u8 msk;
11604
11605                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11606                                         parity[k++] = buf8[i] & msk;
11607                                 i++;
11608
11609                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11610                                         parity[k++] = buf8[i] & msk;
11611                                 i++;
11612                         }
11613                         data[j++] = buf8[i];
11614                 }
11615
11616                 err = -EIO;
11617                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11618                         u8 hw8 = hweight8(data[i]);
11619
11620                         if ((hw8 & 0x1) && parity[i])
11621                                 goto out;
11622                         else if (!(hw8 & 0x1) && !parity[i])
11623                                 goto out;
11624                 }
11625                 err = 0;
11626                 goto out;
11627         }
11628
11629         err = -EIO;
11630
11631         /* Bootstrap checksum at offset 0x10 */
11632         csum = calc_crc((unsigned char *) buf, 0x10);
11633         if (csum != le32_to_cpu(buf[0x10/4]))
11634                 goto out;
11635
11636         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11637         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11638         if (csum != le32_to_cpu(buf[0xfc/4]))
11639                 goto out;
11640
11641         kfree(buf);
11642
11643         buf = tg3_vpd_readblock(tp, &len);
11644         if (!buf)
11645                 return -ENOMEM;
11646
11647         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11648         if (i > 0) {
11649                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11650                 if (j < 0)
11651                         goto out;
11652
11653                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11654                         goto out;
11655
11656                 i += PCI_VPD_LRDT_TAG_SIZE;
11657                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11658                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11659                 if (j > 0) {
11660                         u8 csum8 = 0;
11661
11662                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11663
11664                         for (i = 0; i <= j; i++)
11665                                 csum8 += ((u8 *)buf)[i];
11666
11667                         if (csum8)
11668                                 goto out;
11669                 }
11670         }
11671
11672         err = 0;
11673
11674 out:
11675         kfree(buf);
11676         return err;
11677 }
11678
11679 #define TG3_SERDES_TIMEOUT_SEC  2
11680 #define TG3_COPPER_TIMEOUT_SEC  6
11681
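/* Link self test: poll the carrier for up to TG3_SERDES_TIMEOUT_SEC or
 * TG3_COPPER_TIMEOUT_SEC seconds and fail if the link never comes up.
 */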
11682 static int tg3_test_link(struct tg3 *tp)
11683 {
11684         int i, max;
11685
11686         if (!netif_running(tp->dev))
11687                 return -ENODEV;
11688
11689         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11690                 max = TG3_SERDES_TIMEOUT_SEC;
11691         else
11692                 max = TG3_COPPER_TIMEOUT_SEC;
11693
11694         for (i = 0; i < max; i++) {
11695                 if (netif_carrier_ok(tp->dev))
11696                         return 0;
11697
11698                 if (msleep_interruptible(1000))
11699                         break;
11700         }
11701
11702         return -EIO;
11703 }
11704
11705 /* Only test the commonly used registers */
11706 static int tg3_test_registers(struct tg3 *tp)
11707 {
11708         int i, is_5705, is_5750;
11709         u32 offset, read_mask, write_mask, val, save_val, read_val;
11710         static struct {
11711                 u16 offset;
11712                 u16 flags;
11713 #define TG3_FL_5705     0x1
11714 #define TG3_FL_NOT_5705 0x2
11715 #define TG3_FL_NOT_5788 0x4
11716 #define TG3_FL_NOT_5750 0x8
11717                 u32 read_mask;
11718                 u32 write_mask;
11719         } reg_tbl[] = {
11720                 /* MAC Control Registers */
11721                 { MAC_MODE, TG3_FL_NOT_5705,
11722                         0x00000000, 0x00ef6f8c },
11723                 { MAC_MODE, TG3_FL_5705,
11724                         0x00000000, 0x01ef6b8c },
11725                 { MAC_STATUS, TG3_FL_NOT_5705,
11726                         0x03800107, 0x00000000 },
11727                 { MAC_STATUS, TG3_FL_5705,
11728                         0x03800100, 0x00000000 },
11729                 { MAC_ADDR_0_HIGH, 0x0000,
11730                         0x00000000, 0x0000ffff },
11731                 { MAC_ADDR_0_LOW, 0x0000,
11732                         0x00000000, 0xffffffff },
11733                 { MAC_RX_MTU_SIZE, 0x0000,
11734                         0x00000000, 0x0000ffff },
11735                 { MAC_TX_MODE, 0x0000,
11736                         0x00000000, 0x00000070 },
11737                 { MAC_TX_LENGTHS, 0x0000,
11738                         0x00000000, 0x00003fff },
11739                 { MAC_RX_MODE, TG3_FL_NOT_5705,
11740                         0x00000000, 0x000007fc },
11741                 { MAC_RX_MODE, TG3_FL_5705,
11742                         0x00000000, 0x000007dc },
11743                 { MAC_HASH_REG_0, 0x0000,
11744                         0x00000000, 0xffffffff },
11745                 { MAC_HASH_REG_1, 0x0000,
11746                         0x00000000, 0xffffffff },
11747                 { MAC_HASH_REG_2, 0x0000,
11748                         0x00000000, 0xffffffff },
11749                 { MAC_HASH_REG_3, 0x0000,
11750                         0x00000000, 0xffffffff },
11751
11752                 /* Receive Data and Receive BD Initiator Control Registers. */
11753                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11754                         0x00000000, 0xffffffff },
11755                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11756                         0x00000000, 0xffffffff },
11757                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11758                         0x00000000, 0x00000003 },
11759                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11760                         0x00000000, 0xffffffff },
11761                 { RCVDBDI_STD_BD+0, 0x0000,
11762                         0x00000000, 0xffffffff },
11763                 { RCVDBDI_STD_BD+4, 0x0000,
11764                         0x00000000, 0xffffffff },
11765                 { RCVDBDI_STD_BD+8, 0x0000,
11766                         0x00000000, 0xffff0002 },
11767                 { RCVDBDI_STD_BD+0xc, 0x0000,
11768                         0x00000000, 0xffffffff },
11769
11770                 /* Receive BD Initiator Control Registers. */
11771                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11772                         0x00000000, 0xffffffff },
11773                 { RCVBDI_STD_THRESH, TG3_FL_5705,
11774                         0x00000000, 0x000003ff },
11775                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11776                         0x00000000, 0xffffffff },
11777
11778                 /* Host Coalescing Control Registers. */
11779                 { HOSTCC_MODE, TG3_FL_NOT_5705,
11780                         0x00000000, 0x00000004 },
11781                 { HOSTCC_MODE, TG3_FL_5705,
11782                         0x00000000, 0x000000f6 },
11783                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11784                         0x00000000, 0xffffffff },
11785                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11786                         0x00000000, 0x000003ff },
11787                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11788                         0x00000000, 0xffffffff },
11789                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11790                         0x00000000, 0x000003ff },
11791                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11792                         0x00000000, 0xffffffff },
11793                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11794                         0x00000000, 0x000000ff },
11795                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11796                         0x00000000, 0xffffffff },
11797                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11798                         0x00000000, 0x000000ff },
11799                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11800                         0x00000000, 0xffffffff },
11801                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11802                         0x00000000, 0xffffffff },
11803                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11804                         0x00000000, 0xffffffff },
11805                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11806                         0x00000000, 0x000000ff },
11807                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11808                         0x00000000, 0xffffffff },
11809                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11810                         0x00000000, 0x000000ff },
11811                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11812                         0x00000000, 0xffffffff },
11813                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11814                         0x00000000, 0xffffffff },
11815                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11816                         0x00000000, 0xffffffff },
11817                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11818                         0x00000000, 0xffffffff },
11819                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11820                         0x00000000, 0xffffffff },
11821                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11822                         0xffffffff, 0x00000000 },
11823                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11824                         0xffffffff, 0x00000000 },
11825
11826                 /* Buffer Manager Control Registers. */
11827                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11828                         0x00000000, 0x007fff80 },
11829                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11830                         0x00000000, 0x007fffff },
11831                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11832                         0x00000000, 0x0000003f },
11833                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11834                         0x00000000, 0x000001ff },
11835                 { BUFMGR_MB_HIGH_WATER, 0x0000,
11836                         0x00000000, 0x000001ff },
11837                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11838                         0xffffffff, 0x00000000 },
11839                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11840                         0xffffffff, 0x00000000 },
11841
11842                 /* Mailbox Registers */
11843                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11844                         0x00000000, 0x000001ff },
11845                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11846                         0x00000000, 0x000001ff },
11847                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11848                         0x00000000, 0x000007ff },
11849                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11850                         0x00000000, 0x000001ff },
11851
11852                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11853         };
11854
11855         is_5705 = is_5750 = 0;
11856         if (tg3_flag(tp, 5705_PLUS)) {
11857                 is_5705 = 1;
11858                 if (tg3_flag(tp, 5750_PLUS))
11859                         is_5750 = 1;
11860         }
11861
11862         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11863                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11864                         continue;
11865
11866                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11867                         continue;
11868
11869                 if (tg3_flag(tp, IS_5788) &&
11870                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11871                         continue;
11872
11873                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11874                         continue;
11875
11876                 offset = (u32) reg_tbl[i].offset;
11877                 read_mask = reg_tbl[i].read_mask;
11878                 write_mask = reg_tbl[i].write_mask;
11879
11880                 /* Save the original register content */
11881                 save_val = tr32(offset);
11882
11883                 /* Determine the read-only value. */
11884                 read_val = save_val & read_mask;
11885
11886                 /* Write zero to the register, then make sure the read-only bits
11887                  * are not changed and the read/write bits are all zeros.
11888                  */
11889                 tw32(offset, 0);
11890
11891                 val = tr32(offset);
11892
11893                 /* Test the read-only and read/write bits. */
11894                 if (((val & read_mask) != read_val) || (val & write_mask))
11895                         goto out;
11896
11897                 /* Write ones to all the bits defined by read_mask and
11898                  * write_mask, then make sure the read-only bits are not
11899                  * changed and the read/write bits are all ones.
11900                  */
11901                 tw32(offset, read_mask | write_mask);
11902
11903                 val = tr32(offset);
11904
11905                 /* Test the read-only bits. */
11906                 if ((val & read_mask) != read_val)
11907                         goto out;
11908
11909                 /* Test the read/write bits. */
11910                 if ((val & write_mask) != write_mask)
11911                         goto out;
11912
11913                 tw32(offset, save_val);
11914         }
11915
11916         return 0;
11917
11918 out:
11919         if (netif_msg_hw(tp))
11920                 netdev_err(tp->dev,
11921                            "Register test failed at offset %x\n", offset);
11922         tw32(offset, save_val);
11923         return -EIO;
11924 }
11925
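/* Write each test pattern across @len bytes of internal memory at
 * @offset and read every word back, failing on the first mismatch.
 */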
11926 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11927 {
11928         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11929         int i;
11930         u32 j;
11931
11932         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11933                 for (j = 0; j < len; j += 4) {
11934                         u32 val;
11935
11936                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11937                         tg3_read_mem(tp, offset + j, &val);
11938                         if (val != test_pattern[i])
11939                                 return -EIO;
11940                 }
11941         }
11942         return 0;
11943 }
11944
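/* Memory self test.  Each table below lists the internal SRAM regions
 * (offset/length pairs, terminated by an offset of 0xffffffff) that
 * are safe to pattern-test on the corresponding ASIC generation.
 */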
11945 static int tg3_test_memory(struct tg3 *tp)
11946 {
11947         static struct mem_entry {
11948                 u32 offset;
11949                 u32 len;
11950         } mem_tbl_570x[] = {
11951                 { 0x00000000, 0x00b50},
11952                 { 0x00002000, 0x1c000},
11953                 { 0xffffffff, 0x00000}
11954         }, mem_tbl_5705[] = {
11955                 { 0x00000100, 0x0000c},
11956                 { 0x00000200, 0x00008},
11957                 { 0x00004000, 0x00800},
11958                 { 0x00006000, 0x01000},
11959                 { 0x00008000, 0x02000},
11960                 { 0x00010000, 0x0e000},
11961                 { 0xffffffff, 0x00000}
11962         }, mem_tbl_5755[] = {
11963                 { 0x00000200, 0x00008},
11964                 { 0x00004000, 0x00800},
11965                 { 0x00006000, 0x00800},
11966                 { 0x00008000, 0x02000},
11967                 { 0x00010000, 0x0c000},
11968                 { 0xffffffff, 0x00000}
11969         }, mem_tbl_5906[] = {
11970                 { 0x00000200, 0x00008},
11971                 { 0x00004000, 0x00400},
11972                 { 0x00006000, 0x00400},
11973                 { 0x00008000, 0x01000},
11974                 { 0x00010000, 0x01000},
11975                 { 0xffffffff, 0x00000}
11976         }, mem_tbl_5717[] = {
11977                 { 0x00000200, 0x00008},
11978                 { 0x00010000, 0x0a000},
11979                 { 0x00020000, 0x13c00},
11980                 { 0xffffffff, 0x00000}
11981         }, mem_tbl_57765[] = {
11982                 { 0x00000200, 0x00008},
11983                 { 0x00004000, 0x00800},
11984                 { 0x00006000, 0x09800},
11985                 { 0x00010000, 0x0a000},
11986                 { 0xffffffff, 0x00000}
11987         };
11988         struct mem_entry *mem_tbl;
11989         int err = 0;
11990         int i;
11991
11992         if (tg3_flag(tp, 5717_PLUS))
11993                 mem_tbl = mem_tbl_5717;
11994         else if (tg3_flag(tp, 57765_CLASS))
11995                 mem_tbl = mem_tbl_57765;
11996         else if (tg3_flag(tp, 5755_PLUS))
11997                 mem_tbl = mem_tbl_5755;
11998         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11999                 mem_tbl = mem_tbl_5906;
12000         else if (tg3_flag(tp, 5705_PLUS))
12001                 mem_tbl = mem_tbl_5705;
12002         else
12003                 mem_tbl = mem_tbl_570x;
12004
12005         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12006                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12007                 if (err)
12008                         break;
12009         }
12010
12011         return err;
12012 }
12013
12014 #define TG3_TSO_MSS             500
12015
12016 #define TG3_TSO_IP_HDR_LEN      20
12017 #define TG3_TSO_TCP_HDR_LEN     20
12018 #define TG3_TSO_TCP_OPT_LEN     12
12019
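/* Canned header for the TSO loopback test: the EtherType (0x0800), a
 * 20-byte IPv4 header, and a 20-byte TCP header followed by a 12-byte
 * timestamp option (the trailing 0x11 bytes are filler).
 */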
12020 static const u8 tg3_tso_header[] = {
12021 0x08, 0x00,
12022 0x45, 0x00, 0x00, 0x00,
12023 0x00, 0x00, 0x40, 0x00,
12024 0x40, 0x06, 0x00, 0x00,
12025 0x0a, 0x00, 0x00, 0x01,
12026 0x0a, 0x00, 0x00, 0x02,
12027 0x0d, 0x00, 0xe0, 0x00,
12028 0x00, 0x00, 0x01, 0x00,
12029 0x00, 0x00, 0x02, 0x00,
12030 0x80, 0x10, 0x10, 0x00,
12031 0x14, 0x09, 0x00, 0x00,
12032 0x01, 0x01, 0x08, 0x0a,
12033 0x11, 0x11, 0x11, 0x11,
12034 0x11, 0x11, 0x11, 0x11,
12035 };
12036
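/* Run one loopback iteration: build a @pktsz byte test frame
 * (optionally shaped as a TSO super-frame), queue it on the transmit
 * ring, poll the hardware completion indices, and verify that every
 * payload byte of the looped-back frame(s) arrived intact.
 */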
12037 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12038 {
12039         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12040         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12041         u32 budget;
12042         struct sk_buff *skb;
12043         u8 *tx_data, *rx_data;
12044         dma_addr_t map;
12045         int num_pkts, tx_len, rx_len, i, err;
12046         struct tg3_rx_buffer_desc *desc;
12047         struct tg3_napi *tnapi, *rnapi;
12048         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12049
12050         tnapi = &tp->napi[0];
12051         rnapi = &tp->napi[0];
12052         if (tp->irq_cnt > 1) {
12053                 if (tg3_flag(tp, ENABLE_RSS))
12054                         rnapi = &tp->napi[1];
12055                 if (tg3_flag(tp, ENABLE_TSS))
12056                         tnapi = &tp->napi[1];
12057         }
12058         coal_now = tnapi->coal_now | rnapi->coal_now;
12059
12060         err = -EIO;
12061
12062         tx_len = pktsz;
12063         skb = netdev_alloc_skb(tp->dev, tx_len);
12064         if (!skb)
12065                 return -ENOMEM;
12066
12067         tx_data = skb_put(skb, tx_len);
12068         memcpy(tx_data, tp->dev->dev_addr, 6);
12069         memset(tx_data + 6, 0x0, 8);
12070
12071         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12072
12073         if (tso_loopback) {
12074                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12075
12076                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12077                               TG3_TSO_TCP_OPT_LEN;
12078
12079                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12080                        sizeof(tg3_tso_header));
12081                 mss = TG3_TSO_MSS;
12082
12083                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12084                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12085
12086                 /* Set the total length field in the IP header */
12087                 iph->tot_len = htons((u16)(mss + hdr_len));
12088
12089                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12090                               TXD_FLAG_CPU_POST_DMA);
12091
12092                 if (tg3_flag(tp, HW_TSO_1) ||
12093                     tg3_flag(tp, HW_TSO_2) ||
12094                     tg3_flag(tp, HW_TSO_3)) {
12095                         struct tcphdr *th;
12096                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12097                         th = (struct tcphdr *)&tx_data[val];
12098                         th->check = 0;
12099                 } else
12100                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
12101
12102                 if (tg3_flag(tp, HW_TSO_3)) {
12103                         mss |= (hdr_len & 0xc) << 12;
12104                         if (hdr_len & 0x10)
12105                                 base_flags |= 0x00000010;
12106                         base_flags |= (hdr_len & 0x3e0) << 5;
12107                 } else if (tg3_flag(tp, HW_TSO_2))
12108                         mss |= hdr_len << 9;
12109                 else if (tg3_flag(tp, HW_TSO_1) ||
12110                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12111                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12112                 } else {
12113                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12114                 }
12115
12116                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12117         } else {
12118                 num_pkts = 1;
12119                 data_off = ETH_HLEN;
12120
12121                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12122                     tx_len > VLAN_ETH_FRAME_LEN)
12123                         base_flags |= TXD_FLAG_JMB_PKT;
12124         }
12125
12126         for (i = data_off; i < tx_len; i++)
12127                 tx_data[i] = (u8) (i & 0xff);
12128
12129         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12130         if (pci_dma_mapping_error(tp->pdev, map)) {
12131                 dev_kfree_skb(skb);
12132                 return -EIO;
12133         }
12134
12135         val = tnapi->tx_prod;
12136         tnapi->tx_buffers[val].skb = skb;
12137         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12138
12139         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12140                rnapi->coal_now);
12141
12142         udelay(10);
12143
12144         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12145
12146         budget = tg3_tx_avail(tnapi);
12147         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12148                             base_flags | TXD_FLAG_END, mss, 0)) {
12149                 tnapi->tx_buffers[val].skb = NULL;
12150                 dev_kfree_skb(skb);
12151                 return -EIO;
12152         }
12153
12154         tnapi->tx_prod++;
12155
12156         /* Sync BD data before updating mailbox */
12157         wmb();
12158
12159         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12160         tr32_mailbox(tnapi->prodmbox);
12161
12162         udelay(10);
12163
12164         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
12165         for (i = 0; i < 35; i++) {
12166                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12167                        coal_now);
12168
12169                 udelay(10);
12170
12171                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12172                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12173                 if ((tx_idx == tnapi->tx_prod) &&
12174                     (rx_idx == (rx_start_idx + num_pkts)))
12175                         break;
12176         }
12177
12178         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12179         dev_kfree_skb(skb);
12180
12181         if (tx_idx != tnapi->tx_prod)
12182                 goto out;
12183
12184         if (rx_idx != rx_start_idx + num_pkts)
12185                 goto out;
12186
12187         val = data_off;
12188         while (rx_idx != rx_start_idx) {
12189                 desc = &rnapi->rx_rcb[rx_start_idx++];
12190                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12191                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12192
12193                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12194                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12195                         goto out;
12196
12197                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12198                          - ETH_FCS_LEN;
12199
12200                 if (!tso_loopback) {
12201                         if (rx_len != tx_len)
12202                                 goto out;
12203
12204                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12205                                 if (opaque_key != RXD_OPAQUE_RING_STD)
12206                                         goto out;
12207                         } else {
12208                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12209                                         goto out;
12210                         }
12211                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12212                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12213                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
12214                         goto out;
12215                 }
12216
12217                 if (opaque_key == RXD_OPAQUE_RING_STD) {
12218                         rx_data = tpr->rx_std_buffers[desc_idx].data;
12219                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12220                                              mapping);
12221                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12222                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12223                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12224                                              mapping);
12225                 } else
12226                         goto out;
12227
12228                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12229                                             PCI_DMA_FROMDEVICE);
12230
12231                 rx_data += TG3_RX_OFFSET(tp);
12232                 for (i = data_off; i < rx_len; i++, val++) {
12233                         if (*(rx_data + i) != (u8) (val & 0xff))
12234                                 goto out;
12235                 }
12236         }
12237
12238         err = 0;
12239
12240         /* tg3_free_rings will unmap and free the rx_data */
12241 out:
12242         return err;
12243 }
12244
12245 #define TG3_STD_LOOPBACK_FAILED         1
12246 #define TG3_JMB_LOOPBACK_FAILED         2
12247 #define TG3_TSO_LOOPBACK_FAILED         4
12248 #define TG3_LOOPBACK_FAILED \
12249         (TG3_STD_LOOPBACK_FAILED | \
12250          TG3_JMB_LOOPBACK_FAILED | \
12251          TG3_TSO_LOOPBACK_FAILED)
12252
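/* Loopback self test.  data[0] accumulates MAC loopback failures,
 * data[1] internal PHY loopback failures, and, when @do_extlpbk is
 * set, data[2] external loopback failures; each word is a bitmask of
 * the TG3_*_LOOPBACK_FAILED flags above.
 */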
12253 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12254 {
12255         int err = -EIO;
12256         u32 eee_cap;
12257         u32 jmb_pkt_sz = 9000;
12258
12259         if (tp->dma_limit)
12260                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12261
12262         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12263         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12264
12265         if (!netif_running(tp->dev)) {
12266                 data[0] = TG3_LOOPBACK_FAILED;
12267                 data[1] = TG3_LOOPBACK_FAILED;
12268                 if (do_extlpbk)
12269                         data[2] = TG3_LOOPBACK_FAILED;
12270                 goto done;
12271         }
12272
12273         err = tg3_reset_hw(tp, 1);
12274         if (err) {
12275                 data[0] = TG3_LOOPBACK_FAILED;
12276                 data[1] = TG3_LOOPBACK_FAILED;
12277                 if (do_extlpbk)
12278                         data[2] = TG3_LOOPBACK_FAILED;
12279                 goto done;
12280         }
12281
12282         if (tg3_flag(tp, ENABLE_RSS)) {
12283                 int i;
12284
12285                 /* Reroute all rx packets to the 1st queue */
12286                 for (i = MAC_RSS_INDIR_TBL_0;
12287                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12288                         tw32(i, 0x0);
12289         }
12290
12291         /* HW errata - MAC loopback fails in some cases on 5780.
12292          * Normal traffic and PHY loopback are not affected by this
12293          * erratum.  Also, the MAC loopback test is deprecated for
12294          * all newer ASIC revisions.
12295          */
12296         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12297             !tg3_flag(tp, CPMU_PRESENT)) {
12298                 tg3_mac_loopback(tp, true);
12299
12300                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12301                         data[0] |= TG3_STD_LOOPBACK_FAILED;
12302
12303                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12304                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12305                         data[0] |= TG3_JMB_LOOPBACK_FAILED;
12306
12307                 tg3_mac_loopback(tp, false);
12308         }
12309
12310         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12311             !tg3_flag(tp, USE_PHYLIB)) {
12312                 int i;
12313
12314                 tg3_phy_lpbk_set(tp, 0, false);
12315
12316                 /* Wait for link */
12317                 for (i = 0; i < 100; i++) {
12318                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12319                                 break;
12320                         mdelay(1);
12321                 }
12322
12323                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12324                         data[1] |= TG3_STD_LOOPBACK_FAILED;
12325                 if (tg3_flag(tp, TSO_CAPABLE) &&
12326                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12327                         data[1] |= TG3_TSO_LOOPBACK_FAILED;
12328                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12329                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12330                         data[1] |= TG3_JMB_LOOPBACK_FAILED;
12331
12332                 if (do_extlpbk) {
12333                         tg3_phy_lpbk_set(tp, 0, true);
12334
12335                         /* All link indications report up, but the hardware
12336                          * isn't really ready for about 20 msec.  Double it
12337                          * to be sure.
12338                          */
12339                         mdelay(40);
12340
12341                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12342                                 data[2] |= TG3_STD_LOOPBACK_FAILED;
12343                         if (tg3_flag(tp, TSO_CAPABLE) &&
12344                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12345                                 data[2] |= TG3_TSO_LOOPBACK_FAILED;
12346                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12347                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12348                                 data[2] |= TG3_JMB_LOOPBACK_FAILED;
12349                 }
12350
12351                 /* Re-enable gphy autopowerdown. */
12352                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12353                         tg3_phy_toggle_apd(tp, true);
12354         }
12355
12356         err = (data[0] | data[1] | data[2]) ? -EIO : 0;
12357
12358 done:
12359         tp->phy_flags |= eee_cap;
12360
12361         return err;
12362 }
12363
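/* ethtool self test entry point ("ethtool -t").  Results are reported
 * in @data in the order of ethtool_test_keys: NVRAM, link, registers,
 * memory, the three loopback words, and the interrupt test.
 */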
12364 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12365                           u64 *data)
12366 {
12367         struct tg3 *tp = netdev_priv(dev);
12368         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12369
12370         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12371             tg3_power_up(tp)) {
12372                 etest->flags |= ETH_TEST_FL_FAILED;
12373                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12374                 return;
12375         }
12376
12377         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12378
12379         if (tg3_test_nvram(tp) != 0) {
12380                 etest->flags |= ETH_TEST_FL_FAILED;
12381                 data[0] = 1;
12382         }
12383         if (!doextlpbk && tg3_test_link(tp)) {
12384                 etest->flags |= ETH_TEST_FL_FAILED;
12385                 data[1] = 1;
12386         }
12387         if (etest->flags & ETH_TEST_FL_OFFLINE) {
12388                 int err, err2 = 0, irq_sync = 0;
12389
12390                 if (netif_running(dev)) {
12391                         tg3_phy_stop(tp);
12392                         tg3_netif_stop(tp);
12393                         irq_sync = 1;
12394                 }
12395
12396                 tg3_full_lock(tp, irq_sync);
12397
12398                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12399                 err = tg3_nvram_lock(tp);
12400                 tg3_halt_cpu(tp, RX_CPU_BASE);
12401                 if (!tg3_flag(tp, 5705_PLUS))
12402                         tg3_halt_cpu(tp, TX_CPU_BASE);
12403                 if (!err)
12404                         tg3_nvram_unlock(tp);
12405
12406                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12407                         tg3_phy_reset(tp);
12408
12409                 if (tg3_test_registers(tp) != 0) {
12410                         etest->flags |= ETH_TEST_FL_FAILED;
12411                         data[2] = 1;
12412                 }
12413
12414                 if (tg3_test_memory(tp) != 0) {
12415                         etest->flags |= ETH_TEST_FL_FAILED;
12416                         data[3] = 1;
12417                 }
12418
12419                 if (doextlpbk)
12420                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12421
12422                 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12423                         etest->flags |= ETH_TEST_FL_FAILED;
12424
12425                 tg3_full_unlock(tp);
12426
12427                 if (tg3_test_interrupt(tp) != 0) {
12428                         etest->flags |= ETH_TEST_FL_FAILED;
12429                         data[7] = 1;
12430                 }
12431
12432                 tg3_full_lock(tp, 0);
12433
12434                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12435                 if (netif_running(dev)) {
12436                         tg3_flag_set(tp, INIT_COMPLETE);
12437                         err2 = tg3_restart_hw(tp, 1);
12438                         if (!err2)
12439                                 tg3_netif_start(tp);
12440                 }
12441
12442                 tg3_full_unlock(tp);
12443
12444                 if (irq_sync && !err2)
12445                         tg3_phy_start(tp);
12446         }
12447         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12448                 tg3_power_down(tp);
12449
12450 }
12451
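/* MII ioctl handler.  With phylib attached, requests are forwarded to
 * the PHY layer; otherwise SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG are
 * serviced directly through tg3_readphy()/tg3_writephy() under
 * tp->lock.  SERDES devices have no MII PHY to talk to.
 */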
12452 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12453 {
12454         struct mii_ioctl_data *data = if_mii(ifr);
12455         struct tg3 *tp = netdev_priv(dev);
12456         int err;
12457
12458         if (tg3_flag(tp, USE_PHYLIB)) {
12459                 struct phy_device *phydev;
12460                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12461                         return -EAGAIN;
12462                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12463                 return phy_mii_ioctl(phydev, ifr, cmd);
12464         }
12465
12466         switch (cmd) {
12467         case SIOCGMIIPHY:
12468                 data->phy_id = tp->phy_addr;
12469
12470                 /* fallthru */
12471         case SIOCGMIIREG: {
12472                 u32 mii_regval;
12473
12474                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12475                         break;                  /* We have no PHY */
12476
12477                 if (!netif_running(dev))
12478                         return -EAGAIN;
12479
12480                 spin_lock_bh(&tp->lock);
12481                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12482                 spin_unlock_bh(&tp->lock);
12483
12484                 data->val_out = mii_regval;
12485
12486                 return err;
12487         }
12488
12489         case SIOCSMIIREG:
12490                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12491                         break;                  /* We have no PHY */
12492
12493                 if (!netif_running(dev))
12494                         return -EAGAIN;
12495
12496                 spin_lock_bh(&tp->lock);
12497                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12498                 spin_unlock_bh(&tp->lock);
12499
12500                 return err;
12501
12502         default:
12503                 /* do nothing */
12504                 break;
12505         }
12506         return -EOPNOTSUPP;
12507 }
12508
12509 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12510 {
12511         struct tg3 *tp = netdev_priv(dev);
12512
12513         memcpy(ec, &tp->coal, sizeof(*ec));
12514         return 0;
12515 }
12516
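/* ethtool set_coalesce.  Bounds-check the requested values; the
 * IRQ-deferred and statistics coalescing knobs only exist on pre-5705
 * hardware, so their limits stay zero (and nonzero requests fail) on
 * newer chips.  Settings that would disable RX or TX interrupts
 * entirely are refused, and the accepted values are programmed into
 * the chip if it is running.
 */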
12517 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12518 {
12519         struct tg3 *tp = netdev_priv(dev);
12520         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12521         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12522
12523         if (!tg3_flag(tp, 5705_PLUS)) {
12524                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12525                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12526                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12527                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12528         }
12529
12530         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12531             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12532             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12533             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12534             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12535             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12536             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12537             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12538             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12539             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12540                 return -EINVAL;
12541
12542         /* No rx interrupts will be generated if both are zero */
12543         if ((ec->rx_coalesce_usecs == 0) &&
12544             (ec->rx_max_coalesced_frames == 0))
12545                 return -EINVAL;
12546
12547         /* No tx interrupts will be generated if both are zero */
12548         if ((ec->tx_coalesce_usecs == 0) &&
12549             (ec->tx_max_coalesced_frames == 0))
12550                 return -EINVAL;
12551
12552         /* Only copy relevant parameters, ignore all others. */
12553         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12554         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12555         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12556         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12557         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12558         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12559         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12560         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12561         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12562
12563         if (netif_running(dev)) {
12564                 tg3_full_lock(tp, 0);
12565                 __tg3_set_coalesce(tp, &tp->coal);
12566                 tg3_full_unlock(tp);
12567         }
12568         return 0;
12569 }
12570
12571 static const struct ethtool_ops tg3_ethtool_ops = {
12572         .get_settings           = tg3_get_settings,
12573         .set_settings           = tg3_set_settings,
12574         .get_drvinfo            = tg3_get_drvinfo,
12575         .get_regs_len           = tg3_get_regs_len,
12576         .get_regs               = tg3_get_regs,
12577         .get_wol                = tg3_get_wol,
12578         .set_wol                = tg3_set_wol,
12579         .get_msglevel           = tg3_get_msglevel,
12580         .set_msglevel           = tg3_set_msglevel,
12581         .nway_reset             = tg3_nway_reset,
12582         .get_link               = ethtool_op_get_link,
12583         .get_eeprom_len         = tg3_get_eeprom_len,
12584         .get_eeprom             = tg3_get_eeprom,
12585         .set_eeprom             = tg3_set_eeprom,
12586         .get_ringparam          = tg3_get_ringparam,
12587         .set_ringparam          = tg3_set_ringparam,
12588         .get_pauseparam         = tg3_get_pauseparam,
12589         .set_pauseparam         = tg3_set_pauseparam,
12590         .self_test              = tg3_self_test,
12591         .get_strings            = tg3_get_strings,
12592         .set_phys_id            = tg3_set_phys_id,
12593         .get_ethtool_stats      = tg3_get_ethtool_stats,
12594         .get_coalesce           = tg3_get_coalesce,
12595         .set_coalesce           = tg3_set_coalesce,
12596         .get_sset_count         = tg3_get_sset_count,
12597         .get_rxnfc              = tg3_get_rxnfc,
12598         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
12599         .get_rxfh_indir         = tg3_get_rxfh_indir,
12600         .set_rxfh_indir         = tg3_set_rxfh_indir,
12601         .get_ts_info            = ethtool_op_get_ts_info,
12602 };
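/* These handlers back the standard ethtool ioctls; for example,
 * "ethtool -i eth0" reports through tg3_get_drvinfo() and
 * "ethtool -S eth0" dumps the counters from tg3_get_ethtool_stats()
 * (eth0 again being a placeholder interface name).
 */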
12603
12604 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12605                                                 struct rtnl_link_stats64 *stats)
12606 {
12607         struct tg3 *tp = netdev_priv(dev);
12608
12609         spin_lock_bh(&tp->lock);
12610         if (!tp->hw_stats) {
12611                 spin_unlock_bh(&tp->lock);
12612                 return &tp->net_stats_prev;
12613         }
12614
12615         tg3_get_nstats(tp, stats);
12616         spin_unlock_bh(&tp->lock);
12617
12618         return stats;
12619 }
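/* Note: a NULL hw_stats means the device is down (or mid-reset) and the
 * hardware statistics block has been freed, so the snapshot saved in
 * net_stats_prev before teardown is returned instead; counters therefore
 * never appear to move backwards across a down/up cycle.
 */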
12620
12621 static void tg3_set_rx_mode(struct net_device *dev)
12622 {
12623         struct tg3 *tp = netdev_priv(dev);
12624
12625         if (!netif_running(dev))
12626                 return;
12627
12628         tg3_full_lock(tp, 0);
12629         __tg3_set_rx_mode(dev);
12630         tg3_full_unlock(tp);
12631 }
12632
12633 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12634                                int new_mtu)
12635 {
12636         dev->mtu = new_mtu;
12637
12638         if (new_mtu > ETH_DATA_LEN) {
12639                 if (tg3_flag(tp, 5780_CLASS)) {
12640                         netdev_update_features(dev);
12641                         tg3_flag_clear(tp, TSO_CAPABLE);
12642                 } else {
12643                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
12644                 }
12645         } else {
12646                 if (tg3_flag(tp, 5780_CLASS)) {
12647                         tg3_flag_set(tp, TSO_CAPABLE);
12648                         netdev_update_features(dev);
12649                 }
12650                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12651         }
12652 }
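/* On 5780-class chips firmware TSO and jumbo frames cannot be used
 * together, so instead of enabling the jumbo ring the code above
 * re-evaluates the netdev feature set and toggles TSO_CAPABLE around
 * netdev_update_features(); the ordering keeps the feature callbacks
 * consistent with the MTU being applied.
 */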
12653
12654 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12655 {
12656         struct tg3 *tp = netdev_priv(dev);
12657         int err, reset_phy = 0;
12658
12659         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12660                 return -EINVAL;
12661
12662         if (!netif_running(dev)) {
12663                 /* Nothing to do now; the new MTU will take
12664                  * effect when the device is brought up.
12665                  */
12666                 tg3_set_mtu(dev, tp, new_mtu);
12667                 return 0;
12668         }
12669
12670         tg3_phy_stop(tp);
12671
12672         tg3_netif_stop(tp);
12673
12674         tg3_full_lock(tp, 1);
12675
12676         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12677
12678         tg3_set_mtu(dev, tp, new_mtu);
12679
12680         /* Reset the PHY, otherwise the read DMA engine will be stuck in a
12681          * mode that breaks all DMA requests down to 256 bytes.
12682          */
12683         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12684                 reset_phy = 1;
12685
12686         err = tg3_restart_hw(tp, reset_phy);
12687
12688         if (!err)
12689                 tg3_netif_start(tp);
12690
12691         tg3_full_unlock(tp);
12692
12693         if (!err)
12694                 tg3_phy_start(tp);
12695
12696         return err;
12697 }
12698
12699 static const struct net_device_ops tg3_netdev_ops = {
12700         .ndo_open               = tg3_open,
12701         .ndo_stop               = tg3_close,
12702         .ndo_start_xmit         = tg3_start_xmit,
12703         .ndo_get_stats64        = tg3_get_stats64,
12704         .ndo_validate_addr      = eth_validate_addr,
12705         .ndo_set_rx_mode        = tg3_set_rx_mode,
12706         .ndo_set_mac_address    = tg3_set_mac_addr,
12707         .ndo_do_ioctl           = tg3_ioctl,
12708         .ndo_tx_timeout         = tg3_tx_timeout,
12709         .ndo_change_mtu         = tg3_change_mtu,
12710         .ndo_fix_features       = tg3_fix_features,
12711         .ndo_set_features       = tg3_set_features,
12712 #ifdef CONFIG_NET_POLL_CONTROLLER
12713         .ndo_poll_controller    = tg3_poll_controller,
12714 #endif
12715 };
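/* The core network stack drives the device through these hooks; for
 * example "ip link set dev eth0 mtu 9000" (eth0 a placeholder) lands in
 * tg3_change_mtu() above, and dev_open()/dev_close() map to
 * tg3_open()/tg3_close().
 */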
12716
12717 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12718 {
12719         u32 cursize, val, magic;
12720
12721         tp->nvram_size = EEPROM_CHIP_SIZE;
12722
12723         if (tg3_nvram_read(tp, 0, &magic) != 0)
12724                 return;
12725
12726         if ((magic != TG3_EEPROM_MAGIC) &&
12727             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12728             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12729                 return;
12730
12731         /*
12732          * Size the chip by reading offsets at increasing powers of two.
12733          * When we encounter our validation signature, we know the addressing
12734          * has wrapped around, and thus have our chip size.
12735          */
12736         cursize = 0x10;
12737
12738         while (cursize < tp->nvram_size) {
12739                 if (tg3_nvram_read(tp, cursize, &val) != 0)
12740                         return;
12741
12742                 if (val == magic)
12743                         break;
12744
12745                 cursize <<= 1;
12746         }
12747
12748         tp->nvram_size = cursize;
12749 }
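/* Worked example (illustrative): on a 128 KB (0x20000-byte) part the
 * probe reads at 0x10, 0x20, 0x40, ..., each returning ordinary data;
 * at cursize == 0x20000 the address wraps back to offset 0, the read
 * returns the magic again, the loop breaks, and nvram_size is set to
 * 0x20000.
 */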
12750
12751 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12752 {
12753         u32 val;
12754
12755         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12756                 return;
12757
12758         /* No standard magic: selfboot format, size it like an EEPROM */
12759         if (val != TG3_EEPROM_MAGIC) {
12760                 tg3_get_eeprom_size(tp);
12761                 return;
12762         }
12763
12764         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12765                 if (val != 0) {
12766                         /* This is confusing.  We want to operate on the
12767                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12768                          * call will read from NVRAM and byteswap the data
12769                          * according to the byteswapping settings for all
12770                          * other register accesses.  This ensures the data we
12771                          * want will always reside in the lower 16-bits.
12772                          * However, the data in NVRAM is in LE format, which
12773                          * means the data from the NVRAM read will always be
12774                          * opposite the endianness of the CPU.  The 16-bit
12775                          * byteswap then brings the data to CPU endianness.
12776                          */
12777                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12778                         return;
12779                 }
12780         }
12781         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12782 }
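/* Illustrative example, taking the byteswap comment above at face value:
 * if the NVRAM bytes at 0xf2/0xf3 encode 512 (little-endian 0x00, 0x02),
 * the read leaves 0x0002 in the low 16 bits and swab16(0x0002) == 0x0200,
 * so nvram_size becomes 512 * 1024 bytes.
 */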
12783
12784 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12785 {
12786         u32 nvcfg1;
12787
12788         nvcfg1 = tr32(NVRAM_CFG1);
12789         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12790                 tg3_flag_set(tp, FLASH);
12791         } else {
12792                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12793                 tw32(NVRAM_CFG1, nvcfg1);
12794         }
12795
12796         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12797             tg3_flag(tp, 5780_CLASS)) {
12798                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12799                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12800                         tp->nvram_jedecnum = JEDEC_ATMEL;
12801                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12802                         tg3_flag_set(tp, NVRAM_BUFFERED);
12803                         break;
12804                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12805                         tp->nvram_jedecnum = JEDEC_ATMEL;
12806                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12807                         break;
12808                 case FLASH_VENDOR_ATMEL_EEPROM:
12809                         tp->nvram_jedecnum = JEDEC_ATMEL;
12810                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12811                         tg3_flag_set(tp, NVRAM_BUFFERED);
12812                         break;
12813                 case FLASH_VENDOR_ST:
12814                         tp->nvram_jedecnum = JEDEC_ST;
12815                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12816                         tg3_flag_set(tp, NVRAM_BUFFERED);
12817                         break;
12818                 case FLASH_VENDOR_SAIFUN:
12819                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12820                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12821                         break;
12822                 case FLASH_VENDOR_SST_SMALL:
12823                 case FLASH_VENDOR_SST_LARGE:
12824                         tp->nvram_jedecnum = JEDEC_SST;
12825                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12826                         break;
12827                 }
12828         } else {
12829                 tp->nvram_jedecnum = JEDEC_ATMEL;
12830                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12831                 tg3_flag_set(tp, NVRAM_BUFFERED);
12832         }
12833 }
12834
12835 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12836 {
12837         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12838         case FLASH_5752PAGE_SIZE_256:
12839                 tp->nvram_pagesize = 256;
12840                 break;
12841         case FLASH_5752PAGE_SIZE_512:
12842                 tp->nvram_pagesize = 512;
12843                 break;
12844         case FLASH_5752PAGE_SIZE_1K:
12845                 tp->nvram_pagesize = 1024;
12846                 break;
12847         case FLASH_5752PAGE_SIZE_2K:
12848                 tp->nvram_pagesize = 2048;
12849                 break;
12850         case FLASH_5752PAGE_SIZE_4K:
12851                 tp->nvram_pagesize = 4096;
12852                 break;
12853         case FLASH_5752PAGE_SIZE_264:
12854                 tp->nvram_pagesize = 264;
12855                 break;
12856         case FLASH_5752PAGE_SIZE_528:
12857                 tp->nvram_pagesize = 528;
12858                 break;
12859         }
12860 }
12861
12862 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12863 {
12864         u32 nvcfg1;
12865
12866         nvcfg1 = tr32(NVRAM_CFG1);
12867
12868         /* NVRAM protection for TPM */
12869         if (nvcfg1 & (1 << 27))
12870                 tg3_flag_set(tp, PROTECTED_NVRAM);
12871
12872         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12873         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12874         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12875                 tp->nvram_jedecnum = JEDEC_ATMEL;
12876                 tg3_flag_set(tp, NVRAM_BUFFERED);
12877                 break;
12878         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12879                 tp->nvram_jedecnum = JEDEC_ATMEL;
12880                 tg3_flag_set(tp, NVRAM_BUFFERED);
12881                 tg3_flag_set(tp, FLASH);
12882                 break;
12883         case FLASH_5752VENDOR_ST_M45PE10:
12884         case FLASH_5752VENDOR_ST_M45PE20:
12885         case FLASH_5752VENDOR_ST_M45PE40:
12886                 tp->nvram_jedecnum = JEDEC_ST;
12887                 tg3_flag_set(tp, NVRAM_BUFFERED);
12888                 tg3_flag_set(tp, FLASH);
12889                 break;
12890         }
12891
12892         if (tg3_flag(tp, FLASH)) {
12893                 tg3_nvram_get_pagesize(tp, nvcfg1);
12894         } else {
12895                 /* For eeprom, set pagesize to maximum eeprom size */
12896                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12897
12898                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12899                 tw32(NVRAM_CFG1, nvcfg1);
12900         }
12901 }
12902
12903 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12904 {
12905         u32 nvcfg1, protect = 0;
12906
12907         nvcfg1 = tr32(NVRAM_CFG1);
12908
12909         /* NVRAM protection for TPM */
12910         if (nvcfg1 & (1 << 27)) {
12911                 tg3_flag_set(tp, PROTECTED_NVRAM);
12912                 protect = 1;
12913         }
12914
12915         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12916         switch (nvcfg1) {
12917         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12918         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12919         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12920         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12921                 tp->nvram_jedecnum = JEDEC_ATMEL;
12922                 tg3_flag_set(tp, NVRAM_BUFFERED);
12923                 tg3_flag_set(tp, FLASH);
12924                 tp->nvram_pagesize = 264;
12925                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12926                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12927                         tp->nvram_size = (protect ? 0x3e200 :
12928                                           TG3_NVRAM_SIZE_512KB);
12929                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12930                         tp->nvram_size = (protect ? 0x1f200 :
12931                                           TG3_NVRAM_SIZE_256KB);
12932                 else
12933                         tp->nvram_size = (protect ? 0x1f200 :
12934                                           TG3_NVRAM_SIZE_128KB);
12935                 break;
12936         case FLASH_5752VENDOR_ST_M45PE10:
12937         case FLASH_5752VENDOR_ST_M45PE20:
12938         case FLASH_5752VENDOR_ST_M45PE40:
12939                 tp->nvram_jedecnum = JEDEC_ST;
12940                 tg3_flag_set(tp, NVRAM_BUFFERED);
12941                 tg3_flag_set(tp, FLASH);
12942                 tp->nvram_pagesize = 256;
12943                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12944                         tp->nvram_size = (protect ?
12945                                           TG3_NVRAM_SIZE_64KB :
12946                                           TG3_NVRAM_SIZE_128KB);
12947                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12948                         tp->nvram_size = (protect ?
12949                                           TG3_NVRAM_SIZE_64KB :
12950                                           TG3_NVRAM_SIZE_256KB);
12951                 else
12952                         tp->nvram_size = (protect ?
12953                                           TG3_NVRAM_SIZE_128KB :
12954                                           TG3_NVRAM_SIZE_512KB);
12955                 break;
12956         }
12957 }
12958
12959 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12960 {
12961         u32 nvcfg1;
12962
12963         nvcfg1 = tr32(NVRAM_CFG1);
12964
12965         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12966         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12967         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12968         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12969         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12970                 tp->nvram_jedecnum = JEDEC_ATMEL;
12971                 tg3_flag_set(tp, NVRAM_BUFFERED);
12972                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12973
12974                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12975                 tw32(NVRAM_CFG1, nvcfg1);
12976                 break;
12977         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12978         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12979         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12980         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12981                 tp->nvram_jedecnum = JEDEC_ATMEL;
12982                 tg3_flag_set(tp, NVRAM_BUFFERED);
12983                 tg3_flag_set(tp, FLASH);
12984                 tp->nvram_pagesize = 264;
12985                 break;
12986         case FLASH_5752VENDOR_ST_M45PE10:
12987         case FLASH_5752VENDOR_ST_M45PE20:
12988         case FLASH_5752VENDOR_ST_M45PE40:
12989                 tp->nvram_jedecnum = JEDEC_ST;
12990                 tg3_flag_set(tp, NVRAM_BUFFERED);
12991                 tg3_flag_set(tp, FLASH);
12992                 tp->nvram_pagesize = 256;
12993                 break;
12994         }
12995 }
12996
12997 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12998 {
12999         u32 nvcfg1, protect = 0;
13000
13001         nvcfg1 = tr32(NVRAM_CFG1);
13002
13003         /* NVRAM protection for TPM */
13004         if (nvcfg1 & (1 << 27)) {
13005                 tg3_flag_set(tp, PROTECTED_NVRAM);
13006                 protect = 1;
13007         }
13008
13009         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13010         switch (nvcfg1) {
13011         case FLASH_5761VENDOR_ATMEL_ADB021D:
13012         case FLASH_5761VENDOR_ATMEL_ADB041D:
13013         case FLASH_5761VENDOR_ATMEL_ADB081D:
13014         case FLASH_5761VENDOR_ATMEL_ADB161D:
13015         case FLASH_5761VENDOR_ATMEL_MDB021D:
13016         case FLASH_5761VENDOR_ATMEL_MDB041D:
13017         case FLASH_5761VENDOR_ATMEL_MDB081D:
13018         case FLASH_5761VENDOR_ATMEL_MDB161D:
13019                 tp->nvram_jedecnum = JEDEC_ATMEL;
13020                 tg3_flag_set(tp, NVRAM_BUFFERED);
13021                 tg3_flag_set(tp, FLASH);
13022                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13023                 tp->nvram_pagesize = 256;
13024                 break;
13025         case FLASH_5761VENDOR_ST_A_M45PE20:
13026         case FLASH_5761VENDOR_ST_A_M45PE40:
13027         case FLASH_5761VENDOR_ST_A_M45PE80:
13028         case FLASH_5761VENDOR_ST_A_M45PE16:
13029         case FLASH_5761VENDOR_ST_M_M45PE20:
13030         case FLASH_5761VENDOR_ST_M_M45PE40:
13031         case FLASH_5761VENDOR_ST_M_M45PE80:
13032         case FLASH_5761VENDOR_ST_M_M45PE16:
13033                 tp->nvram_jedecnum = JEDEC_ST;
13034                 tg3_flag_set(tp, NVRAM_BUFFERED);
13035                 tg3_flag_set(tp, FLASH);
13036                 tp->nvram_pagesize = 256;
13037                 break;
13038         }
13039
13040         if (protect) {
13041                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13042         } else {
13043                 switch (nvcfg1) {
13044                 case FLASH_5761VENDOR_ATMEL_ADB161D:
13045                 case FLASH_5761VENDOR_ATMEL_MDB161D:
13046                 case FLASH_5761VENDOR_ST_A_M45PE16:
13047                 case FLASH_5761VENDOR_ST_M_M45PE16:
13048                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13049                         break;
13050                 case FLASH_5761VENDOR_ATMEL_ADB081D:
13051                 case FLASH_5761VENDOR_ATMEL_MDB081D:
13052                 case FLASH_5761VENDOR_ST_A_M45PE80:
13053                 case FLASH_5761VENDOR_ST_M_M45PE80:
13054                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13055                         break;
13056                 case FLASH_5761VENDOR_ATMEL_ADB041D:
13057                 case FLASH_5761VENDOR_ATMEL_MDB041D:
13058                 case FLASH_5761VENDOR_ST_A_M45PE40:
13059                 case FLASH_5761VENDOR_ST_M_M45PE40:
13060                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13061                         break;
13062                 case FLASH_5761VENDOR_ATMEL_ADB021D:
13063                 case FLASH_5761VENDOR_ATMEL_MDB021D:
13064                 case FLASH_5761VENDOR_ST_A_M45PE20:
13065                 case FLASH_5761VENDOR_ST_M_M45PE20:
13066                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13067                         break;
13068                 }
13069         }
13070 }
13071
13072 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
13073 {
13074         tp->nvram_jedecnum = JEDEC_ATMEL;
13075         tg3_flag_set(tp, NVRAM_BUFFERED);
13076         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13077 }
13078
13079 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
13080 {
13081         u32 nvcfg1;
13082
13083         nvcfg1 = tr32(NVRAM_CFG1);
13084
13085         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13086         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13087         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13088                 tp->nvram_jedecnum = JEDEC_ATMEL;
13089                 tg3_flag_set(tp, NVRAM_BUFFERED);
13090                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13091
13092                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13093                 tw32(NVRAM_CFG1, nvcfg1);
13094                 return;
13095         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13096         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13097         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13098         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13099         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13100         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13101         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13102                 tp->nvram_jedecnum = JEDEC_ATMEL;
13103                 tg3_flag_set(tp, NVRAM_BUFFERED);
13104                 tg3_flag_set(tp, FLASH);
13105
13106                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13107                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13108                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13109                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13110                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13111                         break;
13112                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13113                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13114                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13115                         break;
13116                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13117                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13118                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13119                         break;
13120                 }
13121                 break;
13122         case FLASH_5752VENDOR_ST_M45PE10:
13123         case FLASH_5752VENDOR_ST_M45PE20:
13124         case FLASH_5752VENDOR_ST_M45PE40:
13125                 tp->nvram_jedecnum = JEDEC_ST;
13126                 tg3_flag_set(tp, NVRAM_BUFFERED);
13127                 tg3_flag_set(tp, FLASH);
13128
13129                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13130                 case FLASH_5752VENDOR_ST_M45PE10:
13131                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13132                         break;
13133                 case FLASH_5752VENDOR_ST_M45PE20:
13134                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13135                         break;
13136                 case FLASH_5752VENDOR_ST_M45PE40:
13137                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13138                         break;
13139                 }
13140                 break;
13141         default:
13142                 tg3_flag_set(tp, NO_NVRAM);
13143                 return;
13144         }
13145
13146         tg3_nvram_get_pagesize(tp, nvcfg1);
13147         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13148                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13149 }
13150
13151
13152 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
13153 {
13154         u32 nvcfg1;
13155
13156         nvcfg1 = tr32(NVRAM_CFG1);
13157
13158         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13159         case FLASH_5717VENDOR_ATMEL_EEPROM:
13160         case FLASH_5717VENDOR_MICRO_EEPROM:
13161                 tp->nvram_jedecnum = JEDEC_ATMEL;
13162                 tg3_flag_set(tp, NVRAM_BUFFERED);
13163                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13164
13165                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13166                 tw32(NVRAM_CFG1, nvcfg1);
13167                 return;
13168         case FLASH_5717VENDOR_ATMEL_MDB011D:
13169         case FLASH_5717VENDOR_ATMEL_ADB011B:
13170         case FLASH_5717VENDOR_ATMEL_ADB011D:
13171         case FLASH_5717VENDOR_ATMEL_MDB021D:
13172         case FLASH_5717VENDOR_ATMEL_ADB021B:
13173         case FLASH_5717VENDOR_ATMEL_ADB021D:
13174         case FLASH_5717VENDOR_ATMEL_45USPT:
13175                 tp->nvram_jedecnum = JEDEC_ATMEL;
13176                 tg3_flag_set(tp, NVRAM_BUFFERED);
13177                 tg3_flag_set(tp, FLASH);
13178
13179                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13180                 case FLASH_5717VENDOR_ATMEL_MDB021D:
13181                         /* Detect size with tg3_nvram_get_size() */
13182                         break;
13183                 case FLASH_5717VENDOR_ATMEL_ADB021B:
13184                 case FLASH_5717VENDOR_ATMEL_ADB021D:
13185                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13186                         break;
13187                 default:
13188                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13189                         break;
13190                 }
13191                 break;
13192         case FLASH_5717VENDOR_ST_M_M25PE10:
13193         case FLASH_5717VENDOR_ST_A_M25PE10:
13194         case FLASH_5717VENDOR_ST_M_M45PE10:
13195         case FLASH_5717VENDOR_ST_A_M45PE10:
13196         case FLASH_5717VENDOR_ST_M_M25PE20:
13197         case FLASH_5717VENDOR_ST_A_M25PE20:
13198         case FLASH_5717VENDOR_ST_M_M45PE20:
13199         case FLASH_5717VENDOR_ST_A_M45PE20:
13200         case FLASH_5717VENDOR_ST_25USPT:
13201         case FLASH_5717VENDOR_ST_45USPT:
13202                 tp->nvram_jedecnum = JEDEC_ST;
13203                 tg3_flag_set(tp, NVRAM_BUFFERED);
13204                 tg3_flag_set(tp, FLASH);
13205
13206                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13207                 case FLASH_5717VENDOR_ST_M_M25PE20:
13208                 case FLASH_5717VENDOR_ST_M_M45PE20:
13209                         /* Detect size with tg3_nvram_get_size() */
13210                         break;
13211                 case FLASH_5717VENDOR_ST_A_M25PE20:
13212                 case FLASH_5717VENDOR_ST_A_M45PE20:
13213                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13214                         break;
13215                 default:
13216                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13217                         break;
13218                 }
13219                 break;
13220         default:
13221                 tg3_flag_set(tp, NO_NVRAM);
13222                 return;
13223         }
13224
13225         tg3_nvram_get_pagesize(tp, nvcfg1);
13226         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13227                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13228 }
13229
13230 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
13231 {
13232         u32 nvcfg1, nvmpinstrp;
13233
13234         nvcfg1 = tr32(NVRAM_CFG1);
13235         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13236
13237         switch (nvmpinstrp) {
13238         case FLASH_5720_EEPROM_HD:
13239         case FLASH_5720_EEPROM_LD:
13240                 tp->nvram_jedecnum = JEDEC_ATMEL;
13241                 tg3_flag_set(tp, NVRAM_BUFFERED);
13242
13243                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13244                 tw32(NVRAM_CFG1, nvcfg1);
13245                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13246                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13247                 else
13248                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13249                 return;
13250         case FLASH_5720VENDOR_M_ATMEL_DB011D:
13251         case FLASH_5720VENDOR_A_ATMEL_DB011B:
13252         case FLASH_5720VENDOR_A_ATMEL_DB011D:
13253         case FLASH_5720VENDOR_M_ATMEL_DB021D:
13254         case FLASH_5720VENDOR_A_ATMEL_DB021B:
13255         case FLASH_5720VENDOR_A_ATMEL_DB021D:
13256         case FLASH_5720VENDOR_M_ATMEL_DB041D:
13257         case FLASH_5720VENDOR_A_ATMEL_DB041B:
13258         case FLASH_5720VENDOR_A_ATMEL_DB041D:
13259         case FLASH_5720VENDOR_M_ATMEL_DB081D:
13260         case FLASH_5720VENDOR_A_ATMEL_DB081D:
13261         case FLASH_5720VENDOR_ATMEL_45USPT:
13262                 tp->nvram_jedecnum = JEDEC_ATMEL;
13263                 tg3_flag_set(tp, NVRAM_BUFFERED);
13264                 tg3_flag_set(tp, FLASH);
13265
13266                 switch (nvmpinstrp) {
13267                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13268                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13269                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13270                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13271                         break;
13272                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13273                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13274                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13275                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13276                         break;
13277                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13278                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13279                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13280                         break;
13281                 default:
13282                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13283                         break;
13284                 }
13285                 break;
13286         case FLASH_5720VENDOR_M_ST_M25PE10:
13287         case FLASH_5720VENDOR_M_ST_M45PE10:
13288         case FLASH_5720VENDOR_A_ST_M25PE10:
13289         case FLASH_5720VENDOR_A_ST_M45PE10:
13290         case FLASH_5720VENDOR_M_ST_M25PE20:
13291         case FLASH_5720VENDOR_M_ST_M45PE20:
13292         case FLASH_5720VENDOR_A_ST_M25PE20:
13293         case FLASH_5720VENDOR_A_ST_M45PE20:
13294         case FLASH_5720VENDOR_M_ST_M25PE40:
13295         case FLASH_5720VENDOR_M_ST_M45PE40:
13296         case FLASH_5720VENDOR_A_ST_M25PE40:
13297         case FLASH_5720VENDOR_A_ST_M45PE40:
13298         case FLASH_5720VENDOR_M_ST_M25PE80:
13299         case FLASH_5720VENDOR_M_ST_M45PE80:
13300         case FLASH_5720VENDOR_A_ST_M25PE80:
13301         case FLASH_5720VENDOR_A_ST_M45PE80:
13302         case FLASH_5720VENDOR_ST_25USPT:
13303         case FLASH_5720VENDOR_ST_45USPT:
13304                 tp->nvram_jedecnum = JEDEC_ST;
13305                 tg3_flag_set(tp, NVRAM_BUFFERED);
13306                 tg3_flag_set(tp, FLASH);
13307
13308                 switch (nvmpinstrp) {
13309                 case FLASH_5720VENDOR_M_ST_M25PE20:
13310                 case FLASH_5720VENDOR_M_ST_M45PE20:
13311                 case FLASH_5720VENDOR_A_ST_M25PE20:
13312                 case FLASH_5720VENDOR_A_ST_M45PE20:
13313                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13314                         break;
13315                 case FLASH_5720VENDOR_M_ST_M25PE40:
13316                 case FLASH_5720VENDOR_M_ST_M45PE40:
13317                 case FLASH_5720VENDOR_A_ST_M25PE40:
13318                 case FLASH_5720VENDOR_A_ST_M45PE40:
13319                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13320                         break;
13321                 case FLASH_5720VENDOR_M_ST_M25PE80:
13322                 case FLASH_5720VENDOR_M_ST_M45PE80:
13323                 case FLASH_5720VENDOR_A_ST_M25PE80:
13324                 case FLASH_5720VENDOR_A_ST_M45PE80:
13325                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13326                         break;
13327                 default:
13328                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13329                         break;
13330                 }
13331                 break;
13332         default:
13333                 tg3_flag_set(tp, NO_NVRAM);
13334                 return;
13335         }
13336
13337         tg3_nvram_get_pagesize(tp, nvcfg1);
13338         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13339                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13340 }
13341
13342 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13343 static void __devinit tg3_nvram_init(struct tg3 *tp)
13344 {
13345         tw32_f(GRC_EEPROM_ADDR,
13346              (EEPROM_ADDR_FSM_RESET |
13347               (EEPROM_DEFAULT_CLOCK_PERIOD <<
13348                EEPROM_ADDR_CLKPERD_SHIFT)));
13349
13350         msleep(1);
13351
13352         /* Enable serial EEPROM (seeprom) accesses. */
13353         tw32_f(GRC_LOCAL_CTRL,
13354              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13355         udelay(100);
13356
13357         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13358             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13359                 tg3_flag_set(tp, NVRAM);
13360
13361                 if (tg3_nvram_lock(tp)) {
13362                         netdev_warn(tp->dev,
13363                                     "Cannot get nvram lock, %s failed\n",
13364                                     __func__);
13365                         return;
13366                 }
13367                 tg3_enable_nvram_access(tp);
13368
13369                 tp->nvram_size = 0;
13370
13371                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13372                         tg3_get_5752_nvram_info(tp);
13373                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13374                         tg3_get_5755_nvram_info(tp);
13375                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13376                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13377                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13378                         tg3_get_5787_nvram_info(tp);
13379                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13380                         tg3_get_5761_nvram_info(tp);
13381                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13382                         tg3_get_5906_nvram_info(tp);
13383                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13384                          tg3_flag(tp, 57765_CLASS))
13385                         tg3_get_57780_nvram_info(tp);
13386                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13387                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13388                         tg3_get_5717_nvram_info(tp);
13389                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13390                         tg3_get_5720_nvram_info(tp);
13391                 else
13392                         tg3_get_nvram_info(tp);
13393
13394                 if (tp->nvram_size == 0)
13395                         tg3_get_nvram_size(tp);
13396
13397                 tg3_disable_nvram_access(tp);
13398                 tg3_nvram_unlock(tp);
13399
13400         } else {
13401                 tg3_flag_clear(tp, NVRAM);
13402                 tg3_flag_clear(tp, NVRAM_BUFFERED);
13403
13404                 tg3_get_eeprom_size(tp);
13405         }
13406 }
13407
13408 struct subsys_tbl_ent {
13409         u16 subsys_vendor, subsys_devid;
13410         u32 phy_id;
13411 };
13412
13413 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13414         /* Broadcom boards. */
13415         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13416           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13417         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13418           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13419         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13420           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13421         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13422           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13423         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13424           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13425         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13426           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13427         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13428           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13429         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13430           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13431         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13432           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13433         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13434           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13435         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13436           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13437
13438         /* 3com boards. */
13439         { TG3PCI_SUBVENDOR_ID_3COM,
13440           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13441         { TG3PCI_SUBVENDOR_ID_3COM,
13442           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13443         { TG3PCI_SUBVENDOR_ID_3COM,
13444           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13445         { TG3PCI_SUBVENDOR_ID_3COM,
13446           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13447         { TG3PCI_SUBVENDOR_ID_3COM,
13448           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13449
13450         /* DELL boards. */
13451         { TG3PCI_SUBVENDOR_ID_DELL,
13452           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13453         { TG3PCI_SUBVENDOR_ID_DELL,
13454           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13455         { TG3PCI_SUBVENDOR_ID_DELL,
13456           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13457         { TG3PCI_SUBVENDOR_ID_DELL,
13458           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13459
13460         /* Compaq boards. */
13461         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13462           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13463         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13464           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13465         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13466           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13467         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13468           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13469         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13470           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13471
13472         /* IBM boards. */
13473         { TG3PCI_SUBVENDOR_ID_IBM,
13474           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13475 };
13476
13477 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13478 {
13479         int i;
13480
13481         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13482                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13483                      tp->pdev->subsystem_vendor) &&
13484                     (subsys_id_to_phy_id[i].subsys_devid ==
13485                      tp->pdev->subsystem_device))
13486                         return &subsys_id_to_phy_id[i];
13487         }
13488         return NULL;
13489 }
13490
13491 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13492 {
13493         u32 val;
13494
13495         tp->phy_id = TG3_PHY_ID_INVALID;
13496         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13497
13498         /* Assume an onboard device and WOL capable by default.  */
13499         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13500         tg3_flag_set(tp, WOL_CAP);
13501
13502         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13503                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13504                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13505                         tg3_flag_set(tp, IS_NIC);
13506                 }
13507                 val = tr32(VCPU_CFGSHDW);
13508                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13509                         tg3_flag_set(tp, ASPM_WORKAROUND);
13510                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13511                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13512                         tg3_flag_set(tp, WOL_ENABLE);
13513                         device_set_wakeup_enable(&tp->pdev->dev, true);
13514                 }
13515                 goto done;
13516         }
13517
13518         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13519         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13520                 u32 nic_cfg, led_cfg;
13521                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13522                 int eeprom_phy_serdes = 0;
13523
13524                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13525                 tp->nic_sram_data_cfg = nic_cfg;
13526
13527                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13528                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13529                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13530                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13531                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13532                     (ver > 0) && (ver < 0x100))
13533                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13534
13535                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13536                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13537
13538                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13539                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13540                         eeprom_phy_serdes = 1;
13541
13542                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13543                 if (nic_phy_id != 0) {
13544                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13545                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13546
13547                         eeprom_phy_id  = (id1 >> 16) << 10;
13548                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13549                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13550                 } else
13551                         eeprom_phy_id = 0;
13552
13553                 tp->phy_id = eeprom_phy_id;
13554                 if (eeprom_phy_serdes) {
13555                         if (!tg3_flag(tp, 5705_PLUS))
13556                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13557                         else
13558                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13559                 }
13560
13561                 if (tg3_flag(tp, 5750_PLUS))
13562                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13563                                     SHASTA_EXT_LED_MODE_MASK);
13564                 else
13565                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13566
13567                 switch (led_cfg) {
13568                 default:
13569                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13570                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13571                         break;
13572
13573                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13574                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13575                         break;
13576
13577                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13578                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13579
13580                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13581                          * read from some older 5700/5701 bootcode.
13582                          */
13583                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13584                             ASIC_REV_5700 ||
13585                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13586                             ASIC_REV_5701)
13587                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13588
13589                         break;
13590
13591                 case SHASTA_EXT_LED_SHARED:
13592                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13593                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13594                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13595                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13596                                                  LED_CTRL_MODE_PHY_2);
13597                         break;
13598
13599                 case SHASTA_EXT_LED_MAC:
13600                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13601                         break;
13602
13603                 case SHASTA_EXT_LED_COMBO:
13604                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13605                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13606                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13607                                                  LED_CTRL_MODE_PHY_2);
13608                         break;
13609
13610                 }
13611
13612                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13613                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13614                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13615                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13616
13617                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13618                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13619
13620                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13621                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13622                         if ((tp->pdev->subsystem_vendor ==
13623                              PCI_VENDOR_ID_ARIMA) &&
13624                             (tp->pdev->subsystem_device == 0x205a ||
13625                              tp->pdev->subsystem_device == 0x2063))
13626                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13627                 } else {
13628                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13629                         tg3_flag_set(tp, IS_NIC);
13630                 }
13631
13632                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13633                         tg3_flag_set(tp, ENABLE_ASF);
13634                         if (tg3_flag(tp, 5750_PLUS))
13635                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13636                 }
13637
13638                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13639                     tg3_flag(tp, 5750_PLUS))
13640                         tg3_flag_set(tp, ENABLE_APE);
13641
13642                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13643                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13644                         tg3_flag_clear(tp, WOL_CAP);
13645
13646                 if (tg3_flag(tp, WOL_CAP) &&
13647                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13648                         tg3_flag_set(tp, WOL_ENABLE);
13649                         device_set_wakeup_enable(&tp->pdev->dev, true);
13650                 }
13651
13652                 if (cfg2 & (1 << 17))
13653                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13654
13655                 /* SerDes signal pre-emphasis in register 0x590 is set
13656                  * by the bootcode if bit 18 is set. */
13657                 if (cfg2 & (1 << 18))
13658                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13659
13660                 if ((tg3_flag(tp, 57765_PLUS) ||
13661                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13662                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13663                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13664                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13665
13666                 if (tg3_flag(tp, PCI_EXPRESS) &&
13667                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13668                     !tg3_flag(tp, 57765_PLUS)) {
13669                         u32 cfg3;
13670
13671                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13672                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13673                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13674                 }
13675
13676                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13677                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13678                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13679                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13680                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13681                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13682         }
13683 done:
13684         if (tg3_flag(tp, WOL_CAP))
13685                 device_set_wakeup_enable(&tp->pdev->dev,
13686                                          tg3_flag(tp, WOL_ENABLE));
13687         else
13688                 device_set_wakeup_capable(&tp->pdev->dev, false);
13689 }
13690
13691 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13692 {
13693         int i;
13694         u32 val;
13695
13696         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13697         tw32(OTP_CTRL, cmd);
13698
13699         /* Wait for up to 1 ms for command to execute. */
13700         for (i = 0; i < 100; i++) {
13701                 val = tr32(OTP_STATUS);
13702                 if (val & OTP_STATUS_CMD_DONE)
13703                         break;
13704                 udelay(10);
13705         }
13706
13707         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13708 }
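/* The loop above polls 100 times at 10 us intervals, i.e. the 1 ms
 * budget from the comment; re-testing OTP_STATUS_CMD_DONE after the
 * loop distinguishes success on the final iteration from a timeout,
 * which returns -EBUSY.
 */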
13709
13710 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13711  * configuration is a 32-bit value that straddles the alignment boundary.
13712  * We do two 32-bit reads and then shift and merge the results.
13713  */
13714 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13715 {
13716         u32 bhalf_otp, thalf_otp;
13717
13718         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13719
13720         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13721                 return 0;
13722
13723         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13724
13725         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13726                 return 0;
13727
13728         thalf_otp = tr32(OTP_READ_DATA);
13729
13730         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13731
13732         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13733                 return 0;
13734
13735         bhalf_otp = tr32(OTP_READ_DATA);
13736
13737         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13738 }
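/* Merge example (illustrative values): with thalf_otp == 0x1234abcd and
 * bhalf_otp == 0x9876ffff, the result is
 * ((0xabcd) << 16) | (0x9876ffff >> 16) == 0xabcd9876, i.e. the low half
 * of the first read supplies the upper 16 bits and the high half of the
 * second read supplies the lower 16 bits.
 */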
13739
13740 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13741 {
13742         u32 adv = ADVERTISED_Autoneg;
13743
13744         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13745                 adv |= ADVERTISED_1000baseT_Half |
13746                        ADVERTISED_1000baseT_Full;
13747
13748         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13749                 adv |= ADVERTISED_100baseT_Half |
13750                        ADVERTISED_100baseT_Full |
13751                        ADVERTISED_10baseT_Half |
13752                        ADVERTISED_10baseT_Full |
13753                        ADVERTISED_TP;
13754         else
13755                 adv |= ADVERTISED_FIBRE;
13756
13757         tp->link_config.advertising = adv;
13758         tp->link_config.speed = SPEED_UNKNOWN;
13759         tp->link_config.duplex = DUPLEX_UNKNOWN;
13760         tp->link_config.autoneg = AUTONEG_ENABLE;
13761         tp->link_config.active_speed = SPEED_UNKNOWN;
13762         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13763
13764         tp->old_link = -1;
13765 }
13766
13767 static int __devinit tg3_phy_probe(struct tg3 *tp)
13768 {
13769         u32 hw_phy_id_1, hw_phy_id_2;
13770         u32 hw_phy_id, hw_phy_id_masked;
13771         int err;
13772
13773         /* flow control autonegotiation is default behavior */
13774         tg3_flag_set(tp, PAUSE_AUTONEG);
13775         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13776
13777         if (tg3_flag(tp, ENABLE_APE)) {
13778                 switch (tp->pci_fn) {
13779                 case 0:
13780                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
13781                         break;
13782                 case 1:
13783                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
13784                         break;
13785                 case 2:
13786                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
13787                         break;
13788                 case 3:
13789                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
13790                         break;
13791                 }
13792         }
13793
13794         if (tg3_flag(tp, USE_PHYLIB))
13795                 return tg3_phy_init(tp);
13796
13797         /* Reading the PHY ID register can conflict with ASF
13798          * firmware access to the PHY hardware.
13799          */
13800         err = 0;
13801         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13802                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13803         } else {
13804                 /* Now read the physical PHY_ID from the chip and verify
13805                  * that it is sane.  If it doesn't look good, we fall back
13806                  * to the value found in the EEPROM area and, failing
13807                  * that, to the hard-coded subsystem-ID table.
13808                  */
13809                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13810                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13811
13812                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13813                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13814                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13815
13816                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13817         }
13818
13819         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13820                 tp->phy_id = hw_phy_id;
13821                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13822                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13823                 else
13824                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13825         } else {
13826                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13827                         /* Do nothing, phy ID already set up in
13828                          * tg3_get_eeprom_hw_cfg().
13829                          */
13830                 } else {
13831                         struct subsys_tbl_ent *p;
13832
13833                         /* No eeprom signature?  Try the hardcoded
13834                          * subsys device table.
13835                          */
13836                         p = tg3_lookup_by_subsys(tp);
13837                         if (!p)
13838                                 return -ENODEV;
13839
13840                         tp->phy_id = p->phy_id;
13841                         if (!tp->phy_id ||
13842                             tp->phy_id == TG3_PHY_ID_BCM8002)
13843                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13844                 }
13845         }
13846
13847         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13848             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13849              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13850              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13851               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13852              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13853               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13854                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13855
13856         tg3_phy_init_link_config(tp);
13857
13858         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13859             !tg3_flag(tp, ENABLE_APE) &&
13860             !tg3_flag(tp, ENABLE_ASF)) {
13861                 u32 bmsr, dummy;
13862
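                /* BMSR's link-status bit is latched low, so it is read
                 * twice: the second read reflects the current state.
                 */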
13863                 tg3_readphy(tp, MII_BMSR, &bmsr);
13864                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13865                     (bmsr & BMSR_LSTATUS))
13866                         goto skip_phy_reset;
13867
13868                 err = tg3_phy_reset(tp);
13869                 if (err)
13870                         return err;
13871
13872                 tg3_phy_set_wirespeed(tp);
13873
13874                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13875                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13876                                             tp->link_config.flowctrl);
13877
13878                         tg3_writephy(tp, MII_BMCR,
13879                                      BMCR_ANENABLE | BMCR_ANRESTART);
13880                 }
13881         }
13882
13883 skip_phy_reset:
13884         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13885                 err = tg3_init_5401phy_dsp(tp);
13886                 if (err)
13887                         return err;
13888
13889                 err = tg3_init_5401phy_dsp(tp);
13890         }
13891
13892         return err;
13893 }
13894
13895 static void __devinit tg3_read_vpd(struct tg3 *tp)
13896 {
13897         u8 *vpd_data;
13898         unsigned int block_end, rosize, len;
13899         u32 vpdlen;
13900         int j, i = 0;
13901
13902         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13903         if (!vpd_data)
13904                 goto out_no_vpd;
13905
13906         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13907         if (i < 0)
13908                 goto out_not_found;
13909
13910         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13911         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13912         i += PCI_VPD_LRDT_TAG_SIZE;
13913
13914         if (block_end > vpdlen)
13915                 goto out_not_found;
13916
13917         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13918                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13919         if (j > 0) {
13920                 len = pci_vpd_info_field_size(&vpd_data[j]);
13921
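                      /* "1028" is PCI vendor ID 0x1028 (Dell) in ASCII; the
                       * VENDOR0 firmware string read below is only trusted
                       * on Dell boards.
                       */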
13922                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13923                 if (j + len > block_end || len != 4 ||
13924                     memcmp(&vpd_data[j], "1028", 4))
13925                         goto partno;
13926
13927                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13928                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13929                 if (j < 0)
13930                         goto partno;
13931
13932                 len = pci_vpd_info_field_size(&vpd_data[j]);
13933
13934                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13935                 if (j + len > block_end)
13936                         goto partno;
13937
13938                 memcpy(tp->fw_ver, &vpd_data[j], len);
13939                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13940         }
13941
13942 partno:
13943         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13944                                       PCI_VPD_RO_KEYWORD_PARTNO);
13945         if (i < 0)
13946                 goto out_not_found;
13947
13948         len = pci_vpd_info_field_size(&vpd_data[i]);
13949
13950         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13951         if (len > TG3_BPN_SIZE ||
13952             (len + i) > vpdlen)
13953                 goto out_not_found;
13954
13955         memcpy(tp->board_part_number, &vpd_data[i], len);
13956
13957 out_not_found:
13958         kfree(vpd_data);
13959         if (tp->board_part_number[0])
13960                 return;
13961
13962 out_no_vpd:
13963         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13964                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13965                         strcpy(tp->board_part_number, "BCM5717");
13966                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13967                         strcpy(tp->board_part_number, "BCM5718");
13968                 else
13969                         goto nomatch;
13970         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13971                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13972                         strcpy(tp->board_part_number, "BCM57780");
13973                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13974                         strcpy(tp->board_part_number, "BCM57760");
13975                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13976                         strcpy(tp->board_part_number, "BCM57790");
13977                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13978                         strcpy(tp->board_part_number, "BCM57788");
13979                 else
13980                         goto nomatch;
13981         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13982                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13983                         strcpy(tp->board_part_number, "BCM57761");
13984                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13985                         strcpy(tp->board_part_number, "BCM57765");
13986                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13987                         strcpy(tp->board_part_number, "BCM57781");
13988                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13989                         strcpy(tp->board_part_number, "BCM57785");
13990                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13991                         strcpy(tp->board_part_number, "BCM57791");
13992                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13993                         strcpy(tp->board_part_number, "BCM57795");
13994                 else
13995                         goto nomatch;
13996         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13997                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13998                         strcpy(tp->board_part_number, "BCM57762");
13999                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14000                         strcpy(tp->board_part_number, "BCM57766");
14001                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14002                         strcpy(tp->board_part_number, "BCM57782");
14003                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14004                         strcpy(tp->board_part_number, "BCM57786");
14005                 else
14006                         goto nomatch;
14007         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14008                 strcpy(tp->board_part_number, "BCM95906");
14009         } else {
14010 nomatch:
14011                 strcpy(tp->board_part_number, "none");
14012         }
14013 }
14014
14015 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14016 {
14017         u32 val;
14018
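              /* A bootcode image is considered valid when its first word
               * carries the 0x0c000000 signature in the top six bits and
               * the following word is zero.
               */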
14019         if (tg3_nvram_read(tp, offset, &val) ||
14020             (val & 0xfc000000) != 0x0c000000 ||
14021             tg3_nvram_read(tp, offset + 4, &val) ||
14022             val != 0)
14023                 return 0;
14024
14025         return 1;
14026 }
14027
14028 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
14029 {
14030         u32 val, offset, start, ver_offset;
14031         int i, dst_off;
14032         bool newver = false;
14033
14034         if (tg3_nvram_read(tp, 0xc, &offset) ||
14035             tg3_nvram_read(tp, 0x4, &start))
14036                 return;
14037
14038         offset = tg3_nvram_logical_addr(tp, offset);
14039
14040         if (tg3_nvram_read(tp, offset, &val))
14041                 return;
14042
14043         if ((val & 0xfc000000) == 0x0c000000) {
14044                 if (tg3_nvram_read(tp, offset + 4, &val))
14045                         return;
14046
14047                 if (val == 0)
14048                         newver = true;
14049         }
14050
14051         dst_off = strlen(tp->fw_ver);
14052
14053         if (newver) {
14054                 if (TG3_VER_SIZE - dst_off < 16 ||
14055                     tg3_nvram_read(tp, offset + 8, &ver_offset))
14056                         return;
14057
14058                 offset = offset + ver_offset - start;
14059                 for (i = 0; i < 16; i += 4) {
14060                         __be32 v;
14061                         if (tg3_nvram_read_be32(tp, offset + i, &v))
14062                                 return;
14063
14064                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14065                 }
14066         } else {
14067                 u32 major, minor;
14068
14069                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14070                         return;
14071
14072                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14073                         TG3_NVM_BCVER_MAJSFT;
14074                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14075                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14076                          "v%d.%02d", major, minor);
14077         }
14078 }
14079
14080 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
14081 {
14082         u32 val, major, minor;
14083
14084         /* Use native endian representation */
14085         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14086                 return;
14087
14088         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14089                 TG3_NVM_HWSB_CFG1_MAJSFT;
14090         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14091                 TG3_NVM_HWSB_CFG1_MINSFT;
14092
14093         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
14094 }
14095
14096 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
14097 {
14098         u32 offset, major, minor, build;
14099
14100         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14101
14102         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14103                 return;
14104
14105         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14106         case TG3_EEPROM_SB_REVISION_0:
14107                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14108                 break;
14109         case TG3_EEPROM_SB_REVISION_2:
14110                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14111                 break;
14112         case TG3_EEPROM_SB_REVISION_3:
14113                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14114                 break;
14115         case TG3_EEPROM_SB_REVISION_4:
14116                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14117                 break;
14118         case TG3_EEPROM_SB_REVISION_5:
14119                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14120                 break;
14121         case TG3_EEPROM_SB_REVISION_6:
14122                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14123                 break;
14124         default:
14125                 return;
14126         }
14127
14128         if (tg3_nvram_read(tp, offset, &val))
14129                 return;
14130
14131         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14132                 TG3_EEPROM_SB_EDH_BLD_SHFT;
14133         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14134                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14135         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
14136
14137         if (minor > 99 || build > 26)
14138                 return;
14139
14140         offset = strlen(tp->fw_ver);
14141         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14142                  " v%d.%02d", major, minor);
14143
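              /* Builds 1 through 26 are encoded as a trailing letter
               * 'a'..'z'; larger build numbers were rejected above.
               */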
14144         if (build > 0) {
14145                 offset = strlen(tp->fw_ver);
14146                 if (offset < TG3_VER_SIZE - 1)
14147                         tp->fw_ver[offset] = 'a' + build - 1;
14148         }
14149 }
14150
14151 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
14152 {
14153         u32 val, offset, start;
14154         int i, vlen;
14155
14156         for (offset = TG3_NVM_DIR_START;
14157              offset < TG3_NVM_DIR_END;
14158              offset += TG3_NVM_DIRENT_SIZE) {
14159                 if (tg3_nvram_read(tp, offset, &val))
14160                         return;
14161
14162                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14163                         break;
14164         }
14165
14166         if (offset == TG3_NVM_DIR_END)
14167                 return;
14168
14169         if (!tg3_flag(tp, 5705_PLUS))
14170                 start = 0x08000000;
14171         else if (tg3_nvram_read(tp, offset - 4, &start))
14172                 return;
14173
14174         if (tg3_nvram_read(tp, offset + 4, &offset) ||
14175             !tg3_fw_img_is_valid(tp, offset) ||
14176             tg3_nvram_read(tp, offset + 8, &val))
14177                 return;
14178
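              /* Translate the pointer read from the image header (val),
               * which is relative to the image load address (start), into
               * an absolute NVRAM offset.
               */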
14179         offset += val - start;
14180
14181         vlen = strlen(tp->fw_ver);
14182
14183         tp->fw_ver[vlen++] = ',';
14184         tp->fw_ver[vlen++] = ' ';
14185
14186         for (i = 0; i < 4; i++) {
14187                 __be32 v;
14188                 if (tg3_nvram_read_be32(tp, offset, &v))
14189                         return;
14190
14191                 offset += sizeof(v);
14192
14193                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14194                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14195                         break;
14196                 }
14197
14198                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14199                 vlen += sizeof(v);
14200         }
14201 }
14202
14203 static void __devinit tg3_probe_ncsi(struct tg3 *tp)
14204 {
14205         u32 apedata;
14206
14207         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14208         if (apedata != APE_SEG_SIG_MAGIC)
14209                 return;
14210
14211         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14212         if (!(apedata & APE_FW_STATUS_READY))
14213                 return;
14214
14215         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14216                 tg3_flag_set(tp, APE_HAS_NCSI);
14217 }
14218
14219 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
14220 {
14221         int vlen;
14222         u32 apedata;
14223         char *fwtype;
14224
14225         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14226
14227         if (tg3_flag(tp, APE_HAS_NCSI))
14228                 fwtype = "NCSI";
14229         else
14230                 fwtype = "DASH";
14231
14232         vlen = strlen(tp->fw_ver);
14233
14234         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14235                  fwtype,
14236                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14237                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14238                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14239                  (apedata & APE_FW_VERSION_BLDMSK));
14240 }
14241
14242 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
14243 {
14244         u32 val;
14245         bool vpd_vers = false;
14246
14247         if (tp->fw_ver[0] != 0)
14248                 vpd_vers = true;
14249
14250         if (tg3_flag(tp, NO_NVRAM)) {
14251                 strcat(tp->fw_ver, "sb");
14252                 return;
14253         }
14254
14255         if (tg3_nvram_read(tp, 0, &val))
14256                 return;
14257
14258         if (val == TG3_EEPROM_MAGIC)
14259                 tg3_read_bc_ver(tp);
14260         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14261                 tg3_read_sb_ver(tp, val);
14262         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14263                 tg3_read_hwsb_ver(tp);
14264
14265         if (tg3_flag(tp, ENABLE_ASF)) {
14266                 if (tg3_flag(tp, ENABLE_APE)) {
14267                         tg3_probe_ncsi(tp);
14268                         if (!vpd_vers)
14269                                 tg3_read_dash_ver(tp);
14270                 } else if (!vpd_vers) {
14271                         tg3_read_mgmtfw_ver(tp);
14272                 }
14273         }
14274
14275         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
14276 }
14277
14278 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14279 {
14280         if (tg3_flag(tp, LRG_PROD_RING_CAP))
14281                 return TG3_RX_RET_MAX_SIZE_5717;
14282         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14283                 return TG3_RX_RET_MAX_SIZE_5700;
14284         else
14285                 return TG3_RX_RET_MAX_SIZE_5705;
14286 }
14287
14288 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14289         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14290         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14291         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14292         { },
14293 };
14294
14295 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14296 {
14297         struct pci_dev *peer;
14298         unsigned int func, devnr = tp->pdev->devfn & ~7;
14299
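              /* Mask off the function bits of devfn and probe all eight
               * functions in the same slot for a companion tg3 device.
               */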
14300         for (func = 0; func < 8; func++) {
14301                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14302                 if (peer && peer != tp->pdev)
14303                         break;
14304                 pci_dev_put(peer);
14305         }
14306         /* 5704 can be configured in single-port mode; set peer to
14307          * tp->pdev in that case.
14308          */
14309         if (!peer) {
14310                 peer = tp->pdev;
14311                 return peer;
14312         }
14313
14314         /*
14315          * We don't need to keep the refcount elevated; there's no way
14316          * to remove one half of this device without removing the other
14317          */
14318         pci_dev_put(peer);
14319
14320         return peer;
14321 }
14322
14323 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14324 {
14325         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
14326         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
14327                 u32 reg;
14328
14329                 /* All devices that use the alternate
14330                  * ASIC REV location have a CPMU.
14331                  */
14332                 tg3_flag_set(tp, CPMU_PRESENT);
14333
14334                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14335                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
14336                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
14337                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
14338                         reg = TG3PCI_GEN2_PRODID_ASICREV;
14339                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
14340                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
14341                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14342                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14343                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14344                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14345                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14346                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14347                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14348                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14349                         reg = TG3PCI_GEN15_PRODID_ASICREV;
14350                 else
14351                         reg = TG3PCI_PRODID_ASICREV;
14352
14353                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14354         }
14355
14356         /* Wrong chip ID in 5752 A0. This code can be removed later
14357          * as A0 is not in production.
14358          */
14359         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14360                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14361
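              /* The generation flags set below nest: 5717_PLUS and
               * 57765_CLASS imply 57765_PLUS, which implies 5755_PLUS,
               * which implies 5750_PLUS, which implies 5705_PLUS.
               */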
14362         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14363             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14364             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14365                 tg3_flag_set(tp, 5717_PLUS);
14366
14367         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14368             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14369                 tg3_flag_set(tp, 57765_CLASS);
14370
14371         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14372                 tg3_flag_set(tp, 57765_PLUS);
14373
14374         /* Intentionally exclude ASIC_REV_5906 */
14375         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14376             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14377             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14378             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14379             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14380             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14381             tg3_flag(tp, 57765_PLUS))
14382                 tg3_flag_set(tp, 5755_PLUS);
14383
14384         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14385             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14386                 tg3_flag_set(tp, 5780_CLASS);
14387
14388         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14389             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14390             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14391             tg3_flag(tp, 5755_PLUS) ||
14392             tg3_flag(tp, 5780_CLASS))
14393                 tg3_flag_set(tp, 5750_PLUS);
14394
14395         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14396             tg3_flag(tp, 5750_PLUS))
14397                 tg3_flag_set(tp, 5705_PLUS);
14398 }
14399
14400 static int __devinit tg3_get_invariants(struct tg3 *tp)
14401 {
14402         u32 misc_ctrl_reg;
14403         u32 pci_state_reg, grc_misc_cfg;
14404         u32 val;
14405         u16 pci_cmd;
14406         int err;
14407
14408         /* Force memory write invalidate off.  If we leave it on,
14409          * then on 5700_BX chips we have to enable a workaround.
14410          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14411          * to match the cacheline size.  The Broadcom driver has this
14412          * workaround but turns MWI off all the time, so it never uses
14413          * it.  This seems to suggest that the workaround is insufficient.
14414          */
14415         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14416         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14417         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14418
14419         /* Important! -- Make sure register accesses are byteswapped
14420          * correctly.  Also, for those chips that require it, make
14421          * sure that indirect register accesses are enabled before
14422          * the first operation.
14423          */
14424         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14425                               &misc_ctrl_reg);
14426         tp->misc_host_ctrl |= (misc_ctrl_reg &
14427                                MISC_HOST_CTRL_CHIPREV);
14428         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14429                                tp->misc_host_ctrl);
14430
14431         tg3_detect_asic_rev(tp, misc_ctrl_reg);
14432
14433         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14434          * we need to disable memory and use config. cycles
14435          * only to access all registers. The 5702/03 chips
14436          * can mistakenly decode the special cycles from the
14437          * ICH chipsets as memory write cycles, causing corruption
14438          * of register and memory space. Only certain ICH bridges
14439          * will drive special cycles with non-zero data during the
14440          * address phase which can fall within the 5703's address
14441          * range. This is not an ICH bug as the PCI spec allows
14442          * non-zero address during special cycles. However, only
14443          * these ICH bridges are known to drive non-zero addresses
14444          * during special cycles.
14445          *
14446          * Since special cycles do not cross PCI bridges, we only
14447          * enable this workaround if the 5703 is on the secondary
14448          * bus of these ICH bridges.
14449          */
14450         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14451             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14452                 static struct tg3_dev_id {
14453                         u32     vendor;
14454                         u32     device;
14455                         u32     rev;
14456                 } ich_chipsets[] = {
14457                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14458                           PCI_ANY_ID },
14459                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14460                           PCI_ANY_ID },
14461                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14462                           0xa },
14463                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14464                           PCI_ANY_ID },
14465                         { },
14466                 };
14467                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14468                 struct pci_dev *bridge = NULL;
14469
14470                 while (pci_id->vendor != 0) {
14471                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
14472                                                 bridge);
14473                         if (!bridge) {
14474                                 pci_id++;
14475                                 continue;
14476                         }
14477                         if (pci_id->rev != PCI_ANY_ID) {
14478                                 if (bridge->revision > pci_id->rev)
14479                                         continue;
14480                         }
14481                         if (bridge->subordinate &&
14482                             (bridge->subordinate->number ==
14483                              tp->pdev->bus->number)) {
14484                                 tg3_flag_set(tp, ICH_WORKAROUND);
14485                                 pci_dev_put(bridge);
14486                                 break;
14487                         }
14488                 }
14489         }
14490
14491         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14492                 static struct tg3_dev_id {
14493                         u32     vendor;
14494                         u32     device;
14495                 } bridge_chipsets[] = {
14496                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14497                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14498                         { },
14499                 };
14500                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14501                 struct pci_dev *bridge = NULL;
14502
14503                 while (pci_id->vendor != 0) {
14504                         bridge = pci_get_device(pci_id->vendor,
14505                                                 pci_id->device,
14506                                                 bridge);
14507                         if (!bridge) {
14508                                 pci_id++;
14509                                 continue;
14510                         }
14511                         if (bridge->subordinate &&
14512                             (bridge->subordinate->number <=
14513                              tp->pdev->bus->number) &&
14514                             (bridge->subordinate->busn_res.end >=
14515                              tp->pdev->bus->number)) {
14516                                 tg3_flag_set(tp, 5701_DMA_BUG);
14517                                 pci_dev_put(bridge);
14518                                 break;
14519                         }
14520                 }
14521         }
14522
14523         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14524          * DMA addresses > 40-bit. This bridge may have additional
14525          * 57xx devices behind it in some 4-port NIC designs, for example.
14526          * Any tg3 device found behind the bridge will also need the 40-bit
14527          * DMA workaround.
14528          */
14529         if (tg3_flag(tp, 5780_CLASS)) {
14530                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14531                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14532         } else {
14533                 struct pci_dev *bridge = NULL;
14534
14535                 do {
14536                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14537                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
14538                                                 bridge);
14539                         if (bridge && bridge->subordinate &&
14540                             (bridge->subordinate->number <=
14541                              tp->pdev->bus->number) &&
14542                             (bridge->subordinate->busn_res.end >=
14543                              tp->pdev->bus->number)) {
14544                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14545                                 pci_dev_put(bridge);
14546                                 break;
14547                         }
14548                 } while (bridge);
14549         }
14550
14551         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14552             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14553                 tp->pdev_peer = tg3_find_peer(tp);
14554
14555         /* Determine TSO capabilities */
14556         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14557                 ; /* Do nothing. HW bug. */
14558         else if (tg3_flag(tp, 57765_PLUS))
14559                 tg3_flag_set(tp, HW_TSO_3);
14560         else if (tg3_flag(tp, 5755_PLUS) ||
14561                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14562                 tg3_flag_set(tp, HW_TSO_2);
14563         else if (tg3_flag(tp, 5750_PLUS)) {
14564                 tg3_flag_set(tp, HW_TSO_1);
14565                 tg3_flag_set(tp, TSO_BUG);
14566                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14567                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14568                         tg3_flag_clear(tp, TSO_BUG);
14569         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14570                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14571                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14572                 tg3_flag_set(tp, TSO_BUG);
14573                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14574                         tp->fw_needed = FIRMWARE_TG3TSO5;
14575                 else
14576                         tp->fw_needed = FIRMWARE_TG3TSO;
14577         }
14578
14579         /* Selectively allow TSO based on operating conditions */
14580         if (tg3_flag(tp, HW_TSO_1) ||
14581             tg3_flag(tp, HW_TSO_2) ||
14582             tg3_flag(tp, HW_TSO_3) ||
14583             tp->fw_needed) {
14584                 /* For firmware TSO, assume ASF is disabled.
14585                  * We'll disable TSO later if we discover ASF
14586                  * is enabled in tg3_get_eeprom_hw_cfg().
14587                  */
14588                 tg3_flag_set(tp, TSO_CAPABLE);
14589         } else {
14590                 tg3_flag_clear(tp, TSO_CAPABLE);
14591                 tg3_flag_clear(tp, TSO_BUG);
14592                 tp->fw_needed = NULL;
14593         }
14594
14595         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14596                 tp->fw_needed = FIRMWARE_TG3;
14597
14598         tp->irq_max = 1;
14599
14600         if (tg3_flag(tp, 5750_PLUS)) {
14601                 tg3_flag_set(tp, SUPPORT_MSI);
14602                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14603                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14604                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14605                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14606                      tp->pdev_peer == tp->pdev))
14607                         tg3_flag_clear(tp, SUPPORT_MSI);
14608
14609                 if (tg3_flag(tp, 5755_PLUS) ||
14610                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14611                         tg3_flag_set(tp, 1SHOT_MSI);
14612                 }
14613
14614                 if (tg3_flag(tp, 57765_PLUS)) {
14615                         tg3_flag_set(tp, SUPPORT_MSIX);
14616                         tp->irq_max = TG3_IRQ_MAX_VECS;
14617                 }
14618         }
14619
14620         tp->txq_max = 1;
14621         tp->rxq_max = 1;
14622         if (tp->irq_max > 1) {
14623                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
14624                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
14625
14626                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14627                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14628                         tp->txq_max = tp->irq_max - 1;
14629         }
14630
14631         if (tg3_flag(tp, 5755_PLUS) ||
14632             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14633                 tg3_flag_set(tp, SHORT_DMA_BUG);
14634
14635         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14636                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14637
14638         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14639             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14640             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14641                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14642
14643         if (tg3_flag(tp, 57765_PLUS) &&
14644             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14645                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14646
14647         if (!tg3_flag(tp, 5705_PLUS) ||
14648             tg3_flag(tp, 5780_CLASS) ||
14649             tg3_flag(tp, USE_JUMBO_BDFLAG))
14650                 tg3_flag_set(tp, JUMBO_CAPABLE);
14651
14652         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14653                               &pci_state_reg);
14654
14655         if (pci_is_pcie(tp->pdev)) {
14656                 u16 lnkctl;
14657
14658                 tg3_flag_set(tp, PCI_EXPRESS);
14659
14660                 pci_read_config_word(tp->pdev,
14661                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14662                                      &lnkctl);
14663                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14664                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14665                             ASIC_REV_5906) {
14666                                 tg3_flag_clear(tp, HW_TSO_2);
14667                                 tg3_flag_clear(tp, TSO_CAPABLE);
14668                         }
14669                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14670                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14671                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14672                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14673                                 tg3_flag_set(tp, CLKREQ_BUG);
14674                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14675                         tg3_flag_set(tp, L1PLLPD_EN);
14676                 }
14677         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14678                 /* BCM5785 devices are effectively PCIe devices, and should
14679                  * follow PCIe codepaths, but do not have a PCIe capabilities
14680                  * section.
14681                  */
14682                 tg3_flag_set(tp, PCI_EXPRESS);
14683         } else if (!tg3_flag(tp, 5705_PLUS) ||
14684                    tg3_flag(tp, 5780_CLASS)) {
14685                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14686                 if (!tp->pcix_cap) {
14687                         dev_err(&tp->pdev->dev,
14688                                 "Cannot find PCI-X capability, aborting\n");
14689                         return -EIO;
14690                 }
14691
14692                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14693                         tg3_flag_set(tp, PCIX_MODE);
14694         }
14695
14696         /* If we have an AMD 762 or VIA K8T800 chipset, write
14697          * reordering to the mailbox registers done by the host
14698          * controller can cause major trouble.  We read back after
14699          * every mailbox register write to force the writes to be
14700          * posted to the chip in order.
14701          */
14702         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14703             !tg3_flag(tp, PCI_EXPRESS))
14704                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14705
14706         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14707                              &tp->pci_cacheline_sz);
14708         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14709                              &tp->pci_lat_timer);
14710         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14711             tp->pci_lat_timer < 64) {
14712                 tp->pci_lat_timer = 64;
14713                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14714                                       tp->pci_lat_timer);
14715         }
14716
14717         /* Important! -- It is critical that the PCI-X hw workaround
14718          * situation is decided before the first MMIO register access.
14719          */
14720         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14721                 /* 5700 BX chips need to have their TX producer index
14722                  * mailboxes written twice to work around a bug.
14723                  */
14724                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14725
14726                 /* If we are in PCI-X mode, enable register write workaround.
14727                  *
14728                  * The workaround is to use indirect register accesses
14729                  * for all chip writes not to mailbox registers.
14730                  */
14731                 if (tg3_flag(tp, PCIX_MODE)) {
14732                         u32 pm_reg;
14733
14734                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14735
14736                          /* The chip can have its power management PCI config
14737                          * space registers clobbered due to this bug.
14738                          * So explicitly force the chip into D0 here.
14739                          */
14740                         pci_read_config_dword(tp->pdev,
14741                                               tp->pm_cap + PCI_PM_CTRL,
14742                                               &pm_reg);
14743                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14744                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14745                         pci_write_config_dword(tp->pdev,
14746                                                tp->pm_cap + PCI_PM_CTRL,
14747                                                pm_reg);
14748
14749                         /* Also, force SERR#/PERR# in PCI command. */
14750                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14751                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14752                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14753                 }
14754         }
14755
14756         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14757                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14758         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14759                 tg3_flag_set(tp, PCI_32BIT);
14760
14761         /* Chip-specific fixup from Broadcom driver */
14762         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14763             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14764                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14765                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14766         }
14767
14768         /* Default fast path register access methods */
14769         tp->read32 = tg3_read32;
14770         tp->write32 = tg3_write32;
14771         tp->read32_mbox = tg3_read32;
14772         tp->write32_mbox = tg3_write32;
14773         tp->write32_tx_mbox = tg3_write32;
14774         tp->write32_rx_mbox = tg3_write32;
14775
14776         /* Various workaround register access methods */
14777         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14778                 tp->write32 = tg3_write_indirect_reg32;
14779         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14780                  (tg3_flag(tp, PCI_EXPRESS) &&
14781                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14782                 /*
14783                  * Back-to-back register writes can cause problems on these
14784                  * chips; the workaround is to read back all reg writes
14785                  * except those to mailbox regs.
14786                  *
14787                  * See tg3_write_indirect_reg32().
14788                  */
14789                 tp->write32 = tg3_write_flush_reg32;
14790         }
14791
14792         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14793                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14794                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14795                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14796         }
14797
14798         if (tg3_flag(tp, ICH_WORKAROUND)) {
14799                 tp->read32 = tg3_read_indirect_reg32;
14800                 tp->write32 = tg3_write_indirect_reg32;
14801                 tp->read32_mbox = tg3_read_indirect_mbox;
14802                 tp->write32_mbox = tg3_write_indirect_mbox;
14803                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14804                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14805
14806                 iounmap(tp->regs);
14807                 tp->regs = NULL;
14808
14809                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14810                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14811                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14812         }
14813         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14814                 tp->read32_mbox = tg3_read32_mbox_5906;
14815                 tp->write32_mbox = tg3_write32_mbox_5906;
14816                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14817                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14818         }
14819
14820         if (tp->write32 == tg3_write_indirect_reg32 ||
14821             (tg3_flag(tp, PCIX_MODE) &&
14822              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14823               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14824                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14825
14826         /* The memory arbiter has to be enabled in order for SRAM accesses
14827          * to succeed.  Normally on powerup the tg3 chip firmware will make
14828          * sure it is enabled, but other entities such as system netboot
14829          * code might disable it.
14830          */
14831         val = tr32(MEMARB_MODE);
14832         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14833
14834         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14835         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14836             tg3_flag(tp, 5780_CLASS)) {
14837                 if (tg3_flag(tp, PCIX_MODE)) {
14838                         pci_read_config_dword(tp->pdev,
14839                                               tp->pcix_cap + PCI_X_STATUS,
14840                                               &val);
14841                         tp->pci_fn = val & 0x7;
14842                 }
14843         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14844                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14845                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14846                     NIC_SRAM_CPMUSTAT_SIG) {
14847                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14848                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14849                 }
14850         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14851                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14852                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14853                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14854                     NIC_SRAM_CPMUSTAT_SIG) {
14855                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14856                                      TG3_CPMU_STATUS_FSHFT_5719;
14857                 }
14858         }
14859
14860         /* Get eeprom hw config before calling tg3_set_power_state().
14861          * In particular, the TG3_FLAG_IS_NIC flag must be
14862          * determined before calling tg3_set_power_state() so that
14863          * we know whether or not to switch out of Vaux power.
14864          * When the flag is set, it means that GPIO1 is used for eeprom
14865          * write protect and also implies that it is a LOM where GPIOs
14866          * are not used to switch power.
14867          */
14868         tg3_get_eeprom_hw_cfg(tp);
14869
14870         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14871                 tg3_flag_clear(tp, TSO_CAPABLE);
14872                 tg3_flag_clear(tp, TSO_BUG);
14873                 tp->fw_needed = NULL;
14874         }
14875
14876         if (tg3_flag(tp, ENABLE_APE)) {
14877                 /* Allow reads and writes to the
14878                  * APE register and memory space.
14879                  */
14880                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14881                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14882                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14883                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14884                                        pci_state_reg);
14885
14886                 tg3_ape_lock_init(tp);
14887         }
14888
14889         /* Set up tp->grc_local_ctrl before calling
14890          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14891          * will bring 5700's external PHY out of reset.
14892          * It is also used as eeprom write protect on LOMs.
14893          */
14894         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14895         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14896             tg3_flag(tp, EEPROM_WRITE_PROT))
14897                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14898                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14899         /* Unused GPIO3 must be driven as output on 5752 because there
14900          * are no pull-up resistors on unused GPIO pins.
14901          */
14902         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14903                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14904
14905         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14906             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14907             tg3_flag(tp, 57765_CLASS))
14908                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14909
14910         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14911             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14912                 /* Turn off the debug UART. */
14913                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14914                 if (tg3_flag(tp, IS_NIC))
14915                         /* Keep VMain power. */
14916                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14917                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14918         }
14919
14920         /* Switch out of Vaux if it is a NIC */
14921         tg3_pwrsrc_switch_to_vmain(tp);
14922
14923         /* Derive initial jumbo mode from MTU assigned in
14924          * ether_setup() via the alloc_etherdev() call
14925          */
14926         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14927                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14928
14929         /* Determine WakeOnLan speed to use. */
14930         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14931             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14932             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14933             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14934                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14935         } else {
14936                 tg3_flag_set(tp, WOL_SPEED_100MB);
14937         }
14938
14939         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14940                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14941
14942         /* A few boards don't want the Ethernet@WireSpeed phy feature */
14943         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14944             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14945              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14946              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14947             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14948             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14949                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14950
14951         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14952             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14953                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14954         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14955                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14956
14957         if (tg3_flag(tp, 5705_PLUS) &&
14958             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14959             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14960             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14961             !tg3_flag(tp, 57765_PLUS)) {
14962                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14963                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14964                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14965                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14966                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14967                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14968                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14969                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14970                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14971                 } else
14972                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14973         }
14974
14975         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14976             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14977                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14978                 if (tp->phy_otp == 0)
14979                         tp->phy_otp = TG3_OTP_DEFAULT;
14980         }
14981
14982         if (tg3_flag(tp, CPMU_PRESENT))
14983                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14984         else
14985                 tp->mi_mode = MAC_MI_MODE_BASE;
14986
14987         tp->coalesce_mode = 0;
14988         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14989             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14990                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14991
14992         /* Set these bits to enable statistics workaround. */
14993         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14994             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14995             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14996                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14997                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14998         }
14999
15000         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15001             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15002                 tg3_flag_set(tp, USE_PHYLIB);
15003
15004         err = tg3_mdio_init(tp);
15005         if (err)
15006                 return err;
15007
15008         /* Initialize data/descriptor byte/word swapping. */
15009         val = tr32(GRC_MODE);
15010         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15011                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15012                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
15013                         GRC_MODE_B2HRX_ENABLE |
15014                         GRC_MODE_HTX2B_ENABLE |
15015                         GRC_MODE_HOST_STACKUP);
15016         else
15017                 val &= GRC_MODE_HOST_STACKUP;
15018
15019         tw32(GRC_MODE, val | tp->grc_mode);
15020
15021         tg3_switch_clocks(tp);
15022
15023         /* Clear this out for sanity. */
15024         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15025
15026         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15027                               &pci_state_reg);
15028         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15029             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15030                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
15031
15032                 if (chiprevid == CHIPREV_ID_5701_A0 ||
15033                     chiprevid == CHIPREV_ID_5701_B0 ||
15034                     chiprevid == CHIPREV_ID_5701_B2 ||
15035                     chiprevid == CHIPREV_ID_5701_B5) {
15036                         void __iomem *sram_base;
15037
15038                         /* Write some dummy words into the SRAM status block
15039                          * area, see if it reads back correctly.  If the return
15040                          * value is bad, force enable the PCIX workaround.
15041                          */
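                              /* If the target hwbug is present, the final
                               * write aimed at sram_base + 4 can presumably
                               * land on sram_base itself, which the readl()
                               * below detects.
                               */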
15042                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15043
15044                         writel(0x00000000, sram_base);
15045                         writel(0x00000000, sram_base + 4);
15046                         writel(0xffffffff, sram_base + 4);
15047                         if (readl(sram_base) != 0x00000000)
15048                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15049                 }
15050         }
15051
15052         udelay(50);
15053         tg3_nvram_init(tp);
15054
15055         grc_misc_cfg = tr32(GRC_MISC_CFG);
15056         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15057
15058         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15059             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15060              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15061                 tg3_flag_set(tp, IS_5788);
15062
15063         if (!tg3_flag(tp, IS_5788) &&
15064             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
15065                 tg3_flag_set(tp, TAGGED_STATUS);
15066         if (tg3_flag(tp, TAGGED_STATUS)) {
15067                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15068                                       HOSTCC_MODE_CLRTICK_TXBD);
15069
15070                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15071                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15072                                        tp->misc_host_ctrl);
15073         }
15074
15075         /* Preserve the APE MAC_MODE bits */
15076         if (tg3_flag(tp, ENABLE_APE))
15077                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15078         else
15079                 tp->mac_mode = 0;
15080
15081         /* these are limited to 10/100 only */
15082         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15083              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15084             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15085              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
15086              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
15087               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
15088               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
15089             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
15090              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
15091               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
15092               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
15093             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
15094             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15095             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15096             (tp->phy_flags & TG3_PHYFLG_IS_FET))
15097                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15098
15099         err = tg3_phy_probe(tp);
15100         if (err) {
15101                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15102                 /* ... but do not return immediately ... */
15103                 tg3_mdio_fini(tp);
15104         }
15105
15106         tg3_read_vpd(tp);
15107         tg3_read_fw_ver(tp);
15108
15109         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15110                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15111         } else {
15112                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15113                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15114                 else
15115                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15116         }
15117
15118         /* 5700 {AX,BX} chips have a broken status block link
15119          * change bit implementation, so we must use the
15120          * status register in those cases.
15121          */
15122         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15123                 tg3_flag_set(tp, USE_LINKCHG_REG);
15124         else
15125                 tg3_flag_clear(tp, USE_LINKCHG_REG);
15126
        /* The led_ctrl is set during tg3_phy_probe; here we might
         * have to force the link status polling mechanism based
         * upon subsystem IDs.
         */
15131         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15132             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15133             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15134                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15135                 tg3_flag_set(tp, USE_LINKCHG_REG);
15136         }
15137
15138         /* For all SERDES we poll the MAC status register. */
15139         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15140                 tg3_flag_set(tp, POLL_SERDES);
15141         else
15142                 tg3_flag_clear(tp, POLL_SERDES);
15143
15144         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15145         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15146         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15147             tg3_flag(tp, PCIX_MODE)) {
15148                 tp->rx_offset = NET_SKB_PAD;
15149 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15150                 tp->rx_copy_thresh = ~(u16)0;
15151 #endif
15152         }
15153
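        /* The ring sizes are powers of two, so size - 1 doubles as the
         * index wrap-around mask for each ring.
         */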
15154         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15155         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15156         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15157
15158         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15159
        /* Increment the rx prod index on the rx std ring by at most
         * 8 for these chips to work around hw errata.
         */
15163         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15164             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15165             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15166                 tp->rx_std_max_post = 8;
15167
15168         if (tg3_flag(tp, ASPM_WORKAROUND))
15169                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15170                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
15171
15172         return err;
15173 }
15174
15175 #ifdef CONFIG_SPARC
15176 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
15177 {
15178         struct net_device *dev = tp->dev;
15179         struct pci_dev *pdev = tp->pdev;
15180         struct device_node *dp = pci_device_to_OF_node(pdev);
15181         const unsigned char *addr;
15182         int len;
15183
15184         addr = of_get_property(dp, "local-mac-address", &len);
15185         if (addr && len == 6) {
15186                 memcpy(dev->dev_addr, addr, 6);
15187                 memcpy(dev->perm_addr, dev->dev_addr, 6);
15188                 return 0;
15189         }
15190         return -ENODEV;
15191 }
15192
15193 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
15194 {
15195         struct net_device *dev = tp->dev;
15196
15197         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15198         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
15199         return 0;
15200 }
15201 #endif
15202
15203 static int __devinit tg3_get_device_address(struct tg3 *tp)
15204 {
15205         struct net_device *dev = tp->dev;
15206         u32 hi, lo, mac_offset;
15207         int addr_ok = 0;
15208
15209 #ifdef CONFIG_SPARC
15210         if (!tg3_get_macaddr_sparc(tp))
15211                 return 0;
15212 #endif
15213
15214         mac_offset = 0x7c;
15215         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15216             tg3_flag(tp, 5780_CLASS)) {
15217                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15218                         mac_offset = 0xcc;
15219                 if (tg3_nvram_lock(tp))
15220                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15221                 else
15222                         tg3_nvram_unlock(tp);
15223         } else if (tg3_flag(tp, 5717_PLUS)) {
15224                 if (tp->pci_fn & 1)
15225                         mac_offset = 0xcc;
15226                 if (tp->pci_fn > 1)
15227                         mac_offset += 0x18c;
15228         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15229                 mac_offset = 0x10;
15230
15231         /* First try to get it from MAC address mailbox. */
15232         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
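        /* 0x484b is ASCII "HK"; the bootcode apparently writes it as a
         * signature indicating the mailbox holds a valid MAC address.
         */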
15233         if ((hi >> 16) == 0x484b) {
15234                 dev->dev_addr[0] = (hi >>  8) & 0xff;
15235                 dev->dev_addr[1] = (hi >>  0) & 0xff;
15236
15237                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15238                 dev->dev_addr[2] = (lo >> 24) & 0xff;
15239                 dev->dev_addr[3] = (lo >> 16) & 0xff;
15240                 dev->dev_addr[4] = (lo >>  8) & 0xff;
15241                 dev->dev_addr[5] = (lo >>  0) & 0xff;
15242
15243                 /* Some old bootcode may report a 0 MAC address in SRAM */
15244                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15245         }
15246         if (!addr_ok) {
15247                 /* Next, try NVRAM. */
15248                 if (!tg3_flag(tp, NO_NVRAM) &&
15249                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15250                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
15251                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15252                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15253                 }
15254                 /* Finally just fetch it out of the MAC control regs. */
15255                 else {
15256                         hi = tr32(MAC_ADDR_0_HIGH);
15257                         lo = tr32(MAC_ADDR_0_LOW);
15258
15259                         dev->dev_addr[5] = lo & 0xff;
15260                         dev->dev_addr[4] = (lo >> 8) & 0xff;
15261                         dev->dev_addr[3] = (lo >> 16) & 0xff;
15262                         dev->dev_addr[2] = (lo >> 24) & 0xff;
15263                         dev->dev_addr[1] = hi & 0xff;
15264                         dev->dev_addr[0] = (hi >> 8) & 0xff;
15265                 }
15266         }
15267
15268         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
15269 #ifdef CONFIG_SPARC
15270                 if (!tg3_get_default_macaddr_sparc(tp))
15271                         return 0;
15272 #endif
15273                 return -EINVAL;
15274         }
15275         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
15276         return 0;
15277 }
15278
15279 #define BOUNDARY_SINGLE_CACHELINE       1
15280 #define BOUNDARY_MULTI_CACHELINE        2
15281
15282 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15283 {
15284         int cacheline_size;
15285         u8 byte;
15286         int goal;
15287
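        /* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, hence the
         * multiply by four; a value of zero is treated as an
         * unconfigured register and a 1024-byte line is assumed.
         */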
15288         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
15289         if (byte == 0)
15290                 cacheline_size = 1024;
15291         else
15292                 cacheline_size = (int) byte * 4;
15293
15294         /* On 5703 and later chips, the boundary bits have no
15295          * effect.
15296          */
15297         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15298             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15299             !tg3_flag(tp, PCI_EXPRESS))
15300                 goto out;
15301
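        /* Pick a per-architecture boundary goal; RISC platforms whose
         * PCI controllers disconnect on bursts that cross cache lines
         * prefer confined bursts (see the comment further below).
         */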
15302 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
15303         goal = BOUNDARY_MULTI_CACHELINE;
15304 #else
15305 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
15306         goal = BOUNDARY_SINGLE_CACHELINE;
15307 #else
15308         goal = 0;
15309 #endif
15310 #endif
15311
15312         if (tg3_flag(tp, 57765_PLUS)) {
15313                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
15314                 goto out;
15315         }
15316
15317         if (!goal)
15318                 goto out;
15319
        /* PCI controllers on most RISC systems tend to disconnect
         * when a device tries to burst across a cache-line boundary.
         * Therefore, letting tg3 do so just wastes PCI bandwidth.
         *
         * Unfortunately, for PCI-E there are only limited
         * write-side controls for this, and thus for reads
         * we will still get the disconnects.  We'll also waste
         * these PCI cycles for both read and write for chips
         * other than 5700 and 5701, which do not implement the
         * boundary bits.
         */
15331         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
15332                 switch (cacheline_size) {
15333                 case 16:
15334                 case 32:
15335                 case 64:
15336                 case 128:
15337                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15338                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
15339                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
15340                         } else {
15341                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15342                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15343                         }
15344                         break;
15345
15346                 case 256:
15347                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
15348                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
15349                         break;
15350
15351                 default:
15352                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15353                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15354                         break;
15355                 }
15356         } else if (tg3_flag(tp, PCI_EXPRESS)) {
15357                 switch (cacheline_size) {
15358                 case 16:
15359                 case 32:
15360                 case 64:
15361                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15362                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15363                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15364                                 break;
15365                         }
15366                         /* fallthrough */
15367                 case 128:
15368                 default:
15369                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15370                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15371                         break;
15372                 }
15373         } else {
15374                 switch (cacheline_size) {
15375                 case 16:
15376                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15377                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
15378                                         DMA_RWCTRL_WRITE_BNDRY_16);
15379                                 break;
15380                         }
15381                         /* fallthrough */
15382                 case 32:
15383                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15384                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15385                                         DMA_RWCTRL_WRITE_BNDRY_32);
15386                                 break;
15387                         }
15388                         /* fallthrough */
15389                 case 64:
15390                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15391                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15392                                         DMA_RWCTRL_WRITE_BNDRY_64);
15393                                 break;
15394                         }
15395                         /* fallthrough */
15396                 case 128:
15397                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15398                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15399                                         DMA_RWCTRL_WRITE_BNDRY_128);
15400                                 break;
15401                         }
15402                         /* fallthrough */
15403                 case 256:
15404                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
15405                                 DMA_RWCTRL_WRITE_BNDRY_256);
15406                         break;
15407                 case 512:
15408                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
15409                                 DMA_RWCTRL_WRITE_BNDRY_512);
15410                         break;
15411                 case 1024:
15412                 default:
15413                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15414                                 DMA_RWCTRL_WRITE_BNDRY_1024);
15415                         break;
15416                 }
15417         }
15418
15419 out:
15420         return val;
15421 }
15422
15423 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15424 {
15425         struct tg3_internal_buffer_desc test_desc;
15426         u32 sram_dma_descs;
15427         int i, ret;
15428
15429         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15430
15431         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15432         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15433         tw32(RDMAC_STATUS, 0);
15434         tw32(WDMAC_STATUS, 0);
15435
15436         tw32(BUFMGR_MODE, 0);
15437         tw32(FTQ_RESET, 0);
15438
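        /* Build a single internal buffer descriptor for the host
         * buffer; 0x00002100 selects the NIC mbuf area used by the
         * test (the same 0x2100 offset the disabled check in
         * tg3_test_dma() reads back).
         */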
15439         test_desc.addr_hi = ((u64) buf_dma) >> 32;
15440         test_desc.addr_lo = buf_dma & 0xffffffff;
15441         test_desc.nic_mbuf = 0x00002100;
15442         test_desc.len = size;
15443
        /*
         * HP ZX1 systems were seeing test failures for 5701 cards
         * running at 33MHz the *second* time the tg3 driver was
         * loaded after an initial scan.
         *
         * Broadcom tells me:
         *   ...the DMA engine is connected to the GRC block and a DMA
         *   reset may affect the GRC block in some unpredictable way...
         *   The behavior of resets to individual blocks has not been tested.
         *
         * Broadcom noted the GRC reset will also reset all sub-components.
         */
15456         if (to_device) {
15457                 test_desc.cqid_sqid = (13 << 8) | 2;
15458
15459                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15460                 udelay(40);
15461         } else {
15462                 test_desc.cqid_sqid = (16 << 8) | 7;
15463
15464                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15465                 udelay(40);
15466         }
15467         test_desc.flags = 0x00000005;
15468
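        /* Copy the descriptor into NIC SRAM one 32-bit word at a time
         * through the PCI memory window registers.
         */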
15469         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15470                 u32 val;
15471
15472                 val = *(((u32 *)&test_desc) + i);
15473                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15474                                        sram_dma_descs + (i * sizeof(u32)));
15475                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15476         }
15477         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15478
15479         if (to_device)
15480                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15481         else
15482                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15483
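        /* Poll the completion FIFO for up to ~4ms (40 polls, 100us
         * apart) for our descriptor address to appear.
         */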
15484         ret = -ENODEV;
15485         for (i = 0; i < 40; i++) {
15486                 u32 val;
15487
15488                 if (to_device)
15489                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15490                 else
15491                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15492                 if ((val & 0xffff) == sram_dma_descs) {
15493                         ret = 0;
15494                         break;
15495                 }
15496
15497                 udelay(100);
15498         }
15499
15500         return ret;
15501 }
15502
15503 #define TEST_BUFFER_SIZE        0x2000
15504
15505 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15506         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15507         { },
15508 };
15509
15510 static int __devinit tg3_test_dma(struct tg3 *tp)
15511 {
15512         dma_addr_t buf_dma;
15513         u32 *buf, saved_dma_rwctrl;
15514         int ret = 0;
15515
15516         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15517                                  &buf_dma, GFP_KERNEL);
15518         if (!buf) {
15519                 ret = -ENOMEM;
15520                 goto out_nofree;
15521         }
15522
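        /* Seed the DMA read/write command values, then let
         * tg3_calc_dma_bndry() fold in the cacheline boundary bits
         * appropriate for this bus and architecture.
         */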
15523         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15524                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15525
15526         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15527
15528         if (tg3_flag(tp, 57765_PLUS))
15529                 goto out;
15530
15531         if (tg3_flag(tp, PCI_EXPRESS)) {
15532                 /* DMA read watermark not used on PCIE */
15533                 tp->dma_rwctrl |= 0x00180000;
15534         } else if (!tg3_flag(tp, PCIX_MODE)) {
15535                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15536                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15537                         tp->dma_rwctrl |= 0x003f0000;
15538                 else
15539                         tp->dma_rwctrl |= 0x003f000f;
15540         } else {
15541                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15542                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15543                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15544                         u32 read_water = 0x7;
15545
15546                         /* If the 5704 is behind the EPB bridge, we can
15547                          * do the less restrictive ONE_DMA workaround for
15548                          * better performance.
15549                          */
15550                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15551                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15552                                 tp->dma_rwctrl |= 0x8000;
15553                         else if (ccval == 0x6 || ccval == 0x7)
15554                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15555
15556                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15557                                 read_water = 4;
15558                         /* Set bit 23 to enable PCIX hw bug fix */
15559                         tp->dma_rwctrl |=
15560                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15561                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15562                                 (1 << 23);
15563                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15564                         /* 5780 always in PCIX mode */
15565                         tp->dma_rwctrl |= 0x00144000;
15566                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15567                         /* 5714 always in PCIX mode */
15568                         tp->dma_rwctrl |= 0x00148000;
15569                 } else {
15570                         tp->dma_rwctrl |= 0x001b000f;
15571                 }
15572         }
15573
15574         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15575             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15576                 tp->dma_rwctrl &= 0xfffffff0;
15577
15578         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15579             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15580                 /* Remove this if it causes problems for some boards. */
15581                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15582
                /* On 5700/5701 chips, we need to set this bit.
                 * Otherwise the chip will issue cacheline transactions
                 * to streamable DMA memory without all of the byte
                 * enables turned on.  This is an error on several
                 * RISC PCI controllers, in particular sparc64.
                 *
                 * On 5703/5704 chips, this bit has been reassigned
                 * a different meaning.  In particular, it is used
                 * on those chips to enable a PCI-X workaround.
                 */
15593                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15594         }
15595
15596         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15597
15598 #if 0
15599         /* Unneeded, already done by tg3_get_invariants.  */
15600         tg3_switch_clocks(tp);
15601 #endif
15602
15603         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15604             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15605                 goto out;
15606
        /* It is best to perform the DMA test with the maximum write
         * burst size to expose the 5700/5701 write DMA bug.
         */
15610         saved_dma_rwctrl = tp->dma_rwctrl;
15611         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15612         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15613
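        /* Write a known pattern to the chip and read it back.  On the
         * first corruption, clamp the write boundary to 16 bytes and
         * retry; a second failure is fatal.
         */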
15614         while (1) {
15615                 u32 *p = buf, i;
15616
15617                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15618                         p[i] = i;
15619
15620                 /* Send the buffer to the chip. */
15621                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15622                 if (ret) {
15623                         dev_err(&tp->pdev->dev,
15624                                 "%s: Buffer write failed. err = %d\n",
15625                                 __func__, ret);
15626                         break;
15627                 }
15628
15629 #if 0
                /* Validate that the data reached card RAM correctly. */
15631                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15632                         u32 val;
15633                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
15634                         if (le32_to_cpu(val) != p[i]) {
15635                                 dev_err(&tp->pdev->dev,
15636                                         "%s: Buffer corrupted on device! "
15637                                         "(%d != %d)\n", __func__, val, i);
15638                                 /* ret = -ENODEV here? */
15639                         }
15640                         p[i] = 0;
15641                 }
15642 #endif
15643                 /* Now read it back. */
15644                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15645                 if (ret) {
15646                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15647                                 "err = %d\n", __func__, ret);
15648                         break;
15649                 }
15650
15651                 /* Verify it. */
15652                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15653                         if (p[i] == i)
15654                                 continue;
15655
15656                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15657                             DMA_RWCTRL_WRITE_BNDRY_16) {
15658                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15659                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15660                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15661                                 break;
15662                         } else {
15663                                 dev_err(&tp->pdev->dev,
15664                                         "%s: Buffer corrupted on read back! "
15665                                         "(%d != %d)\n", __func__, p[i], i);
15666                                 ret = -ENODEV;
15667                                 goto out;
15668                         }
15669                 }
15670
15671                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15672                         /* Success. */
15673                         ret = 0;
15674                         break;
15675                 }
15676         }
15677         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15678             DMA_RWCTRL_WRITE_BNDRY_16) {
15679                 /* DMA test passed without adjusting DMA boundary,
15680                  * now look for chipsets that are known to expose the
15681                  * DMA bug without failing the test.
15682                  */
15683                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15684                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15685                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15686                 } else {
15687                         /* Safe to use the calculated DMA boundary. */
15688                         tp->dma_rwctrl = saved_dma_rwctrl;
15689                 }
15690
15691                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15692         }
15693
15694 out:
15695         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15696 out_nofree:
15697         return ret;
15698 }
15699
15700 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15701 {
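        /* Three tiers of buffer manager watermarks: 57765-class,
         * 5705-class (with 5906-specific overrides), and the original
         * 5700-era defaults.
         */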
15702         if (tg3_flag(tp, 57765_PLUS)) {
15703                 tp->bufmgr_config.mbuf_read_dma_low_water =
15704                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15705                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15706                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15707                 tp->bufmgr_config.mbuf_high_water =
15708                         DEFAULT_MB_HIGH_WATER_57765;
15709
15710                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15711                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15712                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15713                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15714                 tp->bufmgr_config.mbuf_high_water_jumbo =
15715                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15716         } else if (tg3_flag(tp, 5705_PLUS)) {
15717                 tp->bufmgr_config.mbuf_read_dma_low_water =
15718                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15719                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15720                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15721                 tp->bufmgr_config.mbuf_high_water =
15722                         DEFAULT_MB_HIGH_WATER_5705;
15723                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15724                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15725                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15726                         tp->bufmgr_config.mbuf_high_water =
15727                                 DEFAULT_MB_HIGH_WATER_5906;
15728                 }
15729
15730                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15731                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15732                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15733                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15734                 tp->bufmgr_config.mbuf_high_water_jumbo =
15735                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15736         } else {
15737                 tp->bufmgr_config.mbuf_read_dma_low_water =
15738                         DEFAULT_MB_RDMA_LOW_WATER;
15739                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15740                         DEFAULT_MB_MACRX_LOW_WATER;
15741                 tp->bufmgr_config.mbuf_high_water =
15742                         DEFAULT_MB_HIGH_WATER;
15743
15744                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15745                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15746                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15747                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15748                 tp->bufmgr_config.mbuf_high_water_jumbo =
15749                         DEFAULT_MB_HIGH_WATER_JUMBO;
15750         }
15751
15752         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15753         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15754 }
15755
15756 static char * __devinit tg3_phy_string(struct tg3 *tp)
15757 {
15758         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15759         case TG3_PHY_ID_BCM5400:        return "5400";
15760         case TG3_PHY_ID_BCM5401:        return "5401";
15761         case TG3_PHY_ID_BCM5411:        return "5411";
15762         case TG3_PHY_ID_BCM5701:        return "5701";
15763         case TG3_PHY_ID_BCM5703:        return "5703";
15764         case TG3_PHY_ID_BCM5704:        return "5704";
15765         case TG3_PHY_ID_BCM5705:        return "5705";
15766         case TG3_PHY_ID_BCM5750:        return "5750";
15767         case TG3_PHY_ID_BCM5752:        return "5752";
15768         case TG3_PHY_ID_BCM5714:        return "5714";
15769         case TG3_PHY_ID_BCM5780:        return "5780";
15770         case TG3_PHY_ID_BCM5755:        return "5755";
15771         case TG3_PHY_ID_BCM5787:        return "5787";
15772         case TG3_PHY_ID_BCM5784:        return "5784";
15773         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15774         case TG3_PHY_ID_BCM5906:        return "5906";
15775         case TG3_PHY_ID_BCM5761:        return "5761";
15776         case TG3_PHY_ID_BCM5718C:       return "5718C";
15777         case TG3_PHY_ID_BCM5718S:       return "5718S";
15778         case TG3_PHY_ID_BCM57765:       return "57765";
15779         case TG3_PHY_ID_BCM5719C:       return "5719C";
15780         case TG3_PHY_ID_BCM5720C:       return "5720C";
15781         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15782         case 0:                 return "serdes";
15783         default:                return "unknown";
15784         }
15785 }
15786
15787 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15788 {
15789         if (tg3_flag(tp, PCI_EXPRESS)) {
15790                 strcpy(str, "PCI Express");
15791                 return str;
15792         } else if (tg3_flag(tp, PCIX_MODE)) {
15793                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15794
15795                 strcpy(str, "PCIX:");
15796
15797                 if ((clock_ctrl == 7) ||
15798                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15799                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15800                         strcat(str, "133MHz");
15801                 else if (clock_ctrl == 0)
15802                         strcat(str, "33MHz");
15803                 else if (clock_ctrl == 2)
15804                         strcat(str, "50MHz");
15805                 else if (clock_ctrl == 4)
15806                         strcat(str, "66MHz");
15807                 else if (clock_ctrl == 6)
15808                         strcat(str, "100MHz");
15809         } else {
15810                 strcpy(str, "PCI:");
15811                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15812                         strcat(str, "66MHz");
15813                 else
15814                         strcat(str, "33MHz");
15815         }
15816         if (tg3_flag(tp, PCI_32BIT))
15817                 strcat(str, ":32-bit");
15818         else
15819                 strcat(str, ":64-bit");
15820         return str;
15821 }
15822
15823 static void __devinit tg3_init_coal(struct tg3 *tp)
15824 {
15825         struct ethtool_coalesce *ec = &tp->coal;
15826
15827         memset(ec, 0, sizeof(*ec));
15828         ec->cmd = ETHTOOL_GCOALESCE;
15829         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15830         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15831         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15832         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15833         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15834         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15835         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15836         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15837         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15838
15839         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15840                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15841                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15842                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15843                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15844                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15845         }
15846
15847         if (tg3_flag(tp, 5705_PLUS)) {
15848                 ec->rx_coalesce_usecs_irq = 0;
15849                 ec->tx_coalesce_usecs_irq = 0;
15850                 ec->stats_block_coalesce_usecs = 0;
15851         }
15852 }
15853
15854 static int __devinit tg3_init_one(struct pci_dev *pdev,
15855                                   const struct pci_device_id *ent)
15856 {
15857         struct net_device *dev;
15858         struct tg3 *tp;
15859         int i, err, pm_cap;
15860         u32 sndmbx, rcvmbx, intmbx;
15861         char str[40];
15862         u64 dma_mask, persist_dma_mask;
15863         netdev_features_t features = 0;
15864
15865         printk_once(KERN_INFO "%s\n", version);
15866
15867         err = pci_enable_device(pdev);
15868         if (err) {
15869                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15870                 return err;
15871         }
15872
15873         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15874         if (err) {
15875                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15876                 goto err_out_disable_pdev;
15877         }
15878
15879         pci_set_master(pdev);
15880
15881         /* Find power-management capability. */
15882         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15883         if (pm_cap == 0) {
15884                 dev_err(&pdev->dev,
15885                         "Cannot find Power Management capability, aborting\n");
15886                 err = -EIO;
15887                 goto err_out_free_res;
15888         }
15889
15890         err = pci_set_power_state(pdev, PCI_D0);
15891         if (err) {
15892                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15893                 goto err_out_free_res;
15894         }
15895
15896         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15897         if (!dev) {
15898                 err = -ENOMEM;
15899                 goto err_out_power_down;
15900         }
15901
15902         SET_NETDEV_DEV(dev, &pdev->dev);
15903
15904         tp = netdev_priv(dev);
15905         tp->pdev = pdev;
15906         tp->dev = dev;
15907         tp->pm_cap = pm_cap;
15908         tp->rx_mode = TG3_DEF_RX_MODE;
15909         tp->tx_mode = TG3_DEF_TX_MODE;
15910
15911         if (tg3_debug > 0)
15912                 tp->msg_enable = tg3_debug;
15913         else
15914                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15915
15916         /* The word/byte swap controls here control register access byte
15917          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15918          * setting below.
15919          */
15920         tp->misc_host_ctrl =
15921                 MISC_HOST_CTRL_MASK_PCI_INT |
15922                 MISC_HOST_CTRL_WORD_SWAP |
15923                 MISC_HOST_CTRL_INDIR_ACCESS |
15924                 MISC_HOST_CTRL_PCISTATE_RW;
15925
15926         /* The NONFRM (non-frame) byte/word swap controls take effect
15927          * on descriptor entries, anything which isn't packet data.
15928          *
15929          * The StrongARM chips on the board (one for tx, one for rx)
15930          * are running in big-endian mode.
15931          */
15932         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15933                         GRC_MODE_WSWAP_NONFRM_DATA);
15934 #ifdef __BIG_ENDIAN
15935         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15936 #endif
15937         spin_lock_init(&tp->lock);
15938         spin_lock_init(&tp->indirect_lock);
15939         INIT_WORK(&tp->reset_task, tg3_reset_task);
15940
15941         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15942         if (!tp->regs) {
15943                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15944                 err = -ENOMEM;
15945                 goto err_out_free_dev;
15946         }
15947
15948         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15949             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15950             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15951             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15952             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15953             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15954             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15955             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15956                 tg3_flag_set(tp, ENABLE_APE);
15957                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15958                 if (!tp->aperegs) {
15959                         dev_err(&pdev->dev,
15960                                 "Cannot map APE registers, aborting\n");
15961                         err = -ENOMEM;
15962                         goto err_out_iounmap;
15963                 }
15964         }
15965
15966         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15967         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15968
15969         dev->ethtool_ops = &tg3_ethtool_ops;
15970         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15971         dev->netdev_ops = &tg3_netdev_ops;
15972         dev->irq = pdev->irq;
15973
15974         err = tg3_get_invariants(tp);
15975         if (err) {
15976                 dev_err(&pdev->dev,
15977                         "Problem fetching invariants of chip, aborting\n");
15978                 goto err_out_apeunmap;
15979         }
15980
15981         /* The EPB bridge inside 5714, 5715, and 5780 and any
15982          * device behind the EPB cannot support DMA addresses > 40-bit.
15983          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15984          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15985          * do DMA address check in tg3_start_xmit().
15986          */
15987         if (tg3_flag(tp, IS_5788))
15988                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15989         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15990                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15991 #ifdef CONFIG_HIGHMEM
15992                 dma_mask = DMA_BIT_MASK(64);
15993 #endif
15994         } else
15995                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15996
15997         /* Configure DMA attributes. */
15998         if (dma_mask > DMA_BIT_MASK(32)) {
15999                 err = pci_set_dma_mask(pdev, dma_mask);
16000                 if (!err) {
16001                         features |= NETIF_F_HIGHDMA;
16002                         err = pci_set_consistent_dma_mask(pdev,
16003                                                           persist_dma_mask);
16004                         if (err < 0) {
16005                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16006                                         "DMA for consistent allocations\n");
16007                                 goto err_out_apeunmap;
16008                         }
16009                 }
16010         }
16011         if (err || dma_mask == DMA_BIT_MASK(32)) {
16012                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16013                 if (err) {
16014                         dev_err(&pdev->dev,
16015                                 "No usable DMA configuration, aborting\n");
16016                         goto err_out_apeunmap;
16017                 }
16018         }
16019
16020         tg3_init_bufmgr_config(tp);
16021
16022         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16023
16024         /* 5700 B0 chips do not support checksumming correctly due
16025          * to hardware bugs.
16026          */
16027         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
16028                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16029
16030                 if (tg3_flag(tp, 5755_PLUS))
16031                         features |= NETIF_F_IPV6_CSUM;
16032         }
16033
16034         /* TSO is on by default on chips that support hardware TSO.
16035          * Firmware TSO on older chips gives lower performance, so it
16036          * is off by default, but can be enabled using ethtool.
16037          */
16038         if ((tg3_flag(tp, HW_TSO_1) ||
16039              tg3_flag(tp, HW_TSO_2) ||
16040              tg3_flag(tp, HW_TSO_3)) &&
16041             (features & NETIF_F_IP_CSUM))
16042                 features |= NETIF_F_TSO;
16043         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16044                 if (features & NETIF_F_IPV6_CSUM)
16045                         features |= NETIF_F_TSO6;
16046                 if (tg3_flag(tp, HW_TSO_3) ||
16047                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
16048                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
16049                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
16050                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
16051                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
16052                         features |= NETIF_F_TSO_ECN;
16053         }
16054
16055         dev->features |= features;
16056         dev->vlan_features |= features;
16057
        /*
         * Add loopback capability only for a subset of devices that support
         * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
         * INT-PHY loopback for the remaining devices.
         */
16063         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
16064             !tg3_flag(tp, CPMU_PRESENT))
16065                 /* Add the loopback capability */
16066                 features |= NETIF_F_LOOPBACK;
16067
16068         dev->hw_features |= features;
16069
16070         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
16071             !tg3_flag(tp, TSO_CAPABLE) &&
16072             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16073                 tg3_flag_set(tp, MAX_RXPEND_64);
16074                 tp->rx_pending = 63;
16075         }
16076
16077         err = tg3_get_device_address(tp);
16078         if (err) {
16079                 dev_err(&pdev->dev,
16080                         "Could not obtain valid ethernet address, aborting\n");
16081                 goto err_out_apeunmap;
16082         }
16083
        /*
         * Reset the chip in case a UNDI or EFI driver did not shut down
         * DMA.  Otherwise the DMA self test will enable WDMAC and we'll
         * see (spurious) pending DMA on the PCI bus at that point.
         */
16089         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16090             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16091                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16092                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16093         }
16094
16095         err = tg3_test_dma(tp);
16096         if (err) {
16097                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
16098                 goto err_out_apeunmap;
16099         }
16100
16101         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
16102         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
16103         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
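        /* Hand each NAPI context its interrupt, rx return consumer,
         * and tx producer mailbox registers.
         */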
16104         for (i = 0; i < tp->irq_max; i++) {
16105                 struct tg3_napi *tnapi = &tp->napi[i];
16106
16107                 tnapi->tp = tp;
16108                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
16109
16110                 tnapi->int_mbox = intmbx;
16111                 if (i <= 4)
16112                         intmbx += 0x8;
16113                 else
16114                         intmbx += 0x4;
16115
16116                 tnapi->consmbox = rcvmbx;
16117                 tnapi->prodmbox = sndmbx;
16118
16119                 if (i)
16120                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
16121                 else
16122                         tnapi->coal_now = HOSTCC_MODE_NOW;
16123
16124                 if (!tg3_flag(tp, SUPPORT_MSIX))
16125                         break;
16126
                /*
                 * If we support MSIX, we'll be using RSS.  If we're using
                 * RSS, the first vector only handles link interrupts and the
                 * remaining vectors handle rx and tx interrupts.  Reuse the
                 * mailbox values for the next iteration.  The values we set
                 * up above are still useful for the single-vector mode.
                 */
16134                 if (!i)
16135                         continue;
16136
16137                 rcvmbx += 0x8;
16138
16139                 if (sndmbx & 0x4)
16140                         sndmbx -= 0x4;
16141                 else
16142                         sndmbx += 0xc;
16143         }
16144
16145         tg3_init_coal(tp);
16146
16147         pci_set_drvdata(pdev, dev);
16148
16149         if (tg3_flag(tp, 5717_PLUS)) {
16150                 /* Resume a low-power mode */
16151                 tg3_frob_aux_power(tp, false);
16152         }
16153
16154         tg3_timer_init(tp);
16155
16156         err = register_netdev(dev);
16157         if (err) {
16158                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
16159                 goto err_out_apeunmap;
16160         }
16161
16162         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16163                     tp->board_part_number,
16164                     tp->pci_chip_rev_id,
16165                     tg3_bus_string(tp, str),
16166                     dev->dev_addr);
16167
16168         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
16169                 struct phy_device *phydev;
16170                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
16171                 netdev_info(dev,
16172                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16173                             phydev->drv->name, dev_name(&phydev->dev));
16174         } else {
16175                 char *ethtype;
16176
16177                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16178                         ethtype = "10/100Base-TX";
16179                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16180                         ethtype = "1000Base-SX";
16181                 else
16182                         ethtype = "10/100/1000Base-T";
16183
16184                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16185                             "(WireSpeed[%d], EEE[%d])\n",
16186                             tg3_phy_string(tp), ethtype,
16187                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16188                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
16189         }
16190
16191         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16192                     (dev->features & NETIF_F_RXCSUM) != 0,
16193                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
16194                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
16195                     tg3_flag(tp, ENABLE_ASF) != 0,
16196                     tg3_flag(tp, TSO_CAPABLE) != 0);
16197         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16198                     tp->dma_rwctrl,
16199                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16200                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
16201
16202         pci_save_state(pdev);
16203
16204         return 0;
16205
16206 err_out_apeunmap:
16207         if (tp->aperegs) {
16208                 iounmap(tp->aperegs);
16209                 tp->aperegs = NULL;
16210         }
16211
16212 err_out_iounmap:
16213         if (tp->regs) {
16214                 iounmap(tp->regs);
16215                 tp->regs = NULL;
16216         }
16217
16218 err_out_free_dev:
16219         free_netdev(dev);
16220
16221 err_out_power_down:
16222         pci_set_power_state(pdev, PCI_D3hot);
16223
16224 err_out_free_res:
16225         pci_release_regions(pdev);
16226
16227 err_out_disable_pdev:
16228         pci_disable_device(pdev);
16229         pci_set_drvdata(pdev, NULL);
16230         return err;
16231 }
16232
16233 static void __devexit tg3_remove_one(struct pci_dev *pdev)
16234 {
16235         struct net_device *dev = pci_get_drvdata(pdev);
16236
16237         if (dev) {
16238                 struct tg3 *tp = netdev_priv(dev);
16239
16240                 release_firmware(tp->fw);
16241
16242                 tg3_reset_task_cancel(tp);
16243
16244                 if (tg3_flag(tp, USE_PHYLIB)) {
16245                         tg3_phy_fini(tp);
16246                         tg3_mdio_fini(tp);
16247                 }
16248
16249                 unregister_netdev(dev);
16250                 if (tp->aperegs) {
16251                         iounmap(tp->aperegs);
16252                         tp->aperegs = NULL;
16253                 }
16254                 if (tp->regs) {
16255                         iounmap(tp->regs);
16256                         tp->regs = NULL;
16257                 }
16258                 free_netdev(dev);
16259                 pci_release_regions(pdev);
16260                 pci_disable_device(pdev);
16261                 pci_set_drvdata(pdev, NULL);
16262         }
16263 }
16264
16265 #ifdef CONFIG_PM_SLEEP
16266 static int tg3_suspend(struct device *device)
16267 {
16268         struct pci_dev *pdev = to_pci_dev(device);
16269         struct net_device *dev = pci_get_drvdata(pdev);
16270         struct tg3 *tp = netdev_priv(dev);
16271         int err;
16272
16273         if (!netif_running(dev))
16274                 return 0;
16275
16276         tg3_reset_task_cancel(tp);
16277         tg3_phy_stop(tp);
16278         tg3_netif_stop(tp);
16279
16280         tg3_timer_stop(tp);
16281
16282         tg3_full_lock(tp, 1);
16283         tg3_disable_ints(tp);
16284         tg3_full_unlock(tp);
16285
16286         netif_device_detach(dev);
16287
16288         tg3_full_lock(tp, 0);
16289         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16290         tg3_flag_clear(tp, INIT_COMPLETE);
16291         tg3_full_unlock(tp);
16292
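        /* If preparing for power-down fails, restart the hardware and
         * reattach the interface so the device stays usable.
         */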
16293         err = tg3_power_down_prepare(tp);
16294         if (err) {
16295                 int err2;
16296
16297                 tg3_full_lock(tp, 0);
16298
16299                 tg3_flag_set(tp, INIT_COMPLETE);
16300                 err2 = tg3_restart_hw(tp, 1);
16301                 if (err2)
16302                         goto out;
16303
16304                 tg3_timer_start(tp);
16305
16306                 netif_device_attach(dev);
16307                 tg3_netif_start(tp);
16308
16309 out:
16310                 tg3_full_unlock(tp);
16311
16312                 if (!err2)
16313                         tg3_phy_start(tp);
16314         }
16315
16316         return err;
16317 }
16318
16319 static int tg3_resume(struct device *device)
16320 {
16321         struct pci_dev *pdev = to_pci_dev(device);
16322         struct net_device *dev = pci_get_drvdata(pdev);
16323         struct tg3 *tp = netdev_priv(dev);
16324         int err;
16325
16326         if (!netif_running(dev))
16327                 return 0;
16328
16329         netif_device_attach(dev);
16330
16331         tg3_full_lock(tp, 0);
16332
16333         tg3_flag_set(tp, INIT_COMPLETE);
16334         err = tg3_restart_hw(tp, 1);
16335         if (err)
16336                 goto out;
16337
16338         tg3_timer_start(tp);
16339
16340         tg3_netif_start(tp);
16341
16342 out:
16343         tg3_full_unlock(tp);
16344
16345         if (!err)
16346                 tg3_phy_start(tp);
16347
16348         return err;
16349 }
16350
16351 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
16352 #define TG3_PM_OPS (&tg3_pm_ops)
16353
16354 #else
16355
16356 #define TG3_PM_OPS NULL
16357
16358 #endif /* CONFIG_PM_SLEEP */
16359
16360 /**
16361  * tg3_io_error_detected - called when PCI error is detected
16362  * @pdev: Pointer to PCI device
16363  * @state: The current pci connection state
16364  *
16365  * This function is called after a PCI bus error affecting
16366  * this device has been detected.
16367  */
16368 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
16369                                               pci_channel_state_t state)
16370 {
16371         struct net_device *netdev = pci_get_drvdata(pdev);
16372         struct tg3 *tp = netdev_priv(netdev);
16373         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
16374
16375         netdev_info(netdev, "PCI I/O error detected\n");
16376
16377         rtnl_lock();
16378
16379         if (!netif_running(netdev))
16380                 goto done;
16381
16382         tg3_phy_stop(tp);
16383
16384         tg3_netif_stop(tp);
16385
16386         tg3_timer_stop(tp);
16387
16388         /* Want to make sure that the reset task doesn't run */
16389         tg3_reset_task_cancel(tp);
16390
16391         netif_device_detach(netdev);
16392
16393         /* Clean up software state, even if MMIO is blocked */
16394         tg3_full_lock(tp, 0);
16395         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16396         tg3_full_unlock(tp);
16397
16398 done:
16399         if (state == pci_channel_io_perm_failure)
16400                 err = PCI_ERS_RESULT_DISCONNECT;
16401         else
16402                 pci_disable_device(pdev);
16403
16404         rtnl_unlock();
16405
16406         return err;
16407 }
16408
16409 /**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
16417  */
16418 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16419 {
16420         struct net_device *netdev = pci_get_drvdata(pdev);
16421         struct tg3 *tp = netdev_priv(netdev);
16422         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16423         int err;
16424
16425         rtnl_lock();
16426
16427         if (pci_enable_device(pdev)) {
16428                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16429                 goto done;
16430         }
16431
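        /* Re-enable bus mastering, restore the config space saved
         * earlier, and save it again so a later recovery pass starts
         * from this state.
         */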
16432         pci_set_master(pdev);
16433         pci_restore_state(pdev);
16434         pci_save_state(pdev);
16435
16436         if (!netif_running(netdev)) {
16437                 rc = PCI_ERS_RESULT_RECOVERED;
16438                 goto done;
16439         }
16440
16441         err = tg3_power_up(tp);
16442         if (err)
16443                 goto done;
16444
16445         rc = PCI_ERS_RESULT_RECOVERED;
16446
16447 done:
16448         rtnl_unlock();
16449
16450         return rc;
16451 }
16452
16453 /**
16454  * tg3_io_resume - called when traffic can start flowing again.
16455  * @pdev: Pointer to PCI device
16456  *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
16459  */
16460 static void tg3_io_resume(struct pci_dev *pdev)
16461 {
16462         struct net_device *netdev = pci_get_drvdata(pdev);
16463         struct tg3 *tp = netdev_priv(netdev);
16464         int err;
16465
16466         rtnl_lock();
16467
16468         if (!netif_running(netdev))
16469                 goto done;
16470
16471         tg3_full_lock(tp, 0);
16472         tg3_flag_set(tp, INIT_COMPLETE);
16473         err = tg3_restart_hw(tp, 1);
16474         tg3_full_unlock(tp);
16475         if (err) {
16476                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16477                 goto done;
16478         }
16479
16480         netif_device_attach(netdev);
16481
16482         tg3_timer_start(tp);
16483
16484         tg3_netif_start(tp);
16485
16486         tg3_phy_start(tp);
16487
16488 done:
16489         rtnl_unlock();
16490 }
16491
16492 static struct pci_error_handlers tg3_err_handler = {
16493         .error_detected = tg3_io_error_detected,
16494         .slot_reset     = tg3_io_slot_reset,
16495         .resume         = tg3_io_resume
16496 };
16497
16498 static struct pci_driver tg3_driver = {
16499         .name           = DRV_MODULE_NAME,
16500         .id_table       = tg3_pci_tbl,
16501         .probe          = tg3_init_one,
16502         .remove         = __devexit_p(tg3_remove_one),
16503         .err_handler    = &tg3_err_handler,
16504         .driver.pm      = TG3_PM_OPS,
16505 };
16506
16507 static int __init tg3_init(void)
16508 {
16509         return pci_register_driver(&tg3_driver);
16510 }
16511
16512 static void __exit tg3_cleanup(void)
16513 {
16514         pci_unregister_driver(&tg3_driver);
16515 }
16516
16517 module_init(tg3_init);
16518 module_exit(tg3_cleanup);