/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#if IS_ENABLED(CONFIG_HWMON)
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#endif

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
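/* The flag name is token-pasted onto TG3_FLAG_, so, for example,
 * tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags).
 */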

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     125
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "September 26, 2012"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
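/* NEXT_TX is the '& (foo - 1)' form described above: TG3_TX_RING_SIZE
 * is a power of two, so the AND wraps the ring index without a divide.
 */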

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
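/* With the default TG3_DEF_TX_RING_PENDING of 511, the queue is woken
 * once roughly a quarter of the ring (127 descriptors) is free again.
 */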
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test        (online) " },
        { "link test         (online) " },
        { "register test     (offline)" },
        { "memory test       (offline)" },
        { "mac loopback test (offline)" },
        { "phy loopback test (offline)" },
        { "ext loopback test (offline)" },
        { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

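/* Indirect register access: reach chip registers through the PCI
 * config window (TG3PCI_REG_BASE_ADDR/TG3PCI_REG_DATA) rather than the
 * memory-mapped BAR.  indirect_lock serializes the two-step cycle.
 */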
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

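/* Mailbox registers sit at offset 0x5600 (GRCMBOX_BASE) within the
 * register window, hence the 'off + 0x5600' below when a mailbox is
 * reached through the indirect config-space path.
 */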
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
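
/* Typical use, via the tw32_wait_f() wrapper defined below:
 * tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40) writes the register,
 * flushes the posted write, and guarantees at least 40 usec of delay.
 */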

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver hasn't any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

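/* APE lock protocol: write our request bit to the REQ register, then
 * poll the GRANT register for up to 1 msec.  If the grant never
 * arrives, writing the same bit to GRANT revokes the pending request.
 */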
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
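                /* else: fall through to the GRC/MEM handling */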
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
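                /* else: fall through to the GRC/MEM handling */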
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

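/* Scratchpad reads are a doorbell handshake with the APE firmware:
 * post the request in the shared message buffer, ring APE_EVENT_1,
 * wait for EVENT_PENDING to clear, then copy the reply back out.
 */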
int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off, u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000
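/* Worst case, the MDIO polls below spin PHY_BUSY_LOOPS * 10 usec,
 * i.e. about 50 msec, before giving up with -EBUSY.
 */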

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

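/* Clause 45 registers are reached through the clause 22 MMD
 * control/address registers: select the devad, latch the register
 * address, switch to no-increment data mode, then move the data.
 */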
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1563         delay_cnt = (delay_cnt >> 3) + 1;
1564
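        /* Poll in 8 usec steps; delay_cnt >> 3 converts the remaining
         * microseconds into a poll count (the +1 rounds up).
         */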
1565         for (i = 0; i < delay_cnt; i++) {
1566                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1567                         break;
1568                 udelay(8);
1569         }
1570 }
1571
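/* Pack pairs of 16-bit MII registers (BMCR/BMSR, ADVERTISE/LPA,
 * CTRL1000/STAT1000, PHYADDR) into four 32-bit words for the firmware.
 */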
1572 /* tp->lock is held. */
1573 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1574 {
1575         u32 reg, val;
1576
1577         val = 0;
1578         if (!tg3_readphy(tp, MII_BMCR, &reg))
1579                 val = reg << 16;
1580         if (!tg3_readphy(tp, MII_BMSR, &reg))
1581                 val |= (reg & 0xffff);
1582         *data++ = val;
1583
1584         val = 0;
1585         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1586                 val = reg << 16;
1587         if (!tg3_readphy(tp, MII_LPA, &reg))
1588                 val |= (reg & 0xffff);
1589         *data++ = val;
1590
1591         val = 0;
1592         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1593                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1594                         val = reg << 16;
1595                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1596                         val |= (reg & 0xffff);
1597         }
1598         *data++ = val;
1599
1600         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1601                 val = reg << 16;
1602         else
1603                 val = 0;
1604         *data++ = val;
1605 }
1606
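/* Driver-to-firmware event protocol: wait for the firmware to ACK the
 * previous event (it clears GRC_RX_CPU_DRIVER_EVENT), write the command,
 * length and data words into the NIC SRAM mailboxes, then raise the
 * event bit again to alert the firmware.
 */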
1607 /* tp->lock is held. */
1608 static void tg3_ump_link_report(struct tg3 *tp)
1609 {
1610         u32 data[4];
1611
1612         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1613                 return;
1614
1615         tg3_phy_gather_ump_data(tp, data);
1616
1617         tg3_wait_for_event_ack(tp);
1618
1619         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1620         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1621         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1622         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1623         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1624         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1625
1626         tg3_generate_fw_event(tp);
1627 }
1628
1629 /* tp->lock is held. */
1630 static void tg3_stop_fw(struct tg3 *tp)
1631 {
1632         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1633                 /* Wait for RX cpu to ACK the previous event. */
1634                 tg3_wait_for_event_ack(tp);
1635
1636                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1637
1638                 tg3_generate_fw_event(tp);
1639
1640                 /* Wait for RX cpu to ACK this event. */
1641                 tg3_wait_for_event_ack(tp);
1642         }
1643 }
1644
1645 /* tp->lock is held. */
1646 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1647 {
1648         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1649                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1650
1651         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1652                 switch (kind) {
1653                 case RESET_KIND_INIT:
1654                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1655                                       DRV_STATE_START);
1656                         break;
1657
1658                 case RESET_KIND_SHUTDOWN:
1659                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1660                                       DRV_STATE_UNLOAD);
1661                         break;
1662
1663                 case RESET_KIND_SUSPEND:
1664                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1665                                       DRV_STATE_SUSPEND);
1666                         break;
1667
1668                 default:
1669                         break;
1670                 }
1671         }
1672
1673         if (kind == RESET_KIND_INIT ||
1674             kind == RESET_KIND_SUSPEND)
1675                 tg3_ape_driver_state_change(tp, kind);
1676 }
1677
1678 /* tp->lock is held. */
1679 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1680 {
1681         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1682                 switch (kind) {
1683                 case RESET_KIND_INIT:
1684                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1685                                       DRV_STATE_START_DONE);
1686                         break;
1687
1688                 case RESET_KIND_SHUTDOWN:
1689                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1690                                       DRV_STATE_UNLOAD_DONE);
1691                         break;
1692
1693                 default:
1694                         break;
1695                 }
1696         }
1697
1698         if (kind == RESET_KIND_SHUTDOWN)
1699                 tg3_ape_driver_state_change(tp, kind);
1700 }
1701
1702 /* tp->lock is held. */
1703 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1704 {
1705         if (tg3_flag(tp, ENABLE_ASF)) {
1706                 switch (kind) {
1707                 case RESET_KIND_INIT:
1708                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1709                                       DRV_STATE_START);
1710                         break;
1711
1712                 case RESET_KIND_SHUTDOWN:
1713                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1714                                       DRV_STATE_UNLOAD);
1715                         break;
1716
1717                 case RESET_KIND_SUSPEND:
1718                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1719                                       DRV_STATE_SUSPEND);
1720                         break;
1721
1722                 default:
1723                         break;
1724                 }
1725         }
1726 }
1727
1728 static int tg3_poll_fw(struct tg3 *tp)
1729 {
1730         int i;
1731         u32 val;
1732
1733         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1734                 /* Wait up to 20ms for init done. */
1735                 for (i = 0; i < 200; i++) {
1736                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1737                                 return 0;
1738                         udelay(100);
1739                 }
1740                 return -ENODEV;
1741         }
1742
1743         /* Wait for firmware initialization to complete. */
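        /* The driver writes MAGIC1 into the mailbox before resetting the
         * chip; the boot firmware writes back the one's complement once
         * it has finished initializing.
         */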
1744         for (i = 0; i < 100000; i++) {
1745                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1746                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1747                         break;
1748                 udelay(10);
1749         }
1750
1751         /* Chip might not be fitted with firmware.  Some Sun onboard
1752          * parts are configured like that.  So don't signal the timeout
1753          * of the above loop as an error, but do report the lack of
1754          * running firmware once.
1755          */
1756         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1757                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1758
1759                 netdev_info(tp->dev, "No firmware running\n");
1760         }
1761
1762         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1763                 /* The 57765 A0 needs a little more
1764                  * time to do some important work.
1765                  */
1766                 mdelay(10);
1767         }
1768
1769         return 0;
1770 }
1771
1772 static void tg3_link_report(struct tg3 *tp)
1773 {
1774         if (!netif_carrier_ok(tp->dev)) {
1775                 netif_info(tp, link, tp->dev, "Link is down\n");
1776                 tg3_ump_link_report(tp);
1777         } else if (netif_msg_link(tp)) {
1778                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1779                             (tp->link_config.active_speed == SPEED_1000 ?
1780                              1000 :
1781                              (tp->link_config.active_speed == SPEED_100 ?
1782                               100 : 10)),
1783                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1784                              "full" : "half"));
1785
1786                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1787                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1788                             "on" : "off",
1789                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1790                             "on" : "off");
1791
1792                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1793                         netdev_info(tp->dev, "EEE is %s\n",
1794                                     tp->setlpicnt ? "enabled" : "disabled");
1795
1796                 tg3_ump_link_report(tp);
1797         }
1798 }
1799
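/* Map the driver's RX/TX pause settings onto the 1000BASE-X PAUSE and
 * ASM_DIR advertisement bits per IEEE 802.3 Annex 28B:
 *
 *   TX + RX -> PAUSE
 *   TX only -> ASM_DIR
 *   RX only -> PAUSE | ASM_DIR
 *   none    -> 0
 */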
1800 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1801 {
1802         u16 miireg;
1803
1804         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1805                 miireg = ADVERTISE_1000XPAUSE;
1806         else if (flow_ctrl & FLOW_CTRL_TX)
1807                 miireg = ADVERTISE_1000XPSE_ASYM;
1808         else if (flow_ctrl & FLOW_CTRL_RX)
1809                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1810         else
1811                 miireg = 0;
1812
1813         return miireg;
1814 }
1815
1816 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1817 {
1818         u8 cap = 0;
1819
1820         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1821                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1822         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1823                 if (lcladv & ADVERTISE_1000XPAUSE)
1824                         cap = FLOW_CTRL_RX;
1825                 if (rmtadv & ADVERTISE_1000XPAUSE)
1826                         cap = FLOW_CTRL_TX;
1827         }
1828
1829         return cap;
1830 }
1831
1832 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1833 {
1834         u8 autoneg;
1835         u8 flowctrl = 0;
1836         u32 old_rx_mode = tp->rx_mode;
1837         u32 old_tx_mode = tp->tx_mode;
1838
1839         if (tg3_flag(tp, USE_PHYLIB))
1840                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1841         else
1842                 autoneg = tp->link_config.autoneg;
1843
1844         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1845                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1846                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1847                 else
1848                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1849         } else
1850                 flowctrl = tp->link_config.flowctrl;
1851
1852         tp->link_config.active_flowctrl = flowctrl;
1853
1854         if (flowctrl & FLOW_CTRL_RX)
1855                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1856         else
1857                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1858
1859         if (old_rx_mode != tp->rx_mode)
1860                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1861
1862         if (flowctrl & FLOW_CTRL_TX)
1863                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1864         else
1865                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1866
1867         if (old_tx_mode != tp->tx_mode)
1868                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1869 }
1870
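/* phylib link-change callback.  Translates the PHY's negotiated state
 * into MAC mode and flow-control register settings, and logs the result
 * when anything changed.
 */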
1871 static void tg3_adjust_link(struct net_device *dev)
1872 {
1873         u8 oldflowctrl, linkmesg = 0;
1874         u32 mac_mode, lcl_adv, rmt_adv;
1875         struct tg3 *tp = netdev_priv(dev);
1876         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1877
1878         spin_lock_bh(&tp->lock);
1879
1880         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1881                                     MAC_MODE_HALF_DUPLEX);
1882
1883         oldflowctrl = tp->link_config.active_flowctrl;
1884
1885         if (phydev->link) {
1886                 lcl_adv = 0;
1887                 rmt_adv = 0;
1888
1889                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1890                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1891                 else if (phydev->speed == SPEED_1000 ||
1892                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1893                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1894                 else
1895                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1896
1897                 if (phydev->duplex == DUPLEX_HALF)
1898                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1899                 else {
1900                         lcl_adv = mii_advertise_flowctrl(
1901                                   tp->link_config.flowctrl);
1902
1903                         if (phydev->pause)
1904                                 rmt_adv = LPA_PAUSE_CAP;
1905                         if (phydev->asym_pause)
1906                                 rmt_adv |= LPA_PAUSE_ASYM;
1907                 }
1908
1909                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1910         } else
1911                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1912
1913         if (mac_mode != tp->mac_mode) {
1914                 tp->mac_mode = mac_mode;
1915                 tw32_f(MAC_MODE, tp->mac_mode);
1916                 udelay(40);
1917         }
1918
1919         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1920                 if (phydev->speed == SPEED_10)
1921                         tw32(MAC_MI_STAT,
1922                              MAC_MI_STAT_10MBPS_MODE |
1923                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1924                 else
1925                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1926         }
1927
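        /* Half-duplex gigabit gets the maximum slot time, presumably to
         * cover 802.3 carrier extension; all other modes use the default.
         */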
1928         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1929                 tw32(MAC_TX_LENGTHS,
1930                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1931                       (6 << TX_LENGTHS_IPG_SHIFT) |
1932                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1933         else
1934                 tw32(MAC_TX_LENGTHS,
1935                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1936                       (6 << TX_LENGTHS_IPG_SHIFT) |
1937                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1938
1939         if (phydev->link != tp->old_link ||
1940             phydev->speed != tp->link_config.active_speed ||
1941             phydev->duplex != tp->link_config.active_duplex ||
1942             oldflowctrl != tp->link_config.active_flowctrl)
1943                 linkmesg = 1;
1944
1945         tp->old_link = phydev->link;
1946         tp->link_config.active_speed = phydev->speed;
1947         tp->link_config.active_duplex = phydev->duplex;
1948
1949         spin_unlock_bh(&tp->lock);
1950
1951         if (linkmesg)
1952                 tg3_link_report(tp);
1953 }
1954
1955 static int tg3_phy_init(struct tg3 *tp)
1956 {
1957         struct phy_device *phydev;
1958
1959         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1960                 return 0;
1961
1962         /* Bring the PHY back to a known state. */
1963         tg3_bmcr_reset(tp);
1964
1965         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1966
1967         /* Attach the MAC to the PHY. */
1968         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1969                              phydev->dev_flags, phydev->interface);
1970         if (IS_ERR(phydev)) {
1971                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1972                 return PTR_ERR(phydev);
1973         }
1974
1975         /* Mask with MAC supported features. */
1976         switch (phydev->interface) {
1977         case PHY_INTERFACE_MODE_GMII:
1978         case PHY_INTERFACE_MODE_RGMII:
1979                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1980                         phydev->supported &= (PHY_GBIT_FEATURES |
1981                                               SUPPORTED_Pause |
1982                                               SUPPORTED_Asym_Pause);
1983                         break;
1984                 }
1985                 /* fallthru */
1986         case PHY_INTERFACE_MODE_MII:
1987                 phydev->supported &= (PHY_BASIC_FEATURES |
1988                                       SUPPORTED_Pause |
1989                                       SUPPORTED_Asym_Pause);
1990                 break;
1991         default:
1992                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1993                 return -EINVAL;
1994         }
1995
1996         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1997
1998         phydev->advertising = phydev->supported;
1999
2000         return 0;
2001 }
2002
2003 static void tg3_phy_start(struct tg3 *tp)
2004 {
2005         struct phy_device *phydev;
2006
2007         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2008                 return;
2009
2010         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2011
2012         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2013                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2014                 phydev->speed = tp->link_config.speed;
2015                 phydev->duplex = tp->link_config.duplex;
2016                 phydev->autoneg = tp->link_config.autoneg;
2017                 phydev->advertising = tp->link_config.advertising;
2018         }
2019
2020         phy_start(phydev);
2021
2022         phy_start_aneg(phydev);
2023 }
2024
2025 static void tg3_phy_stop(struct tg3 *tp)
2026 {
2027         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2028                 return;
2029
2030         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2031 }
2032
2033 static void tg3_phy_fini(struct tg3 *tp)
2034 {
2035         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2036                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2037                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2038         }
2039 }
2040
2041 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2042 {
2043         int err;
2044         u32 val;
2045
2046         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2047                 return 0;
2048
2049         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2050                 /* Cannot do read-modify-write on 5401 */
2051                 err = tg3_phy_auxctl_write(tp,
2052                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2053                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2054                                            0x4c20);
2055                 goto done;
2056         }
2057
2058         err = tg3_phy_auxctl_read(tp,
2059                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2060         if (err)
2061                 return err;
2062
2063         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2064         err = tg3_phy_auxctl_write(tp,
2065                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2066
2067 done:
2068         return err;
2069 }
2070
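/* APD (auto power-down) control for FET-style PHYs.  Their auxiliary
 * registers live behind a shadow bank: set MII_TG3_FET_SHADOW_EN in the
 * test register, read-modify-write the shadow register, then restore
 * the test register.
 */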
2071 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2072 {
2073         u32 phytest;
2074
2075         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2076                 u32 phy;
2077
2078                 tg3_writephy(tp, MII_TG3_FET_TEST,
2079                              phytest | MII_TG3_FET_SHADOW_EN);
2080                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2081                         if (enable)
2082                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2083                         else
2084                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2085                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2086                 }
2087                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2088         }
2089 }
2090
2091 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2092 {
2093         u32 reg;
2094
2095         if (!tg3_flag(tp, 5705_PLUS) ||
2096             (tg3_flag(tp, 5717_PLUS) &&
2097              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2098                 return;
2099
2100         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2101                 tg3_phy_fet_toggle_apd(tp, enable);
2102                 return;
2103         }
2104
2105         reg = MII_TG3_MISC_SHDW_WREN |
2106               MII_TG3_MISC_SHDW_SCR5_SEL |
2107               MII_TG3_MISC_SHDW_SCR5_LPED |
2108               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2109               MII_TG3_MISC_SHDW_SCR5_SDTL |
2110               MII_TG3_MISC_SHDW_SCR5_C125OE;
2111         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2112                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2113
2114         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2115
2117         reg = MII_TG3_MISC_SHDW_WREN |
2118               MII_TG3_MISC_SHDW_APD_SEL |
2119               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2120         if (enable)
2121                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2122
2123         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2124 }
2125
2126 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2127 {
2128         u32 phy;
2129
2130         if (!tg3_flag(tp, 5705_PLUS) ||
2131             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2132                 return;
2133
2134         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2135                 u32 ephy;
2136
2137                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2138                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2139
2140                         tg3_writephy(tp, MII_TG3_FET_TEST,
2141                                      ephy | MII_TG3_FET_SHADOW_EN);
2142                         if (!tg3_readphy(tp, reg, &phy)) {
2143                                 if (enable)
2144                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2145                                 else
2146                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2147                                 tg3_writephy(tp, reg, phy);
2148                         }
2149                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2150                 }
2151         } else {
2152                 int ret;
2153
2154                 ret = tg3_phy_auxctl_read(tp,
2155                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2156                 if (!ret) {
2157                         if (enable)
2158                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2159                         else
2160                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2161                         tg3_phy_auxctl_write(tp,
2162                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2163                 }
2164         }
2165 }
2166
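/* "Wirespeed" is Broadcom's automatic downshift feature: the PHY falls
 * back to a lower speed instead of dropping the link when the cabling
 * cannot sustain gigabit.
 */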
2167 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2168 {
2169         int ret;
2170         u32 val;
2171
2172         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2173                 return;
2174
2175         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2176         if (!ret)
2177                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2178                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2179 }
2180
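/* tp->phy_otp carries factory calibration values read from the chip's
 * one-time-programmable memory; unpack the fields into the PHY's shadow
 * DSP registers.
 */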
2181 static void tg3_phy_apply_otp(struct tg3 *tp)
2182 {
2183         u32 otp, phy;
2184
2185         if (!tp->phy_otp)
2186                 return;
2187
2188         otp = tp->phy_otp;
2189
2190         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2191                 return;
2192
2193         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2194         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2195         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2196
2197         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2198               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2199         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2200
2201         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2202         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2203         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2204
2205         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2206         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2207
2208         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2209         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2210
2211         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2212               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2213         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2214
2215         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2216 }
2217
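/* EEE (Energy Efficient Ethernet, IEEE 802.3az) lets both link partners
 * enter low-power idle (LPI) between packets.  It only applies on an
 * autonegotiated full-duplex 100 or 1000 Mbps link.
 */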
2218 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2219 {
2220         u32 val;
2221
2222         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2223                 return;
2224
2225         tp->setlpicnt = 0;
2226
2227         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2228             current_link_up == 1 &&
2229             tp->link_config.active_duplex == DUPLEX_FULL &&
2230             (tp->link_config.active_speed == SPEED_100 ||
2231              tp->link_config.active_speed == SPEED_1000)) {
2232                 u32 eeectl;
2233
2234                 if (tp->link_config.active_speed == SPEED_1000)
2235                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2236                 else
2237                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2238
2239                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2240
2241                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2242                                   TG3_CL45_D7_EEERES_STAT, &val);
2243
2244                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2245                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2246                         tp->setlpicnt = 2;
2247         }
2248
2249         if (!tp->setlpicnt) {
2250                 if (current_link_up == 1 &&
2251                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2252                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2253                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2254                 }
2255
2256                 val = tr32(TG3_CPMU_EEE_MODE);
2257                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2258         }
2259 }
2260
2261 static void tg3_phy_eee_enable(struct tg3 *tp)
2262 {
2263         u32 val;
2264
2265         if (tp->link_config.active_speed == SPEED_1000 &&
2266             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2267              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2268              tg3_flag(tp, 57765_CLASS)) &&
2269             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2270                 val = MII_TG3_DSP_TAP26_ALNOKO |
2271                       MII_TG3_DSP_TAP26_RMRXSTO;
2272                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2273                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2274         }
2275
2276         val = tr32(TG3_CPMU_EEE_MODE);
2277         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2278 }
2279
2280 static int tg3_wait_macro_done(struct tg3 *tp)
2281 {
2282         int limit = 100;
2283
2284         while (limit--) {
2285                 u32 tmp32;
2286
2287                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2288                         if ((tmp32 & 0x1000) == 0)
2289                                 break;
2290                 }
2291         }
2292         if (limit < 0)
2293                 return -EBUSY;
2294
2295         return 0;
2296 }
2297
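/* Write a known test pattern into each of the four DSP channels and
 * read it back; any mismatch or macro timeout flags the PHY for another
 * reset attempt via *resetp.
 */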
2298 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2299 {
2300         static const u32 test_pat[4][6] = {
2301         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2302         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2303         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2304         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2305         };
2306         int chan;
2307
2308         for (chan = 0; chan < 4; chan++) {
2309                 int i;
2310
2311                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2312                              (chan * 0x2000) | 0x0200);
2313                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2314
2315                 for (i = 0; i < 6; i++)
2316                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2317                                      test_pat[chan][i]);
2318
2319                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2320                 if (tg3_wait_macro_done(tp)) {
2321                         *resetp = 1;
2322                         return -EBUSY;
2323                 }
2324
2325                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2326                              (chan * 0x2000) | 0x0200);
2327                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2328                 if (tg3_wait_macro_done(tp)) {
2329                         *resetp = 1;
2330                         return -EBUSY;
2331                 }
2332
2333                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2334                 if (tg3_wait_macro_done(tp)) {
2335                         *resetp = 1;
2336                         return -EBUSY;
2337                 }
2338
2339                 for (i = 0; i < 6; i += 2) {
2340                         u32 low, high;
2341
2342                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2343                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2344                             tg3_wait_macro_done(tp)) {
2345                                 *resetp = 1;
2346                                 return -EBUSY;
2347                         }
2348                         low &= 0x7fff;
2349                         high &= 0x000f;
2350                         if (low != test_pat[chan][i] ||
2351                             high != test_pat[chan][i+1]) {
2352                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2353                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2354                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2355
2356                                 return -EBUSY;
2357                         }
2358                 }
2359         }
2360
2361         return 0;
2362 }
2363
2364 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2365 {
2366         int chan;
2367
2368         for (chan = 0; chan < 4; chan++) {
2369                 int i;
2370
2371                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2372                              (chan * 0x2000) | 0x0200);
2373                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2374                 for (i = 0; i < 6; i++)
2375                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2376                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2377                 if (tg3_wait_macro_done(tp))
2378                         return -EBUSY;
2379         }
2380
2381         return 0;
2382 }
2383
2384 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2385 {
2386         u32 reg32, phy9_orig;
2387         int retries, do_phy_reset, err;
2388
2389         retries = 10;
2390         do_phy_reset = 1;
2391         do {
2392                 if (do_phy_reset) {
2393                         err = tg3_bmcr_reset(tp);
2394                         if (err)
2395                                 return err;
2396                         do_phy_reset = 0;
2397                 }
2398
2399                 /* Disable transmitter and interrupt.  */
2400                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2401                         continue;
2402
2403                 reg32 |= 0x3000;
2404                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2405
2406                 /* Set full-duplex, 1000 mbps.  */
2407                 tg3_writephy(tp, MII_BMCR,
2408                              BMCR_FULLDPLX | BMCR_SPEED1000);
2409
2410                 /* Set to master mode.  */
2411                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2412                         continue;
2413
2414                 tg3_writephy(tp, MII_CTRL1000,
2415                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2416
2417                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2418                 if (err)
2419                         return err;
2420
2421                 /* Block the PHY control access.  */
2422                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2423
2424                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2425                 if (!err)
2426                         break;
2427         } while (--retries);
2428
2429         err = tg3_phy_reset_chanpat(tp);
2430         if (err)
2431                 return err;
2432
2433         tg3_phydsp_write(tp, 0x8005, 0x0000);
2434
2435         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2436         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2437
2438         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2439
2440         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2441
2442         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2443                 reg32 &= ~0x3000;
2444                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2445         } else if (!err)
2446                 err = -EBUSY;
2447
2448         return err;
2449 }
2450
2451 /* Reset the tigon3 PHY and apply any chip-specific workarounds,
2452  * leaving it in a known, operational state.
2453  */
2454 static int tg3_phy_reset(struct tg3 *tp)
2455 {
2456         u32 val, cpmuctrl;
2457         int err;
2458
2459         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2460                 val = tr32(GRC_MISC_CFG);
2461                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2462                 udelay(40);
2463         }
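
        /* BMSR link status is latched low, so read it twice to get the
         * current state.
         */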
2464         err  = tg3_readphy(tp, MII_BMSR, &val);
2465         err |= tg3_readphy(tp, MII_BMSR, &val);
2466         if (err != 0)
2467                 return -EBUSY;
2468
2469         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2470                 netif_carrier_off(tp->dev);
2471                 tg3_link_report(tp);
2472         }
2473
2474         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2475             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2476             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2477                 err = tg3_phy_reset_5703_4_5(tp);
2478                 if (err)
2479                         return err;
2480                 goto out;
2481         }
2482
2483         cpmuctrl = 0;
2484         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2485             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2486                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2487                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2488                         tw32(TG3_CPMU_CTRL,
2489                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2490         }
2491
2492         err = tg3_bmcr_reset(tp);
2493         if (err)
2494                 return err;
2495
2496         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2497                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2498                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2499
2500                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2501         }
2502
2503         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2504             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2505                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2506                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2507                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2508                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2509                         udelay(40);
2510                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2511                 }
2512         }
2513
2514         if (tg3_flag(tp, 5717_PLUS) &&
2515             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2516                 return 0;
2517
2518         tg3_phy_apply_otp(tp);
2519
2520         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2521                 tg3_phy_toggle_apd(tp, true);
2522         else
2523                 tg3_phy_toggle_apd(tp, false);
2524
2525 out:
2526         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2527             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2528                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2529                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2530                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2531         }
2532
2533         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2534                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2535                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2536         }
2537
2538         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2539                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2540                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2541                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2542                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2543                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2544                 }
2545         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2546                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2547                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2548                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2549                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2550                                 tg3_writephy(tp, MII_TG3_TEST1,
2551                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2552                         } else
2553                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2554
2555                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2556                 }
2557         }
2558
2559         /* Set Extended packet length bit (bit 14) on all chips that
2560          * support jumbo frames. */
2561         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2562                 /* Cannot do read-modify-write on 5401 */
2563                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2564         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2565                 /* Set bit 14 with read-modify-write to preserve other bits */
2566                 err = tg3_phy_auxctl_read(tp,
2567                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2568                 if (!err)
2569                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2570                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2571         }
2572
2573         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2574          * jumbo frame transmission.
2575          */
2576         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2577                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2578                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2579                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2580         }
2581
2582         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2583                 /* adjust output voltage */
2584                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2585         }
2586
2587         tg3_phy_toggle_automdix(tp, 1);
2588         tg3_phy_set_wirespeed(tp);
2589         return 0;
2590 }
2591
2592 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2593 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2594 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2595                                           TG3_GPIO_MSG_NEED_VAUX)
2596 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2597         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2598          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2599          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2600          (TG3_GPIO_MSG_DRVR_PRES << 12))
2601
2602 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2603         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2604          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2605          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2606          (TG3_GPIO_MSG_NEED_VAUX << 12))
2607
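/* Each PCI function owns a 4-bit nibble within the GPIO message field:
 *
 *   bits  3:0  - function 0      bits 11:8  - function 2
 *   bits  7:4  - function 1      bits 15:12 - function 3
 *
 * The ALL_*_MASK macros test the same flag across all four functions.
 */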
2608 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2609 {
2610         u32 status, shift;
2611
2612         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2613             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2614                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2615         else
2616                 status = tr32(TG3_CPMU_DRV_STATUS);
2617
2618         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2619         status &= ~(TG3_GPIO_MSG_MASK << shift);
2620         status |= (newstat << shift);
2621
2622         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2623             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2624                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2625         else
2626                 tw32(TG3_CPMU_DRV_STATUS, status);
2627
2628         return status >> TG3_APE_GPIO_MSG_SHIFT;
2629 }
2630
2631 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2632 {
2633         if (!tg3_flag(tp, IS_NIC))
2634                 return 0;
2635
2636         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2637             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2638             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2639                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2640                         return -EIO;
2641
2642                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2643
2644                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2645                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2646
2647                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2648         } else {
2649                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2650                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2651         }
2652
2653         return 0;
2654 }
2655
2656 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2657 {
2658         u32 grc_local_ctrl;
2659
2660         if (!tg3_flag(tp, IS_NIC) ||
2661             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2662             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2663                 return;
2664
2665         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2666
2667         tw32_wait_f(GRC_LOCAL_CTRL,
2668                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2669                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2670
2671         tw32_wait_f(GRC_LOCAL_CTRL,
2672                     grc_local_ctrl,
2673                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2674
2675         tw32_wait_f(GRC_LOCAL_CTRL,
2676                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2677                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2678 }
2679
2680 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2681 {
2682         if (!tg3_flag(tp, IS_NIC))
2683                 return;
2684
2685         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2686             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2687                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2688                             (GRC_LCLCTRL_GPIO_OE0 |
2689                              GRC_LCLCTRL_GPIO_OE1 |
2690                              GRC_LCLCTRL_GPIO_OE2 |
2691                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2692                              GRC_LCLCTRL_GPIO_OUTPUT1),
2693                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2694         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2695                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2696                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2697                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2698                                      GRC_LCLCTRL_GPIO_OE1 |
2699                                      GRC_LCLCTRL_GPIO_OE2 |
2700                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2701                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2702                                      tp->grc_local_ctrl;
2703                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2704                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2705
2706                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2707                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2708                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2709
2710                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2711                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2712                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2713         } else {
2714                 u32 no_gpio2;
2715                 u32 grc_local_ctrl = 0;
2716
2717                 /* Workaround to prevent overdrawing current. */
2718                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2719                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2720                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2721                                     grc_local_ctrl,
2722                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2723                 }
2724
2725                 /* On 5753 and variants, GPIO2 cannot be used. */
2726                 no_gpio2 = tp->nic_sram_data_cfg &
2727                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2728
2729                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2730                                   GRC_LCLCTRL_GPIO_OE1 |
2731                                   GRC_LCLCTRL_GPIO_OE2 |
2732                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2733                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2734                 if (no_gpio2) {
2735                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2736                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2737                 }
2738                 tw32_wait_f(GRC_LOCAL_CTRL,
2739                             tp->grc_local_ctrl | grc_local_ctrl,
2740                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2741
2742                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2743
2744                 tw32_wait_f(GRC_LOCAL_CTRL,
2745                             tp->grc_local_ctrl | grc_local_ctrl,
2746                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2747
2748                 if (!no_gpio2) {
2749                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2750                         tw32_wait_f(GRC_LOCAL_CTRL,
2751                                     tp->grc_local_ctrl | grc_local_ctrl,
2752                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2753                 }
2754         }
2755 }
2756
2757 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2758 {
2759         u32 msg = 0;
2760
2761         /* Serialize power state transitions */
2762         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2763                 return;
2764
2765         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2766                 msg = TG3_GPIO_MSG_NEED_VAUX;
2767
2768         msg = tg3_set_function_status(tp, msg);
2769
2770         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2771                 goto done;
2772
2773         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2774                 tg3_pwrsrc_switch_to_vaux(tp);
2775         else
2776                 tg3_pwrsrc_die_with_vmain(tp);
2777
2778 done:
2779         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2780 }
2781
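/* Decide whether this board must keep running from auxiliary power
 * (Vaux), which WoL or ASF on this device or its peer requires, or may
 * power down to Vmain.
 */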
2782 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2783 {
2784         bool need_vaux = false;
2785
2786         /* The GPIOs do something completely different on 57765. */
2787         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2788                 return;
2789
2790         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2791             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2792             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2793                 tg3_frob_aux_power_5717(tp, include_wol ?
2794                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2795                 return;
2796         }
2797
2798         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2799                 struct net_device *dev_peer;
2800
2801                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2802
2803                 /* remove_one() may have been run on the peer. */
2804                 if (dev_peer) {
2805                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2806
2807                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2808                                 return;
2809
2810                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2811                             tg3_flag(tp_peer, ENABLE_ASF))
2812                                 need_vaux = true;
2813                 }
2814         }
2815
2816         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2817             tg3_flag(tp, ENABLE_ASF))
2818                 need_vaux = true;
2819
2820         if (need_vaux)
2821                 tg3_pwrsrc_switch_to_vaux(tp);
2822         else
2823                 tg3_pwrsrc_die_with_vmain(tp);
2824 }
2825
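/* Used when deciding whether to set the MAC's link-polarity bit on
 * 5700-class boards: always set in PHY_2 LED mode; for 5411 PHYs set
 * except at 10 Mbps; otherwise set only at 10 Mbps.
 */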
2826 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2827 {
2828         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2829                 return 1;
2830         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2831                 if (speed != SPEED_10)
2832                         return 1;
2833         } else if (speed == SPEED_10)
2834                 return 1;
2835
2836         return 0;
2837 }
2838
2839 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2840 {
2841         u32 val;
2842
2843         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2844                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2845                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2846                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2847
2848                         sg_dig_ctrl |=
2849                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2850                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2851                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2852                 }
2853                 return;
2854         }
2855
2856         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2857                 tg3_bmcr_reset(tp);
2858                 val = tr32(GRC_MISC_CFG);
2859                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2860                 udelay(40);
2861                 return;
2862         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2863                 u32 phytest;
2864                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2865                         u32 phy;
2866
2867                         tg3_writephy(tp, MII_ADVERTISE, 0);
2868                         tg3_writephy(tp, MII_BMCR,
2869                                      BMCR_ANENABLE | BMCR_ANRESTART);
2870
2871                         tg3_writephy(tp, MII_TG3_FET_TEST,
2872                                      phytest | MII_TG3_FET_SHADOW_EN);
2873                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2874                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2875                                 tg3_writephy(tp,
2876                                              MII_TG3_FET_SHDW_AUXMODE4,
2877                                              phy);
2878                         }
2879                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2880                 }
2881                 return;
2882         } else if (do_low_power) {
2883                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2884                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2885
2886                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2887                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2888                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2889                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2890         }
2891
2892         /* The PHY should not be powered down on some chips because
2893          * of bugs.
2894          */
2895         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2896             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2897             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2898              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2899             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2900              !tp->pci_fn))
2901                 return;
2902
2903         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2904             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2905                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2906                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2907                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2908                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2909         }
2910
2911         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2912 }
2913
2914 /* tp->lock is held. */
2915 static int tg3_nvram_lock(struct tg3 *tp)
2916 {
2917         if (tg3_flag(tp, NVRAM)) {
2918                 int i;
2919
2920                 if (tp->nvram_lock_cnt == 0) {
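                        /* First acquisition: request the hardware arbitration
                         * semaphore and poll for the grant, allowing up to
                         * 8000 x 20 us (~160 ms) before giving up.
                         */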
2921                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2922                         for (i = 0; i < 8000; i++) {
2923                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2924                                         break;
2925                                 udelay(20);
2926                         }
2927                         if (i == 8000) {
2928                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2929                                 return -ENODEV;
2930                         }
2931                 }
2932                 tp->nvram_lock_cnt++;
2933         }
2934         return 0;
2935 }
2936
2937 /* tp->lock is held. */
2938 static void tg3_nvram_unlock(struct tg3 *tp)
2939 {
2940         if (tg3_flag(tp, NVRAM)) {
2941                 if (tp->nvram_lock_cnt > 0)
2942                         tp->nvram_lock_cnt--;
2943                 if (tp->nvram_lock_cnt == 0)
2944                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2945         }
2946 }
2947
2948 /* tp->lock is held. */
2949 static void tg3_enable_nvram_access(struct tg3 *tp)
2950 {
2951         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2952                 u32 nvaccess = tr32(NVRAM_ACCESS);
2953
2954                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2955         }
2956 }
2957
2958 /* tp->lock is held. */
2959 static void tg3_disable_nvram_access(struct tg3 *tp)
2960 {
2961         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2962                 u32 nvaccess = tr32(NVRAM_ACCESS);
2963
2964                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2965         }
2966 }
2967
2968 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2969                                         u32 offset, u32 *val)
2970 {
2971         u32 tmp;
2972         int i;
2973
2974         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2975                 return -EINVAL;
2976
2977         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2978                                         EEPROM_ADDR_DEVID_MASK |
2979                                         EEPROM_ADDR_READ);
2980         tw32(GRC_EEPROM_ADDR,
2981              tmp |
2982              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2983              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2984               EEPROM_ADDR_ADDR_MASK) |
2985              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2986
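        /* Poll for completion; 1000 iterations at 1 ms bounds the wait
         * at roughly one second.
         */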
2987         for (i = 0; i < 1000; i++) {
2988                 tmp = tr32(GRC_EEPROM_ADDR);
2989
2990                 if (tmp & EEPROM_ADDR_COMPLETE)
2991                         break;
2992                 msleep(1);
2993         }
2994         if (!(tmp & EEPROM_ADDR_COMPLETE))
2995                 return -EBUSY;
2996
2997         tmp = tr32(GRC_EEPROM_DATA);
2998
2999         /*
3000          * The data will always be opposite the native endian
3001          * format.  Perform a blind byteswap to compensate.
3002          */
3003         *val = swab32(tmp);
3004
3005         return 0;
3006 }
3007
3008 #define NVRAM_CMD_TIMEOUT 10000
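/* 10000 polls at 10 us each bounds each NVRAM command at roughly 100 ms. */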
3009
3010 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3011 {
3012         int i;
3013
3014         tw32(NVRAM_CMD, nvram_cmd);
3015         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3016                 udelay(10);
3017                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3018                         udelay(10);
3019                         break;
3020                 }
3021         }
3022
3023         if (i == NVRAM_CMD_TIMEOUT)
3024                 return -EBUSY;
3025
3026         return 0;
3027 }
3028
3029 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3030 {
3031         if (tg3_flag(tp, NVRAM) &&
3032             tg3_flag(tp, NVRAM_BUFFERED) &&
3033             tg3_flag(tp, FLASH) &&
3034             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3035             (tp->nvram_jedecnum == JEDEC_ATMEL))
3036
3037                 addr = ((addr / tp->nvram_pagesize) <<
3038                         ATMEL_AT45DB0X1B_PAGE_POS) +
3039                        (addr % tp->nvram_pagesize);
3040
3041         return addr;
3042 }
3043
3044 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3045 {
3046         if (tg3_flag(tp, NVRAM) &&
3047             tg3_flag(tp, NVRAM_BUFFERED) &&
3048             tg3_flag(tp, FLASH) &&
3049             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3050             (tp->nvram_jedecnum == JEDEC_ATMEL))
3051
3052                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3053                         tp->nvram_pagesize) +
3054                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3055
3056         return addr;
3057 }
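
/* A worked example of the translation pair above, assuming the common
 * Atmel AT45DB0X1B geometry (264-byte pages, ATMEL_AT45DB0X1B_PAGE_POS == 9):
 * linear address 264 is page 1, offset 0, so tg3_nvram_phys_addr() yields
 * (1 << 9) + 0 = 0x200, and tg3_nvram_logical_addr() maps 0x200 back to
 * (1 * 264) + 0 = 264.
 */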
3058
3059 /* NOTE: Data read in from NVRAM is byteswapped according to
3060  * the byteswapping settings for all other register accesses.
3061  * tg3 devices are BE devices, so on a BE machine, the data
3062  * returned will be exactly as it is seen in NVRAM.  On a LE
3063  * machine, the 32-bit value will be byteswapped.
3064  */
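/* For example: if NVRAM holds the bytes 12 34 56 78, a big-endian host
 * sees *val == 0x12345678 while a little-endian host sees 0x78563412.
 */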
3065 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3066 {
3067         int ret;
3068
3069         if (!tg3_flag(tp, NVRAM))
3070                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3071
3072         offset = tg3_nvram_phys_addr(tp, offset);
3073
3074         if (offset > NVRAM_ADDR_MSK)
3075                 return -EINVAL;
3076
3077         ret = tg3_nvram_lock(tp);
3078         if (ret)
3079                 return ret;
3080
3081         tg3_enable_nvram_access(tp);
3082
3083         tw32(NVRAM_ADDR, offset);
3084         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3085                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3086
3087         if (ret == 0)
3088                 *val = tr32(NVRAM_RDDATA);
3089
3090         tg3_disable_nvram_access(tp);
3091
3092         tg3_nvram_unlock(tp);
3093
3094         return ret;
3095 }
3096
3097 /* Ensures NVRAM data is in bytestream format. */
3098 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3099 {
3100         u32 v;
3101         int res = tg3_nvram_read(tp, offset, &v);
3102         if (!res)
3103                 *val = cpu_to_be32(v);
3104         return res;
3105 }
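
/* Since the value is stored with cpu_to_be32(), callers that memcpy() *val
 * into a byte buffer get the NVRAM bytes in device order on any host.
 */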
3106
3107 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3108                                     u32 offset, u32 len, u8 *buf)
3109 {
3110         int i, j, rc = 0;
3111         u32 val;
3112
3113         for (i = 0; i < len; i += 4) {
3114                 u32 addr;
3115                 __be32 data;
3116
3117                 addr = offset + i;
3118
3119                 memcpy(&data, buf + i, 4);
3120
3121                 /*
3122                  * The SEEPROM interface expects the data to always be opposite
3123                  * the native endian format.  We accomplish this by reversing
3124                  * all the operations that would have been performed on the
3125                  * data from a call to tg3_nvram_read_be32().
3126                  */
3127                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3128
3129                 val = tr32(GRC_EEPROM_ADDR);
3130                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3131
3132                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3133                         EEPROM_ADDR_READ);
3134                 tw32(GRC_EEPROM_ADDR, val |
3135                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3136                         (addr & EEPROM_ADDR_ADDR_MASK) |
3137                         EEPROM_ADDR_START |
3138                         EEPROM_ADDR_WRITE);
3139
3140                 for (j = 0; j < 1000; j++) {
3141                         val = tr32(GRC_EEPROM_ADDR);
3142
3143                         if (val & EEPROM_ADDR_COMPLETE)
3144                                 break;
3145                         msleep(1);
3146                 }
3147                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3148                         rc = -EBUSY;
3149                         break;
3150                 }
3151         }
3152
3153         return rc;
3154 }
3155
3156 /* offset and length are dword aligned */
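/* Unbuffered flash is written with a read-modify-write of whole pages: read
 * the page into a bounce buffer, merge the caller's bytes, erase the page,
 * then rewrite it one word at a time.
 */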
3157 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3158                 u8 *buf)
3159 {
3160         int ret = 0;
3161         u32 pagesize = tp->nvram_pagesize;
3162         u32 pagemask = pagesize - 1;
3163         u32 nvram_cmd;
3164         u8 *tmp;
3165
3166         tmp = kmalloc(pagesize, GFP_KERNEL);
3167         if (tmp == NULL)
3168                 return -ENOMEM;
3169
3170         while (len) {
3171                 int j;
3172                 u32 phy_addr, page_off, size;
3173
3174                 phy_addr = offset & ~pagemask;
3175
3176                 for (j = 0; j < pagesize; j += 4) {
3177                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3178                                                   (__be32 *) (tmp + j));
3179                         if (ret)
3180                                 break;
3181                 }
3182                 if (ret)
3183                         break;
3184
3185                 page_off = offset & pagemask;
3186                 size = pagesize;
3187                 if (len < size)
3188                         size = len;
3189
3190                 len -= size;
3191
3192                 memcpy(tmp + page_off, buf, size);
3193
3194                 offset = offset + (pagesize - page_off);
3195
3196                 tg3_enable_nvram_access(tp);
3197
3198                 /*
3199                  * Before we can erase the flash page, we need
3200                  * to issue a special "write enable" command.
3201                  */
3202                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3203
3204                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3205                         break;
3206
3207                 /* Erase the target page */
3208                 tw32(NVRAM_ADDR, phy_addr);
3209
3210                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3211                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3212
3213                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3214                         break;
3215
3216                 /* Issue another write enable to start the write. */
3217                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3218
3219                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3220                         break;
3221
3222                 for (j = 0; j < pagesize; j += 4) {
3223                         __be32 data;
3224
3225                         data = *((__be32 *) (tmp + j));
3226
3227                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3228
3229                         tw32(NVRAM_ADDR, phy_addr + j);
3230
3231                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3232                                 NVRAM_CMD_WR;
3233
3234                         if (j == 0)
3235                                 nvram_cmd |= NVRAM_CMD_FIRST;
3236                         else if (j == (pagesize - 4))
3237                                 nvram_cmd |= NVRAM_CMD_LAST;
3238
3239                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3240                         if (ret)
3241                                 break;
3242                 }
3243                 if (ret)
3244                         break;
3245         }
3246
3247         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3248         tg3_nvram_exec_cmd(tp, nvram_cmd);
3249
3250         kfree(tmp);
3251
3252         return ret;
3253 }
3254
3255 /* offset and length are dword aligned */
3256 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3257                 u8 *buf)
3258 {
3259         int i, ret = 0;
3260
3261         for (i = 0; i < len; i += 4, offset += 4) {
3262                 u32 page_off, phy_addr, nvram_cmd;
3263                 __be32 data;
3264
3265                 memcpy(&data, buf + i, 4);
3266                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3267
3268                 page_off = offset % tp->nvram_pagesize;
3269
3270                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3271
3272                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3273
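                /* Frame the burst: the first word of the transfer or of a
                 * page gets NVRAM_CMD_FIRST; the last word of a page or of
                 * the transfer gets NVRAM_CMD_LAST.
                 */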
3274                 if (page_off == 0 || i == 0)
3275                         nvram_cmd |= NVRAM_CMD_FIRST;
3276                 if (page_off == (tp->nvram_pagesize - 4))
3277                         nvram_cmd |= NVRAM_CMD_LAST;
3278
3279                 if (i == (len - 4))
3280                         nvram_cmd |= NVRAM_CMD_LAST;
3281
3282                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3283                     !tg3_flag(tp, FLASH) ||
3284                     !tg3_flag(tp, 57765_PLUS))
3285                         tw32(NVRAM_ADDR, phy_addr);
3286
3287                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3288                     !tg3_flag(tp, 5755_PLUS) &&
3289                     (tp->nvram_jedecnum == JEDEC_ST) &&
3290                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3291                         u32 cmd;
3292
3293                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3294                         ret = tg3_nvram_exec_cmd(tp, cmd);
3295                         if (ret)
3296                                 break;
3297                 }
3298                 if (!tg3_flag(tp, FLASH)) {
3299                         /* We always do complete word writes to eeprom. */
3300                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3301                 }
3302
3303                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3304                 if (ret)
3305                         break;
3306         }
3307         return ret;
3308 }
3309
3310 /* offset and length are dword aligned */
3311 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3312 {
3313         int ret;
3314
3315         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3316                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3317                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3318                 udelay(40);
3319         }
3320
3321         if (!tg3_flag(tp, NVRAM)) {
3322                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3323         } else {
3324                 u32 grc_mode;
3325
3326                 ret = tg3_nvram_lock(tp);
3327                 if (ret)
3328                         return ret;
3329
3330                 tg3_enable_nvram_access(tp);
3331                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3332                         tw32(NVRAM_WRITE1, 0x406);
3333
3334                 grc_mode = tr32(GRC_MODE);
3335                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3336
3337                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3338                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3339                                 buf);
3340                 } else {
3341                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3342                                 buf);
3343                 }
3344
3345                 grc_mode = tr32(GRC_MODE);
3346                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3347
3348                 tg3_disable_nvram_access(tp);
3349                 tg3_nvram_unlock(tp);
3350         }
3351
3352         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3353                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3354                 udelay(40);
3355         }
3356
3357         return ret;
3358 }
3359
3360 #define RX_CPU_SCRATCH_BASE     0x30000
3361 #define RX_CPU_SCRATCH_SIZE     0x04000
3362 #define TX_CPU_SCRATCH_BASE     0x34000
3363 #define TX_CPU_SCRATCH_SIZE     0x04000
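
/* Each on-chip CPU has a 16 kB (0x4000) scratch region at the bases above. */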
3364
3365 /* tp->lock is held. */
3366 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3367 {
3368         int i;
3369
3370         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3371
3372         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3373                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3374
3375                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3376                 return 0;
3377         }
3378         if (offset == RX_CPU_BASE) {
3379                 for (i = 0; i < 10000; i++) {
3380                         tw32(offset + CPU_STATE, 0xffffffff);
3381                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3382                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3383                                 break;
3384                 }
3385
3386                 tw32(offset + CPU_STATE, 0xffffffff);
3387                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3388                 udelay(10);
3389         } else {
3390                 for (i = 0; i < 10000; i++) {
3391                         tw32(offset + CPU_STATE, 0xffffffff);
3392                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3393                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3394                                 break;
3395                 }
3396         }
3397
3398         if (i >= 10000) {
3399                 netdev_err(tp->dev, "%s: %s CPU halt timed out\n",
3400                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3401                 return -ENODEV;
3402         }
3403
3404         /* Clear firmware's nvram arbitration. */
3405         if (tg3_flag(tp, NVRAM))
3406                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3407         return 0;
3408 }
3409
3410 struct fw_info {
3411         unsigned int fw_base;
3412         unsigned int fw_len;
3413         const __be32 *fw_data;
3414 };
3415
3416 /* tp->lock is held. */
3417 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3418                                  u32 cpu_scratch_base, int cpu_scratch_size,
3419                                  struct fw_info *info)
3420 {
3421         int err, lock_err, i;
3422         void (*write_op)(struct tg3 *, u32, u32);
3423
3424         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3425                 netdev_err(tp->dev,
3426                            "%s: attempting to load TX cpu firmware on a 5705-class chip\n",
3427                            __func__);
3428                 return -EINVAL;
3429         }
3430
3431         if (tg3_flag(tp, 5705_PLUS))
3432                 write_op = tg3_write_mem;
3433         else
3434                 write_op = tg3_write_indirect_reg32;
3435
3436         /* It is possible that bootcode is still loading at this point.
3437          * Get the nvram lock first before halting the cpu.
3438          */
3439         lock_err = tg3_nvram_lock(tp);
3440         err = tg3_halt_cpu(tp, cpu_base);
3441         if (!lock_err)
3442                 tg3_nvram_unlock(tp);
3443         if (err)
3444                 goto out;
3445
3446         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3447                 write_op(tp, cpu_scratch_base + i, 0);
3448         tw32(cpu_base + CPU_STATE, 0xffffffff);
3449         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3450         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3451                 write_op(tp, (cpu_scratch_base +
3452                               (info->fw_base & 0xffff) +
3453                               (i * sizeof(u32))),
3454                               be32_to_cpu(info->fw_data[i]));
3455
3456         err = 0;
3457
3458 out:
3459         return err;
3460 }
3461
3462 /* tp->lock is held. */
3463 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3464 {
3465         struct fw_info info;
3466         const __be32 *fw_data;
3467         int err, i;
3468
3469         fw_data = (void *)tp->fw->data;
3470
3471         /* The firmware blob starts with version numbers, followed by the
3472          * start address and length.  We set the complete length here:
3473          * length = end_address_of_bss - start_address_of_text.
3474          * The remainder is the blob, loaded contiguously from the start
3475          * address. */
3476
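        /* Layout implied by the parsing below (byte offsets into
         * tp->fw->data):
         *   0x0  version        0x4  fw_base (load/start address)
         *   0x8  length         0xc  first data word (be32)
         */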
3477         info.fw_base = be32_to_cpu(fw_data[1]);
3478         info.fw_len = tp->fw->size - 12;
3479         info.fw_data = &fw_data[3];
3480
3481         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3482                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3483                                     &info);
3484         if (err)
3485                 return err;
3486
3487         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3488                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3489                                     &info);
3490         if (err)
3491                 return err;
3492
3493         /* Now start up only the RX CPU. */
3494         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3495         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3496
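        /* Verify the PC latched; if not, re-halt the CPU and rewrite the PC,
         * retrying up to five times with a 1 ms pause between attempts.
         */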
3497         for (i = 0; i < 5; i++) {
3498                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3499                         break;
3500                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3501                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3502                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3503                 udelay(1000);
3504         }
3505         if (i >= 5) {
3506                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3507                            "should be %08x\n", __func__,
3508                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3509                 return -ENODEV;
3510         }
3511         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3512         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3513
3514         return 0;
3515 }
3516
3517 /* tp->lock is held. */
3518 static int tg3_load_tso_firmware(struct tg3 *tp)
3519 {
3520         struct fw_info info;
3521         const __be32 *fw_data;
3522         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3523         int err, i;
3524
3525         if (tg3_flag(tp, HW_TSO_1) ||
3526             tg3_flag(tp, HW_TSO_2) ||
3527             tg3_flag(tp, HW_TSO_3))
3528                 return 0;
3529
3530         fw_data = (void *)tp->fw->data;
3531
3532         /* The firmware blob starts with version numbers, followed by the
3533          * start address and length.  We set the complete length here:
3534          * length = end_address_of_bss - start_address_of_text.
3535          * The remainder is the blob, loaded contiguously from the start
3536          * address. */
3537
3538         info.fw_base = be32_to_cpu(fw_data[1]);
3539         cpu_scratch_size = tp->fw_len;
3540         info.fw_len = tp->fw->size - 12;
3541         info.fw_data = &fw_data[3];
3542
3543         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3544                 cpu_base = RX_CPU_BASE;
3545                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3546         } else {
3547                 cpu_base = TX_CPU_BASE;
3548                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3549                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3550         }
3551
3552         err = tg3_load_firmware_cpu(tp, cpu_base,
3553                                     cpu_scratch_base, cpu_scratch_size,
3554                                     &info);
3555         if (err)
3556                 return err;
3557
3558         /* Now start up the CPU. */
3559         tw32(cpu_base + CPU_STATE, 0xffffffff);
3560         tw32_f(cpu_base + CPU_PC, info.fw_base);
3561
3562         for (i = 0; i < 5; i++) {
3563                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3564                         break;
3565                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3566                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3567                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3568                 udelay(1000);
3569         }
3570         if (i >= 5) {
3571                 netdev_err(tp->dev,
3572                            "%s failed to set CPU PC: is %08x, should be %08x\n",
3573                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3574                 return -ENODEV;
3575         }
3576         tw32(cpu_base + CPU_STATE, 0xffffffff);
3577         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3578         return 0;
3579 }
3580
3581
3582 /* tp->lock is held. */
3583 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3584 {
3585         u32 addr_high, addr_low;
3586         int i;
3587
3588         addr_high = ((tp->dev->dev_addr[0] << 8) |
3589                      tp->dev->dev_addr[1]);
3590         addr_low = ((tp->dev->dev_addr[2] << 24) |
3591                     (tp->dev->dev_addr[3] << 16) |
3592                     (tp->dev->dev_addr[4] <<  8) |
3593                     (tp->dev->dev_addr[5] <<  0));
3594         for (i = 0; i < 4; i++) {
3595                 if (i == 1 && skip_mac_1)
3596                         continue;
3597                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3598                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3599         }
3600
3601         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3602             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3603                 for (i = 0; i < 12; i++) {
3604                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3605                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3606                 }
3607         }
3608
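        /* The TX backoff seed is the byte-wise sum of the MAC address bytes,
         * masked down; e.g. for 00:10:18:01:02:03 the sum is 0x2e.
         */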
3609         addr_high = (tp->dev->dev_addr[0] +
3610                      tp->dev->dev_addr[1] +
3611                      tp->dev->dev_addr[2] +
3612                      tp->dev->dev_addr[3] +
3613                      tp->dev->dev_addr[4] +
3614                      tp->dev->dev_addr[5]) &
3615                 TX_BACKOFF_SEED_MASK;
3616         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3617 }
3618
3619 static void tg3_enable_register_access(struct tg3 *tp)
3620 {
3621         /*
3622          * Make sure register accesses (indirect or otherwise) will function
3623          * correctly.
3624          */
3625         pci_write_config_dword(tp->pdev,
3626                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3627 }
3628
3629 static int tg3_power_up(struct tg3 *tp)
3630 {
3631         int err;
3632
3633         tg3_enable_register_access(tp);
3634
3635         err = pci_set_power_state(tp->pdev, PCI_D0);
3636         if (!err) {
3637                 /* Switch out of Vaux if it is a NIC */
3638                 tg3_pwrsrc_switch_to_vmain(tp);
3639         } else {
3640                 netdev_err(tp->dev, "Transition to D0 failed\n");
3641         }
3642
3643         return err;
3644 }
3645
3646 static int tg3_setup_phy(struct tg3 *, int);
3647
3648 static int tg3_power_down_prepare(struct tg3 *tp)
3649 {
3650         u32 misc_host_ctrl;
3651         bool device_should_wake, do_low_power;
3652
3653         tg3_enable_register_access(tp);
3654
3655         /* Restore the CLKREQ setting. */
3656         if (tg3_flag(tp, CLKREQ_BUG)) {
3657                 u16 lnkctl;
3658
3659                 pci_read_config_word(tp->pdev,
3660                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3661                                      &lnkctl);
3662                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3663                 pci_write_config_word(tp->pdev,
3664                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3665                                       lnkctl);
3666         }
3667
3668         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3669         tw32(TG3PCI_MISC_HOST_CTRL,
3670              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3671
3672         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3673                              tg3_flag(tp, WOL_ENABLE);
3674
3675         if (tg3_flag(tp, USE_PHYLIB)) {
3676                 do_low_power = false;
3677                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3678                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3679                         struct phy_device *phydev;
3680                         u32 phyid, advertising;
3681
3682                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3683
3684                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3685
3686                         tp->link_config.speed = phydev->speed;
3687                         tp->link_config.duplex = phydev->duplex;
3688                         tp->link_config.autoneg = phydev->autoneg;
3689                         tp->link_config.advertising = phydev->advertising;
3690
3691                         advertising = ADVERTISED_TP |
3692                                       ADVERTISED_Pause |
3693                                       ADVERTISED_Autoneg |
3694                                       ADVERTISED_10baseT_Half;
3695
3696                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3697                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3698                                         advertising |=
3699                                                 ADVERTISED_100baseT_Half |
3700                                                 ADVERTISED_100baseT_Full |
3701                                                 ADVERTISED_10baseT_Full;
3702                                 else
3703                                         advertising |= ADVERTISED_10baseT_Full;
3704                         }
3705
3706                         phydev->advertising = advertising;
3707
3708                         phy_start_aneg(phydev);
3709
3710                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3711                         if (phyid != PHY_ID_BCMAC131) {
3712                                 phyid &= PHY_BCM_OUI_MASK;
3713                                 if (phyid == PHY_BCM_OUI_1 ||
3714                                     phyid == PHY_BCM_OUI_2 ||
3715                                     phyid == PHY_BCM_OUI_3)
3716                                         do_low_power = true;
3717                         }
3718                 }
3719         } else {
3720                 do_low_power = true;
3721
3722                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3723                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3724
3725                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3726                         tg3_setup_phy(tp, 0);
3727         }
3728
3729         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3730                 u32 val;
3731
3732                 val = tr32(GRC_VCPU_EXT_CTRL);
3733                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3734         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3735                 int i;
3736                 u32 val;
3737
3738                 for (i = 0; i < 200; i++) {
3739                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3740                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3741                                 break;
3742                         msleep(1);
3743                 }
3744         }
3745         if (tg3_flag(tp, WOL_CAP))
3746                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3747                                                      WOL_DRV_STATE_SHUTDOWN |
3748                                                      WOL_DRV_WOL |
3749                                                      WOL_SET_MAGIC_PKT);
3750
3751         if (device_should_wake) {
3752                 u32 mac_mode;
3753
3754                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3755                         if (do_low_power &&
3756                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3757                                 tg3_phy_auxctl_write(tp,
3758                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3759                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3760                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3761                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3762                                 udelay(40);
3763                         }
3764
3765                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3766                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3767                         else
3768                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3769
3770                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3771                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3772                             ASIC_REV_5700) {
3773                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3774                                              SPEED_100 : SPEED_10;
3775                                 if (tg3_5700_link_polarity(tp, speed))
3776                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3777                                 else
3778                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3779                         }
3780                 } else {
3781                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3782                 }
3783
3784                 if (!tg3_flag(tp, 5750_PLUS))
3785                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3786
3787                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3788                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3789                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3790                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3791
3792                 if (tg3_flag(tp, ENABLE_APE))
3793                         mac_mode |= MAC_MODE_APE_TX_EN |
3794                                     MAC_MODE_APE_RX_EN |
3795                                     MAC_MODE_TDE_ENABLE;
3796
3797                 tw32_f(MAC_MODE, mac_mode);
3798                 udelay(100);
3799
3800                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3801                 udelay(10);
3802         }
3803
3804         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3805             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3806              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3807                 u32 base_val;
3808
3809                 base_val = tp->pci_clock_ctrl;
3810                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3811                              CLOCK_CTRL_TXCLK_DISABLE);
3812
3813                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3814                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3815         } else if (tg3_flag(tp, 5780_CLASS) ||
3816                    tg3_flag(tp, CPMU_PRESENT) ||
3817                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3818                 /* do nothing */
3819         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3820                 u32 newbits1, newbits2;
3821
3822                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3823                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3824                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3825                                     CLOCK_CTRL_TXCLK_DISABLE |
3826                                     CLOCK_CTRL_ALTCLK);
3827                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3828                 } else if (tg3_flag(tp, 5705_PLUS)) {
3829                         newbits1 = CLOCK_CTRL_625_CORE;
3830                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3831                 } else {
3832                         newbits1 = CLOCK_CTRL_ALTCLK;
3833                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3834                 }
3835
3836                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3837                             40);
3838
3839                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3840                             40);
3841
3842                 if (!tg3_flag(tp, 5705_PLUS)) {
3843                         u32 newbits3;
3844
3845                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3846                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3847                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3848                                             CLOCK_CTRL_TXCLK_DISABLE |
3849                                             CLOCK_CTRL_44MHZ_CORE);
3850                         } else {
3851                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3852                         }
3853
3854                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3855                                     tp->pci_clock_ctrl | newbits3, 40);
3856                 }
3857         }
3858
3859         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3860                 tg3_power_down_phy(tp, do_low_power);
3861
3862         tg3_frob_aux_power(tp, true);
3863
3864         /* Workaround for unstable PLL clock */
3865         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3866             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3867                 u32 val = tr32(0x7d00);
3868
3869                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3870                 tw32(0x7d00, val);
3871                 if (!tg3_flag(tp, ENABLE_ASF)) {
3872                         int err;
3873
3874                         err = tg3_nvram_lock(tp);
3875                         tg3_halt_cpu(tp, RX_CPU_BASE);
3876                         if (!err)
3877                                 tg3_nvram_unlock(tp);
3878                 }
3879         }
3880
3881         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3882
3883         return 0;
3884 }
3885
3886 static void tg3_power_down(struct tg3 *tp)
3887 {
3888         tg3_power_down_prepare(tp);
3889
3890         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3891         pci_set_power_state(tp->pdev, PCI_D3hot);
3892 }
3893
3894 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3895 {
3896         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3897         case MII_TG3_AUX_STAT_10HALF:
3898                 *speed = SPEED_10;
3899                 *duplex = DUPLEX_HALF;
3900                 break;
3901
3902         case MII_TG3_AUX_STAT_10FULL:
3903                 *speed = SPEED_10;
3904                 *duplex = DUPLEX_FULL;
3905                 break;
3906
3907         case MII_TG3_AUX_STAT_100HALF:
3908                 *speed = SPEED_100;
3909                 *duplex = DUPLEX_HALF;
3910                 break;
3911
3912         case MII_TG3_AUX_STAT_100FULL:
3913                 *speed = SPEED_100;
3914                 *duplex = DUPLEX_FULL;
3915                 break;
3916
3917         case MII_TG3_AUX_STAT_1000HALF:
3918                 *speed = SPEED_1000;
3919                 *duplex = DUPLEX_HALF;
3920                 break;
3921
3922         case MII_TG3_AUX_STAT_1000FULL:
3923                 *speed = SPEED_1000;
3924                 *duplex = DUPLEX_FULL;
3925                 break;
3926
3927         default:
3928                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3929                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3930                                  SPEED_10;
3931                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3932                                   DUPLEX_HALF;
3933                         break;
3934                 }
3935                 *speed = SPEED_UNKNOWN;
3936                 *duplex = DUPLEX_UNKNOWN;
3937                 break;
3938         }
3939 }
3940
3941 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3942 {
3943         int err = 0;
3944         u32 val, new_adv;
3945
3946         new_adv = ADVERTISE_CSMA;
3947         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3948         new_adv |= mii_advertise_flowctrl(flowctrl);
3949
3950         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3951         if (err)
3952                 goto done;
3953
3954         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3955                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3956
3957                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3958                     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3959                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3960
3961                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3962                 if (err)
3963                         goto done;
3964         }
3965
3966         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3967                 goto done;
3968
3969         tw32(TG3_CPMU_EEE_MODE,
3970              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3971
3972         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3973         if (!err) {
3974                 u32 err2;
3975
3976                 val = 0;
3977                 /* Advertise 100-BaseTX EEE ability */
3978                 if (advertise & ADVERTISED_100baseT_Full)
3979                         val |= MDIO_AN_EEE_ADV_100TX;
3980                 /* Advertise 1000-BaseT EEE ability */
3981                 if (advertise & ADVERTISED_1000baseT_Full)
3982                         val |= MDIO_AN_EEE_ADV_1000T;
3983                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3984                 if (err)
3985                         val = 0;
3986
3987                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3988                 case ASIC_REV_5717:
3989                 case ASIC_REV_57765:
3990                 case ASIC_REV_57766:
3991                 case ASIC_REV_5719:
3992                         /* If we advertised any EEE modes above... */
3993                         if (val)
3994                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3995                                       MII_TG3_DSP_TAP26_RMRXSTO |
3996                                       MII_TG3_DSP_TAP26_OPCSINPT;
3997                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3998                         /* Fall through */
3999                 case ASIC_REV_5720:
4000                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4001                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4002                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4003                 }
4004
4005                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
4006                 if (!err)
4007                         err = err2;
4008         }
4009
4010 done:
4011         return err;
4012 }
4013
4014 static void tg3_phy_copper_begin(struct tg3 *tp)
4015 {
4016         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4017             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4018                 u32 adv, fc;
4019
4020                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4021                         adv = ADVERTISED_10baseT_Half |
4022                               ADVERTISED_10baseT_Full;
4023                         if (tg3_flag(tp, WOL_SPEED_100MB))
4024                                 adv |= ADVERTISED_100baseT_Half |
4025                                        ADVERTISED_100baseT_Full;
4026
4027                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4028                 } else {
4029                         adv = tp->link_config.advertising;
4030                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4031                                 adv &= ~(ADVERTISED_1000baseT_Half |
4032                                          ADVERTISED_1000baseT_Full);
4033
4034                         fc = tp->link_config.flowctrl;
4035                 }
4036
4037                 tg3_phy_autoneg_cfg(tp, adv, fc);
4038
4039                 tg3_writephy(tp, MII_BMCR,
4040                              BMCR_ANENABLE | BMCR_ANRESTART);
4041         } else {
4042                 int i;
4043                 u32 bmcr, orig_bmcr;
4044
4045                 tp->link_config.active_speed = tp->link_config.speed;
4046                 tp->link_config.active_duplex = tp->link_config.duplex;
4047
4048                 bmcr = 0;
4049                 switch (tp->link_config.speed) {
4050                 default:
4051                 case SPEED_10:
4052                         break;
4053
4054                 case SPEED_100:
4055                         bmcr |= BMCR_SPEED100;
4056                         break;
4057
4058                 case SPEED_1000:
4059                         bmcr |= BMCR_SPEED1000;
4060                         break;
4061                 }
4062
4063                 if (tp->link_config.duplex == DUPLEX_FULL)
4064                         bmcr |= BMCR_FULLDPLX;
4065
4066                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4067                     (bmcr != orig_bmcr)) {
4068                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4069                         for (i = 0; i < 1500; i++) {
4070                                 u32 tmp;
4071
4072                                 udelay(10);
4073                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4074                                     tg3_readphy(tp, MII_BMSR, &tmp))
4075                                         continue;
4076                                 if (!(tmp & BMSR_LSTATUS)) {
4077                                         udelay(40);
4078                                         break;
4079                                 }
4080                         }
4081                         tg3_writephy(tp, MII_BMCR, bmcr);
4082                         udelay(40);
4083                 }
4084         }
4085 }
4086
4087 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4088 {
4089         int err;
4090
4091         /* Turn off tap power management. */
4092         /* Set Extended packet length bit */
4093         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4094
4095         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4096         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4097         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4098         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4099         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4100
4101         udelay(40);
4102
4103         return err;
4104 }
4105
4106 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4107 {
4108         u32 advmsk, tgtadv, advertising;
4109
4110         advertising = tp->link_config.advertising;
4111         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4112
4113         advmsk = ADVERTISE_ALL;
4114         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4115                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4116                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4117         }
4118
4119         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4120                 return false;
4121
4122         if ((*lcladv & advmsk) != tgtadv)
4123                 return false;
4124
4125         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4126                 u32 tg3_ctrl;
4127
4128                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4129
4130                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4131                         return false;
4132
4133                 if (tgtadv &&
4134                     (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4135                      tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4136                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4137                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4138                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4139                 } else {
4140                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4141                 }
4142
4143                 if (tg3_ctrl != tgtadv)
4144                         return false;
4145         }
4146
4147         return true;
4148 }
4149
4150 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4151 {
4152         u32 lpeth = 0;
4153
4154         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4155                 u32 val;
4156
4157                 if (tg3_readphy(tp, MII_STAT1000, &val))
4158                         return false;
4159
4160                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4161         }
4162
4163         if (tg3_readphy(tp, MII_LPA, rmtadv))
4164                 return false;
4165
4166         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4167         tp->link_config.rmt_adv = lpeth;
4168
4169         return true;
4170 }
4171
4172 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4173 {
4174         int current_link_up;
4175         u32 bmsr, val;
4176         u32 lcl_adv, rmt_adv;
4177         u16 current_speed;
4178         u8 current_duplex;
4179         int i, err;
4180
4181         tw32(MAC_EVENT, 0);
4182
4183         tw32_f(MAC_STATUS,
4184              (MAC_STATUS_SYNC_CHANGED |
4185               MAC_STATUS_CFG_CHANGED |
4186               MAC_STATUS_MI_COMPLETION |
4187               MAC_STATUS_LNKSTATE_CHANGED));
4188         udelay(40);
4189
4190         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4191                 tw32_f(MAC_MI_MODE,
4192                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4193                 udelay(80);
4194         }
4195
4196         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4197
4198         /* Some third-party PHYs need to be reset on link going
4199          * down.
4200          */
4201         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4202              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4203              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4204             netif_carrier_ok(tp->dev)) {
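                /* BMSR latches link-down events; read it twice (here and
                 * below) so the second read reflects the current state.
                 */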
4205                 tg3_readphy(tp, MII_BMSR, &bmsr);
4206                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4207                     !(bmsr & BMSR_LSTATUS))
4208                         force_reset = 1;
4209         }
4210         if (force_reset)
4211                 tg3_phy_reset(tp);
4212
4213         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4214                 tg3_readphy(tp, MII_BMSR, &bmsr);
4215                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4216                     !tg3_flag(tp, INIT_COMPLETE))
4217                         bmsr = 0;
4218
4219                 if (!(bmsr & BMSR_LSTATUS)) {
4220                         err = tg3_init_5401phy_dsp(tp);
4221                         if (err)
4222                                 return err;
4223
4224                         tg3_readphy(tp, MII_BMSR, &bmsr);
4225                         for (i = 0; i < 1000; i++) {
4226                                 udelay(10);
4227                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4228                                     (bmsr & BMSR_LSTATUS)) {
4229                                         udelay(40);
4230                                         break;
4231                                 }
4232                         }
4233
4234                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4235                             TG3_PHY_REV_BCM5401_B0 &&
4236                             !(bmsr & BMSR_LSTATUS) &&
4237                             tp->link_config.active_speed == SPEED_1000) {
4238                                 err = tg3_phy_reset(tp);
4239                                 if (!err)
4240                                         err = tg3_init_5401phy_dsp(tp);
4241                                 if (err)
4242                                         return err;
4243                         }
4244                 }
4245         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4246                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4247                 /* 5701 {A0,B0} CRC bug workaround */
4248                 tg3_writephy(tp, 0x15, 0x0a75);
4249                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4250                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4251                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4252         }
4253
4254         /* Clear pending interrupts... */
4255         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4256         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4257
4258         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4259                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4260         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4261                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4262
4263         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4264             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4265                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4266                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4267                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4268                 else
4269                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4270         }
4271
4272         current_link_up = 0;
4273         current_speed = SPEED_UNKNOWN;
4274         current_duplex = DUPLEX_UNKNOWN;
4275         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4276         tp->link_config.rmt_adv = 0;
4277
4278         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4279                 err = tg3_phy_auxctl_read(tp,
4280                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4281                                           &val);
4282                 if (!err && !(val & (1 << 10))) {
4283                         tg3_phy_auxctl_write(tp,
4284                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4285                                              val | (1 << 10));
4286                         goto relink;
4287                 }
4288         }
4289
4290         bmsr = 0;
4291         for (i = 0; i < 100; i++) {
4292                 tg3_readphy(tp, MII_BMSR, &bmsr);
4293                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4294                     (bmsr & BMSR_LSTATUS))
4295                         break;
4296                 udelay(40);
4297         }
4298
4299         if (bmsr & BMSR_LSTATUS) {
4300                 u32 aux_stat, bmcr;
4301
4302                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4303                 for (i = 0; i < 2000; i++) {
4304                         udelay(10);
4305                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4306                             aux_stat)
4307                                 break;
4308                 }
4309
4310                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4311                                              &current_speed,
4312                                              &current_duplex);
4313
                bmcr = 0;
                for (i = 0; i < 200; i++) {
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        if (tg3_readphy(tp, MII_BMCR, &bmcr))
                                continue;
                        if (bmcr && bmcr != 0x7fff)
                                break;
                        udelay(10);
                }

                lcl_adv = 0;
                rmt_adv = 0;

                tp->link_config.active_speed = current_speed;
                tp->link_config.active_duplex = current_duplex;

                if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                        if ((bmcr & BMCR_ANENABLE) &&
                            tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
                            tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
                                current_link_up = 1;
                } else {
                        if (!(bmcr & BMCR_ANENABLE) &&
                            tp->link_config.speed == current_speed &&
                            tp->link_config.duplex == current_duplex &&
                            tp->link_config.flowctrl ==
                            tp->link_config.active_flowctrl) {
                                current_link_up = 1;
                        }
                }

                if (current_link_up == 1 &&
                    tp->link_config.active_duplex == DUPLEX_FULL) {
                        u32 reg, bit;

                        if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                                reg = MII_TG3_FET_GEN_STAT;
                                bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
                        } else {
                                reg = MII_TG3_EXT_STAT;
                                bit = MII_TG3_EXT_STAT_MDIX;
                        }

                        if (!tg3_readphy(tp, reg, &val) && (val & bit))
                                tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

                        tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
                }
        }

relink:
        if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
                tg3_phy_copper_begin(tp);

                tg3_readphy(tp, MII_BMSR, &bmsr);
                if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
                    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
                        current_link_up = 1;
        }

        tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
        if (current_link_up == 1) {
                if (tp->link_config.active_speed == SPEED_100 ||
                    tp->link_config.active_speed == SPEED_10)
                        tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
        } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
                tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
        else
                tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

        tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
        if (tp->link_config.active_duplex == DUPLEX_HALF)
                tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                if (current_link_up == 1 &&
                    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
                        tp->mac_mode |= MAC_MODE_LINK_POLARITY;
                else
                        tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
        }

        /* ??? Without this setting Netgear GA302T PHY does not
         * ??? send/receive packets...
         */
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
            tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
                tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tg3_phy_eee_adjust(tp, current_link_up);

        if (tg3_flag(tp, USE_LINKCHG_REG)) {
                /* Polled via timer. */
                tw32_f(MAC_EVENT, 0);
        } else {
                tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        }
        udelay(40);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
            current_link_up == 1 &&
            tp->link_config.active_speed == SPEED_1000 &&
            (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
                udelay(120);
                tw32_f(MAC_STATUS,
                     (MAC_STATUS_SYNC_CHANGED |
                      MAC_STATUS_CFG_CHANGED));
                udelay(40);
                tg3_write_mem(tp,
                              NIC_SRAM_FIRMWARE_MBOX,
                              NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
        }

        /* Prevent send BD corruption. */
        if (tg3_flag(tp, CLKREQ_BUG)) {
                u16 oldlnkctl, newlnkctl;

                pci_read_config_word(tp->pdev,
                                     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
                                     &oldlnkctl);
                if (tp->link_config.active_speed == SPEED_100 ||
                    tp->link_config.active_speed == SPEED_10)
                        newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
                else
                        newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
                if (newlnkctl != oldlnkctl)
                        pci_write_config_word(tp->pdev,
                                              pci_pcie_cap(tp->pdev) +
                                              PCI_EXP_LNKCTL, newlnkctl);
        }

        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else
                        netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        return 0;
}

struct tg3_fiber_aneginfo {
        int state;
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

        u32 flags;
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

        unsigned long link_time, cur_time;

        u32 ability_match_cfg;
        int ability_match_count;

        char ability_match, idle_match, ack_match;

        u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

#define ANEG_STATE_SETTLE_TIME  10000

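/* Software implementation of the IEEE 802.3 clause 37 (1000BASE-X)
 * auto-negotiation arbitration state machine.  fiber_autoneg() below
 * invokes this roughly once per microsecond; each invocation advances
 * the state machine by at most one state and reports whether negotiation
 * is still in progress, needs the settle timer, is done, or has failed.
 */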
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
                                   struct tg3_fiber_aneginfo *ap)
{
        u16 flowctrl;
        unsigned long delta;
        u32 rx_cfg_reg;
        int ret;

        if (ap->state == ANEG_STATE_UNKNOWN) {
                ap->rxconfig = 0;
                ap->link_time = 0;
                ap->cur_time = 0;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->idle_match = 0;
                ap->ack_match = 0;
        }
        ap->cur_time++;

        if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
                rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

                if (rx_cfg_reg != ap->ability_match_cfg) {
                        ap->ability_match_cfg = rx_cfg_reg;
                        ap->ability_match = 0;
                        ap->ability_match_count = 0;
                } else {
                        if (++ap->ability_match_count > 1) {
                                ap->ability_match = 1;
                                ap->ability_match_cfg = rx_cfg_reg;
                        }
                }
                if (rx_cfg_reg & ANEG_CFG_ACK)
                        ap->ack_match = 1;
                else
                        ap->ack_match = 0;

                ap->idle_match = 0;
        } else {
                ap->idle_match = 1;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->ack_match = 0;

                rx_cfg_reg = 0;
        }

        ap->rxconfig = rx_cfg_reg;
        ret = ANEG_OK;

        switch (ap->state) {
        case ANEG_STATE_UNKNOWN:
                if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
                        ap->state = ANEG_STATE_AN_ENABLE;

                /* fallthru */
        case ANEG_STATE_AN_ENABLE:
                ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
                if (ap->flags & MR_AN_ENABLE) {
                        ap->link_time = 0;
                        ap->cur_time = 0;
                        ap->ability_match_cfg = 0;
                        ap->ability_match_count = 0;
                        ap->ability_match = 0;
                        ap->idle_match = 0;
                        ap->ack_match = 0;

                        ap->state = ANEG_STATE_RESTART_INIT;
                } else {
                        ap->state = ANEG_STATE_DISABLE_LINK_OK;
                }
                break;

        case ANEG_STATE_RESTART_INIT:
                ap->link_time = ap->cur_time;
                ap->flags &= ~(MR_NP_LOADED);
                ap->txconfig = 0;
                tw32(MAC_TX_AUTO_NEG, 0);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ret = ANEG_TIMER_ENAB;
                ap->state = ANEG_STATE_RESTART;

                /* fallthru */
        case ANEG_STATE_RESTART:
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME)
                        ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
                else
                        ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_DISABLE_LINK_OK:
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_ABILITY_DETECT_INIT:
                ap->flags &= ~(MR_TOGGLE_TX);
                ap->txconfig = ANEG_CFG_FD;
                flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
                if (flowctrl & ADVERTISE_1000XPAUSE)
                        ap->txconfig |= ANEG_CFG_PS1;
                if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                        ap->txconfig |= ANEG_CFG_PS2;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ABILITY_DETECT;
                break;

        case ANEG_STATE_ABILITY_DETECT:
                if (ap->ability_match != 0 && ap->rxconfig != 0)
                        ap->state = ANEG_STATE_ACK_DETECT_INIT;
                break;

        case ANEG_STATE_ACK_DETECT_INIT:
                ap->txconfig |= ANEG_CFG_ACK;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ACK_DETECT;

                /* fallthru */
        case ANEG_STATE_ACK_DETECT:
                if (ap->ack_match != 0) {
                        if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
                            (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
                                ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
                        } else {
                                ap->state = ANEG_STATE_AN_ENABLE;
                        }
                } else if (ap->ability_match != 0 &&
                           ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                }
                break;

        case ANEG_STATE_COMPLETE_ACK_INIT:
                if (ap->rxconfig & ANEG_CFG_INVAL) {
                        ret = ANEG_FAILED;
                        break;
                }
                ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
                               MR_LP_ADV_HALF_DUPLEX |
                               MR_LP_ADV_SYM_PAUSE |
                               MR_LP_ADV_ASYM_PAUSE |
                               MR_LP_ADV_REMOTE_FAULT1 |
                               MR_LP_ADV_REMOTE_FAULT2 |
                               MR_LP_ADV_NEXT_PAGE |
                               MR_TOGGLE_RX |
                               MR_NP_RX);
                if (ap->rxconfig & ANEG_CFG_FD)
                        ap->flags |= MR_LP_ADV_FULL_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_HD)
                        ap->flags |= MR_LP_ADV_HALF_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_PS1)
                        ap->flags |= MR_LP_ADV_SYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_PS2)
                        ap->flags |= MR_LP_ADV_ASYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_RF1)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
                if (ap->rxconfig & ANEG_CFG_RF2)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_LP_ADV_NEXT_PAGE;

                ap->link_time = ap->cur_time;

                ap->flags ^= (MR_TOGGLE_TX);
                if (ap->rxconfig & 0x0008)
                        ap->flags |= MR_TOGGLE_RX;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_NP_RX;
                ap->flags |= MR_PAGE_RX;

                ap->state = ANEG_STATE_COMPLETE_ACK;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_COMPLETE_ACK:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
                                ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                        } else {
                                if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
                                    !(ap->flags & MR_NP_RX)) {
                                        ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                                } else {
                                        ret = ANEG_FAILED;
                                }
                        }
                }
                break;

        case ANEG_STATE_IDLE_DETECT_INIT:
                ap->link_time = ap->cur_time;
                tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_IDLE_DETECT;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_IDLE_DETECT:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        /* XXX another gem from the Broadcom driver :( */
                        ap->state = ANEG_STATE_LINK_OK;
                }
                break;

        case ANEG_STATE_LINK_OK:
                ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
                /* ??? unimplemented */
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT:
                /* ??? unimplemented */
                break;

        default:
                ret = ANEG_FAILED;
                break;
        }

        return ret;
}

static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
        int res = 0;
        struct tg3_fiber_aneginfo aninfo;
        int status = ANEG_FAILED;
        unsigned int tick;
        u32 tmp;

        tw32_f(MAC_TX_AUTO_NEG, 0);

        tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
        tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
        udelay(40);

        tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
        udelay(40);

        memset(&aninfo, 0, sizeof(aninfo));
        aninfo.flags |= MR_AN_ENABLE;
        aninfo.state = ANEG_STATE_UNKNOWN;
        aninfo.cur_time = 0;
        tick = 0;
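        /* Run the state machine for at most ~195 ms (195000 polls at
         * roughly 1 us apiece) before declaring auto-negotiation failed.
         */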
        while (++tick < 195000) {
                status = tg3_fiber_aneg_smachine(tp, &aninfo);
                if (status == ANEG_DONE || status == ANEG_FAILED)
                        break;

                udelay(1);
        }

        tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        *txflags = aninfo.txconfig;
        *rxflags = aninfo.flags;

        if (status == ANEG_DONE &&
            (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
                             MR_LP_ADV_FULL_DUPLEX)))
                res = 1;

        return res;
}

static void tg3_init_bcm8002(struct tg3 *tp)
{
        u32 mac_status = tr32(MAC_STATUS);
        int i;

        /* Reset only when initializing for the first time or when we have a link. */
        if (tg3_flag(tp, INIT_COMPLETE) &&
            !(mac_status & MAC_STATUS_PCS_SYNCED))
                return;

        /* Set PLL lock range. */
        tg3_writephy(tp, 0x16, 0x8007);

        /* SW reset */
        tg3_writephy(tp, MII_BMCR, BMCR_RESET);

        /* Wait for reset to complete. */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 500; i++)
                udelay(10);

        /* Config mode; select PMA/Ch 1 regs. */
        tg3_writephy(tp, 0x10, 0x8411);

        /* Enable auto-lock and comdet, select txclk for tx. */
        tg3_writephy(tp, 0x11, 0x0a10);

        tg3_writephy(tp, 0x18, 0x00a0);
        tg3_writephy(tp, 0x16, 0x41ff);

        /* Assert and deassert POR. */
        tg3_writephy(tp, 0x13, 0x0400);
        udelay(40);
        tg3_writephy(tp, 0x13, 0x0000);

        tg3_writephy(tp, 0x11, 0x0a50);
        udelay(40);
        tg3_writephy(tp, 0x11, 0x0a10);

        /* Wait for signal to stabilize */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 15000; i++)
                udelay(10);

        /* Deselect the channel register so we can read the PHYID
         * later.
         */
        tg3_writephy(tp, 0x10, 0x8011);
}

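/* Fiber link setup for devices whose SERDES block performs
 * auto-negotiation in hardware via the SG_DIG control/status
 * registers.  Returns nonzero if the link should be considered up.
 */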
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
        u16 flowctrl;
        u32 sg_dig_ctrl, sg_dig_status;
        u32 serdes_cfg, expected_sg_dig_ctrl;
        int workaround, port_a;
        int current_link_up;

        serdes_cfg = 0;
        expected_sg_dig_ctrl = 0;
        workaround = 0;
        port_a = 1;
        current_link_up = 0;

        if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
            tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
                workaround = 1;
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        port_a = 0;

                /* preserve bits 0-11,13,14 for signal pre-emphasis */
                /* preserve bits 20-23 for voltage regulator */
                serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
        }

        sg_dig_ctrl = tr32(SG_DIG_CTRL);

        if (tp->link_config.autoneg != AUTONEG_ENABLE) {
                if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
                        if (workaround) {
                                u32 val = serdes_cfg;

                                if (port_a)
                                        val |= 0xc010000;
                                else
                                        val |= 0x4010000;
                                tw32_f(MAC_SERDES_CFG, val);
                        }

                        tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                }
                if (mac_status & MAC_STATUS_PCS_SYNCED) {
                        tg3_setup_flow_control(tp, 0, 0);
                        current_link_up = 1;
                }
                goto out;
        }

        /* Want auto-negotiation.  */
        expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

        flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
        if (flowctrl & ADVERTISE_1000XPAUSE)
                expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
        if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

        if (sg_dig_ctrl != expected_sg_dig_ctrl) {
                if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
                    tp->serdes_counter &&
                    ((mac_status & (MAC_STATUS_PCS_SYNCED |
                                    MAC_STATUS_RCVD_CFG)) ==
                     MAC_STATUS_PCS_SYNCED)) {
                        tp->serdes_counter--;
                        current_link_up = 1;
                        goto out;
                }
restart_autoneg:
                if (workaround)
                        tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
                udelay(5);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
        } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
                                 MAC_STATUS_SIGNAL_DET)) {
                sg_dig_status = tr32(SG_DIG_STATUS);
                mac_status = tr32(MAC_STATUS);

                if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
                    (mac_status & MAC_STATUS_PCS_SYNCED)) {
                        u32 local_adv = 0, remote_adv = 0;

                        if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tp->link_config.rmt_adv =
                                           mii_adv_to_ethtool_adv_x(remote_adv);

                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                        current_link_up = 1;
                        tp->serdes_counter = 0;
                        tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
                } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
                        if (tp->serdes_counter)
                                tp->serdes_counter--;
                        else {
                                if (workaround) {
                                        u32 val = serdes_cfg;

                                        if (port_a)
                                                val |= 0xc010000;
                                        else
                                                val |= 0x4010000;

                                        tw32_f(MAC_SERDES_CFG, val);
                                }

                                tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                                udelay(40);

                                /* Link parallel detection - link is up only
                                 * if we have PCS_SYNC and are not receiving
                                 * config code words.
                                 */
                                mac_status = tr32(MAC_STATUS);
                                if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
                                    !(mac_status & MAC_STATUS_RCVD_CFG)) {
                                        tg3_setup_flow_control(tp, 0, 0);
                                        current_link_up = 1;
                                        tp->phy_flags |=
                                                TG3_PHYFLG_PARALLEL_DETECT;
                                        tp->serdes_counter =
                                                SERDES_PARALLEL_DET_TIMEOUT;
                                } else
                                        goto restart_autoneg;
                        }
                }
        } else {
                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
        }

out:
        return current_link_up;
}

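/* Fiber link setup without hardware auto-negotiation: run the software
 * clause 37 state machine above, or simply force a 1000-full link when
 * auto-negotiation is disabled.  Returns nonzero if the link is up.
 */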
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
        int current_link_up = 0;

        if (!(mac_status & MAC_STATUS_PCS_SYNCED))
                goto out;

        if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                u32 txflags, rxflags;
                int i;

                if (fiber_autoneg(tp, &txflags, &rxflags)) {
                        u32 local_adv = 0, remote_adv = 0;

                        if (txflags & ANEG_CFG_PS1)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (txflags & ANEG_CFG_PS2)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (rxflags & MR_LP_ADV_SYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (rxflags & MR_LP_ADV_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tp->link_config.rmt_adv =
                                           mii_adv_to_ethtool_adv_x(remote_adv);

                        tg3_setup_flow_control(tp, local_adv, remote_adv);

                        current_link_up = 1;
                }
                for (i = 0; i < 30; i++) {
                        udelay(20);
                        tw32_f(MAC_STATUS,
                               (MAC_STATUS_SYNC_CHANGED |
                                MAC_STATUS_CFG_CHANGED));
                        udelay(40);
                        if ((tr32(MAC_STATUS) &
                             (MAC_STATUS_SYNC_CHANGED |
                              MAC_STATUS_CFG_CHANGED)) == 0)
                                break;
                }

                mac_status = tr32(MAC_STATUS);
                if (current_link_up == 0 &&
                    (mac_status & MAC_STATUS_PCS_SYNCED) &&
                    !(mac_status & MAC_STATUS_RCVD_CFG))
                        current_link_up = 1;
        } else {
                tg3_setup_flow_control(tp, 0, 0);

                /* Forcing 1000FD link up. */
                current_link_up = 1;

                tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
                udelay(40);

                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

out:
        return current_link_up;
}

static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
        u32 orig_pause_cfg;
        u16 orig_active_speed;
        u8 orig_active_duplex;
        u32 mac_status;
        int current_link_up;
        int i;

        orig_pause_cfg = tp->link_config.active_flowctrl;
        orig_active_speed = tp->link_config.active_speed;
        orig_active_duplex = tp->link_config.active_duplex;

        if (!tg3_flag(tp, HW_AUTONEG) &&
            netif_carrier_ok(tp->dev) &&
            tg3_flag(tp, INIT_COMPLETE)) {
                mac_status = tr32(MAC_STATUS);
                mac_status &= (MAC_STATUS_PCS_SYNCED |
                               MAC_STATUS_SIGNAL_DET |
                               MAC_STATUS_CFG_CHANGED |
                               MAC_STATUS_RCVD_CFG);
                if (mac_status == (MAC_STATUS_PCS_SYNCED |
                                   MAC_STATUS_SIGNAL_DET)) {
                        tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
                                            MAC_STATUS_CFG_CHANGED));
                        return 0;
                }
        }

        tw32_f(MAC_TX_AUTO_NEG, 0);

        tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
        tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        if (tp->phy_id == TG3_PHY_ID_BCM8002)
                tg3_init_bcm8002(tp);

        /* Enable link change event even when serdes polling.  */
        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        udelay(40);

        current_link_up = 0;
        tp->link_config.rmt_adv = 0;
        mac_status = tr32(MAC_STATUS);

        if (tg3_flag(tp, HW_AUTONEG))
                current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
        else
                current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

        tp->napi[0].hw_status->status =
                (SD_STATUS_UPDATED |
                 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

        for (i = 0; i < 100; i++) {
                tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
                                    MAC_STATUS_CFG_CHANGED));
                udelay(5);
                if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
                                         MAC_STATUS_CFG_CHANGED |
                                         MAC_STATUS_LNKSTATE_CHANGED)) == 0)
                        break;
        }

        mac_status = tr32(MAC_STATUS);
        if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
                current_link_up = 0;
                if (tp->link_config.autoneg == AUTONEG_ENABLE &&
                    tp->serdes_counter == 0) {
                        tw32_f(MAC_MODE, (tp->mac_mode |
                                          MAC_MODE_SEND_CONFIGS));
                        udelay(1);
                        tw32_f(MAC_MODE, tp->mac_mode);
                }
        }

        if (current_link_up == 1) {
                tp->link_config.active_speed = SPEED_1000;
                tp->link_config.active_duplex = DUPLEX_FULL;
                tw32(MAC_LED_CTRL, (tp->led_ctrl |
                                    LED_CTRL_LNKLED_OVERRIDE |
                                    LED_CTRL_1000MBPS_ON));
        } else {
                tp->link_config.active_speed = SPEED_UNKNOWN;
                tp->link_config.active_duplex = DUPLEX_UNKNOWN;
                tw32(MAC_LED_CTRL, (tp->led_ctrl |
                                    LED_CTRL_LNKLED_OVERRIDE |
                                    LED_CTRL_TRAFFIC_OVERRIDE));
        }

        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else
                        netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        } else {
                u32 now_pause_cfg = tp->link_config.active_flowctrl;
                if (orig_pause_cfg != now_pause_cfg ||
                    orig_active_speed != tp->link_config.active_speed ||
                    orig_active_duplex != tp->link_config.active_duplex)
                        tg3_link_report(tp);
        }

        return 0;
}

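/* Link setup for SERDES devices managed through an MII-style register
 * interface (e.g. the 5714S-class parts handled below).
 */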
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
        int current_link_up, err = 0;
        u32 bmsr, bmcr;
        u16 current_speed;
        u8 current_duplex;
        u32 local_adv, remote_adv;

        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tw32(MAC_EVENT, 0);

        tw32_f(MAC_STATUS,
             (MAC_STATUS_SYNC_CHANGED |
              MAC_STATUS_CFG_CHANGED |
              MAC_STATUS_MI_COMPLETION |
              MAC_STATUS_LNKSTATE_CHANGED));
        udelay(40);

        if (force_reset)
                tg3_phy_reset(tp);

        current_link_up = 0;
        current_speed = SPEED_UNKNOWN;
        current_duplex = DUPLEX_UNKNOWN;
        tp->link_config.rmt_adv = 0;

        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
                if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        err |= tg3_readphy(tp, MII_BMCR, &bmcr);

        if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
            (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
                /* do nothing, just check for link up at the end */
        } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                u32 adv, newadv;

                err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
                newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
                                 ADVERTISE_1000XPAUSE |
                                 ADVERTISE_1000XPSE_ASYM |
                                 ADVERTISE_SLCT);

                newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
                newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

                if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
                        tg3_writephy(tp, MII_ADVERTISE, newadv);
                        bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
                        tg3_writephy(tp, MII_BMCR, bmcr);

                        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
                        tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
                        tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

                        return err;
                }
        } else {
                u32 new_bmcr;

                bmcr &= ~BMCR_SPEED1000;
                new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

                if (tp->link_config.duplex == DUPLEX_FULL)
                        new_bmcr |= BMCR_FULLDPLX;

                if (new_bmcr != bmcr) {
                        /* BMCR_SPEED1000 is a reserved bit that needs
                         * to be set on write.
                         */
                        new_bmcr |= BMCR_SPEED1000;

                        /* Force a linkdown */
                        if (netif_carrier_ok(tp->dev)) {
                                u32 adv;

                                err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
                                adv &= ~(ADVERTISE_1000XFULL |
                                         ADVERTISE_1000XHALF |
                                         ADVERTISE_SLCT);
                                tg3_writephy(tp, MII_ADVERTISE, adv);
                                tg3_writephy(tp, MII_BMCR, bmcr |
                                                           BMCR_ANRESTART |
                                                           BMCR_ANENABLE);
                                udelay(10);
                                netif_carrier_off(tp->dev);
                        }
                        tg3_writephy(tp, MII_BMCR, new_bmcr);
                        bmcr = new_bmcr;
                        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
                        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5714) {
                                if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
                                        bmsr |= BMSR_LSTATUS;
                                else
                                        bmsr &= ~BMSR_LSTATUS;
                        }
                        tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
                }
        }

        if (bmsr & BMSR_LSTATUS) {
                current_speed = SPEED_1000;
                current_link_up = 1;
                if (bmcr & BMCR_FULLDPLX)
                        current_duplex = DUPLEX_FULL;
                else
                        current_duplex = DUPLEX_HALF;

                local_adv = 0;
                remote_adv = 0;

                if (bmcr & BMCR_ANENABLE) {
                        u32 common;

                        err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
                        err |= tg3_readphy(tp, MII_LPA, &remote_adv);
                        common = local_adv & remote_adv;
                        if (common & (ADVERTISE_1000XHALF |
                                      ADVERTISE_1000XFULL)) {
                                if (common & ADVERTISE_1000XFULL)
                                        current_duplex = DUPLEX_FULL;
                                else
                                        current_duplex = DUPLEX_HALF;

                                tp->link_config.rmt_adv =
                                           mii_adv_to_ethtool_adv_x(remote_adv);
                        } else if (!tg3_flag(tp, 5780_CLASS)) {
                                /* Link is up via parallel detect */
                        } else {
                                current_link_up = 0;
                        }
                }
        }

        if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
                tg3_setup_flow_control(tp, local_adv, remote_adv);

        tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
        if (tp->link_config.active_duplex == DUPLEX_HALF)
                tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

        tp->link_config.active_speed = current_speed;
        tp->link_config.active_duplex = current_duplex;

        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else {
                        netif_carrier_off(tp->dev);
                        tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
                }
                tg3_link_report(tp);
        }
        return err;
}

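/* Called periodically while auto-negotiation has not completed.  If
 * the link partner is not sending config code words but signal detect
 * is present, force the link up via parallel detection; if code words
 * reappear later, hand control back to auto-negotiation.
 */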
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
        if (tp->serdes_counter) {
                /* Give autoneg time to complete. */
                tp->serdes_counter--;
                return;
        }

        if (!netif_carrier_ok(tp->dev) &&
            (tp->link_config.autoneg == AUTONEG_ENABLE)) {
                u32 bmcr;

                tg3_readphy(tp, MII_BMCR, &bmcr);
                if (bmcr & BMCR_ANENABLE) {
                        u32 phy1, phy2;

                        /* Select shadow register 0x1f */
                        tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
                        tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

                        /* Select expansion interrupt status register */
                        tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                                         MII_TG3_DSP_EXP1_INT_STAT);
                        tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
                        tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

                        if ((phy1 & 0x10) && !(phy2 & 0x20)) {
                                /* We have signal detect and not receiving
                                 * config code words, link is up by parallel
                                 * detection.
                                 */

                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                tg3_writephy(tp, MII_BMCR, bmcr);
                                tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
                        }
                }
        } else if (netif_carrier_ok(tp->dev) &&
                   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
                   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
                u32 phy2;

                /* Select expansion interrupt status register */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                                 MII_TG3_DSP_EXP1_INT_STAT);
                tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        /* Config code words received, turn on autoneg. */
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

                        tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
                }
        }
}

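/* Top-level link setup: dispatch to the copper, fiber, or fiber-MII
 * handler, then update the clock prescaler, transmit slot time,
 * statistics coalescing, and ASPM threshold that depend on the
 * resulting link state.
 */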
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
        u32 val;
        int err;

        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                err = tg3_setup_fiber_phy(tp, force_reset);
        else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
                err = tg3_setup_fiber_mii_phy(tp, force_reset);
        else
                err = tg3_setup_copper_phy(tp, force_reset);

        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
                u32 scale;

                val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
                if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
                        scale = 65;
                else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
                        scale = 6;
                else
                        scale = 12;

                val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
                val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
                tw32(GRC_MISC_CFG, val);
        }

        val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
              (6 << TX_LENGTHS_IPG_SHIFT);
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
                val |= tr32(MAC_TX_LENGTHS) &
                       (TX_LENGTHS_JMB_FRM_LEN_MSK |
                        TX_LENGTHS_CNT_DWN_VAL_MSK);

        if (tp->link_config.active_speed == SPEED_1000 &&
            tp->link_config.active_duplex == DUPLEX_HALF)
                tw32(MAC_TX_LENGTHS, val |
                     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
        else
                tw32(MAC_TX_LENGTHS, val |
                     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

        if (!tg3_flag(tp, 5705_PLUS)) {
                if (netif_carrier_ok(tp->dev)) {
                        tw32(HOSTCC_STAT_COAL_TICKS,
                             tp->coal.stats_block_coalesce_usecs);
                } else {
                        tw32(HOSTCC_STAT_COAL_TICKS, 0);
                }
        }

        if (tg3_flag(tp, ASPM_WORKAROUND)) {
                val = tr32(PCIE_PWR_MGMT_THRESH);
                if (!netif_carrier_ok(tp->dev))
                        val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
                              tp->pwrmgmt_thresh;
                else
                        val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
                tw32(PCIE_PWR_MGMT_THRESH, val);
        }

        return err;
}

static inline int tg3_irq_sync(struct tg3 *tp)
{
        return tp->irq_sync;
}

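/* Copy @len bytes of registers starting at device offset @off into the
 * dump buffer.  @dst is first advanced by @off so that each register
 * block lands at its natural offset within the buffer.
 */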
static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
        int i;

        dst = (u32 *)((u8 *)dst + off);
        for (i = 0; i < len; i += sizeof(u32))
                *dst++ = tr32(off + i);
}

static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
        tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
        tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
        tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
        tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
        tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
        tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
        tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
        tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
        tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
        tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
        tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
        tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
        tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
        tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
        tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
        tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
        tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
        tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
        tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

        if (tg3_flag(tp, SUPPORT_MSIX))
                tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

        tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
        tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
        tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
        tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
        tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
        tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
        tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
        tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

        if (!tg3_flag(tp, 5705_PLUS)) {
                tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
                tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
                tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
        }

        tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
        tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
        tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
        tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
        tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

        if (tg3_flag(tp, NVRAM))
                tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}

static void tg3_dump_state(struct tg3 *tp)
{
        int i;
        u32 *regs;

        regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
        if (!regs) {
                netdev_err(tp->dev, "Failed allocating register dump buffer\n");
                return;
        }

        if (tg3_flag(tp, PCI_EXPRESS)) {
                /* Read up to but not including private PCI registers */
                for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
                        regs[i / sizeof(u32)] = tr32(i);
        } else
                tg3_dump_legacy_regs(tp, regs);

        for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
                if (!regs[i + 0] && !regs[i + 1] &&
                    !regs[i + 2] && !regs[i + 3])
                        continue;

                netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
                           i * 4,
                           regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
        }

        kfree(regs);

        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                /* SW status block */
                netdev_err(tp->dev,
                         "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
                           i,
                           tnapi->hw_status->status,
                           tnapi->hw_status->status_tag,
                           tnapi->hw_status->rx_jumbo_consumer,
                           tnapi->hw_status->rx_consumer,
                           tnapi->hw_status->rx_mini_consumer,
                           tnapi->hw_status->idx[0].rx_producer,
                           tnapi->hw_status->idx[0].tx_consumer);

                netdev_err(tp->dev,
                "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
                           i,
                           tnapi->last_tag, tnapi->last_irq_tag,
                           tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
                           tnapi->rx_rcb_ptr,
                           tnapi->prodring.rx_std_prod_idx,
                           tnapi->prodring.rx_std_cons_idx,
                           tnapi->prodring.rx_jmb_prod_idx,
                           tnapi->prodring.rx_jmb_cons_idx);
        }
}

/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
        BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
               tp->write32_tx_mbox == tg3_write_indirect_mbox);

        netdev_warn(tp->dev,
                    "The system may be re-ordering memory-mapped I/O "
                    "cycles to the network device, attempting to recover. "
                    "Please report the problem to the driver maintainer "
                    "and include system chipset information.\n");

        spin_lock(&tp->lock);
        tg3_flag_set(tp, TX_RECOVERY_PENDING);
        spin_unlock(&tp->lock);
}

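/* Number of free descriptors in the tx ring.  tx_prod and tx_cons are
 * free-running indices, so the masked difference is the count of
 * descriptors currently in flight.
 */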
5635 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5636 {
5637         /* Tell compiler to fetch tx indices from memory. */
5638         barrier();
5639         return tnapi->tx_pending -
5640                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5641 }
5642
5643 /* Tigon3 never reports partial packet sends.  So we do not
5644  * need special logic to handle SKBs that have not had all
5645  * of their frags sent yet, like SunGEM does.
5646  */
5647 static void tg3_tx(struct tg3_napi *tnapi)
5648 {
5649         struct tg3 *tp = tnapi->tp;
5650         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5651         u32 sw_idx = tnapi->tx_cons;
5652         struct netdev_queue *txq;
5653         int index = tnapi - tp->napi;
5654         unsigned int pkts_compl = 0, bytes_compl = 0;
5655
5656         if (tg3_flag(tp, ENABLE_TSS))
5657                 index--;
5658
5659         txq = netdev_get_tx_queue(tp->dev, index);
5660
5661         while (sw_idx != hw_idx) {
5662                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5663                 struct sk_buff *skb = ri->skb;
5664                 int i, tx_bug = 0;
5665
5666                 if (unlikely(skb == NULL)) {
5667                         tg3_tx_recover(tp);
5668                         return;
5669                 }
5670
5671                 pci_unmap_single(tp->pdev,
5672                                  dma_unmap_addr(ri, mapping),
5673                                  skb_headlen(skb),
5674                                  PCI_DMA_TODEVICE);
5675
5676                 ri->skb = NULL;
5677
5678                 while (ri->fragmented) {
5679                         ri->fragmented = false;
5680                         sw_idx = NEXT_TX(sw_idx);
5681                         ri = &tnapi->tx_buffers[sw_idx];
5682                 }
5683
5684                 sw_idx = NEXT_TX(sw_idx);
5685
5686                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5687                         ri = &tnapi->tx_buffers[sw_idx];
5688                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5689                                 tx_bug = 1;
5690
5691                         pci_unmap_page(tp->pdev,
5692                                        dma_unmap_addr(ri, mapping),
5693                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5694                                        PCI_DMA_TODEVICE);
5695
5696                         while (ri->fragmented) {
5697                                 ri->fragmented = false;
5698                                 sw_idx = NEXT_TX(sw_idx);
5699                                 ri = &tnapi->tx_buffers[sw_idx];
5700                         }
5701
5702                         sw_idx = NEXT_TX(sw_idx);
5703                 }
5704
5705                 pkts_compl++;
5706                 bytes_compl += skb->len;
5707
5708                 dev_kfree_skb(skb);
5709
5710                 if (unlikely(tx_bug)) {
5711                         tg3_tx_recover(tp);
5712                         return;
5713                 }
5714         }
5715
5716         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5717
5718         tnapi->tx_cons = sw_idx;
5719
5720         /* Need to make the tx_cons update visible to tg3_start_xmit()
5721          * before checking for netif_tx_queue_stopped().  Without the
5722          * memory barrier, there is a small possibility that tg3_start_xmit()
5723          * will miss it and cause the queue to be stopped forever.
5724          */
5725         smp_mb();
5726
5727         if (unlikely(netif_tx_queue_stopped(txq) &&
5728                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5729                 __netif_tx_lock(txq, smp_processor_id());
5730                 if (netif_tx_queue_stopped(txq) &&
5731                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5732                         netif_tx_wake_queue(txq);
5733                 __netif_tx_unlock(txq);
5734         }
5735 }
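
/* Pairing sketch (illustrative; tg3_tso_bug() below uses the same
 * pattern on the xmit side): the producer does
 *
 *	netif_tx_stop_queue(txq);
 *	smp_mb();
 *	if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
 *		netif_tx_wake_queue(txq);
 *
 * while tg3_tx() above stores tx_cons, issues smp_mb(), then tests
 * netif_tx_queue_stopped().  Each barrier orders that side's store
 * before its load, so at least one side observes the other's update
 * and the queue cannot remain stopped forever.
 */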
5736
5737 static void tg3_frag_free(bool is_frag, void *data)
5738 {
5739         if (is_frag)
5740                 put_page(virt_to_head_page(data));
5741         else
5742                 kfree(data);
5743 }
5744
5745 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5746 {
5747         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5748                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5749
5750         if (!ri->data)
5751                 return;
5752
5753         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5754                          map_sz, PCI_DMA_FROMDEVICE);
5755         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
5756         ri->data = NULL;
5757 }
5758
5760 /* Returns size of skb allocated or < 0 on error.
5761  *
5762  * We only need to fill in the address because the other members
5763  * of the RX descriptor are invariant, see tg3_init_rings.
5764  *
5765  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5766  * posting buffers we only dirty the first cache line of the RX
5767  * descriptor (containing the address).  Whereas for the RX status
5768  * buffers the cpu only reads the last cacheline of the RX descriptor
5769  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5770  */
5771 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5772                              u32 opaque_key, u32 dest_idx_unmasked,
5773                              unsigned int *frag_size)
5774 {
5775         struct tg3_rx_buffer_desc *desc;
5776         struct ring_info *map;
5777         u8 *data;
5778         dma_addr_t mapping;
5779         int skb_size, data_size, dest_idx;
5780
5781         switch (opaque_key) {
5782         case RXD_OPAQUE_RING_STD:
5783                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5784                 desc = &tpr->rx_std[dest_idx];
5785                 map = &tpr->rx_std_buffers[dest_idx];
5786                 data_size = tp->rx_pkt_map_sz;
5787                 break;
5788
5789         case RXD_OPAQUE_RING_JUMBO:
5790                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5791                 desc = &tpr->rx_jmb[dest_idx].std;
5792                 map = &tpr->rx_jmb_buffers[dest_idx];
5793                 data_size = TG3_RX_JMB_MAP_SZ;
5794                 break;
5795
5796         default:
5797                 return -EINVAL;
5798         }
5799
5800         /* Do not overwrite any of the map or descriptor information
5801          * until we are sure we can commit to a new buffer.
5802          *
5803          * Callers depend upon this behavior and assume that
5804          * we leave everything unchanged if we fail.
5805          */
5806         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5807                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5808         if (skb_size <= PAGE_SIZE) {
5809                 data = netdev_alloc_frag(skb_size);
5810                 *frag_size = skb_size;
5811         } else {
5812                 data = kmalloc(skb_size, GFP_ATOMIC);
5813                 *frag_size = 0;
5814         }
5815         if (!data)
5816                 return -ENOMEM;
5817
5818         mapping = pci_map_single(tp->pdev,
5819                                  data + TG3_RX_OFFSET(tp),
5820                                  data_size,
5821                                  PCI_DMA_FROMDEVICE);
5822         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
5823                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
5824                 return -EIO;
5825         }
5826
5827         map->data = data;
5828         dma_unmap_addr_set(map, mapping, mapping);
5829
5830         desc->addr_hi = ((u64)mapping >> 32);
5831         desc->addr_lo = ((u64)mapping & 0xffffffff);
5832
5833         return data_size;
5834 }
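
/* Sizing sketch (illustrative, assumes 4 KiB pages and a standard
 * 1500-byte MTU): data_size = tp->rx_pkt_map_sz is roughly 1.5 KiB,
 * so skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) stays under
 * PAGE_SIZE and the buffer comes from netdev_alloc_frag(), with
 * *frag_size recorded for a later build_skb().  A jumbo buffer of
 * TG3_RX_JMB_MAP_SZ exceeds PAGE_SIZE and falls back to
 * kmalloc(GFP_ATOMIC) with *frag_size = 0.
 */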
5835
5836 /* We only need to move over in the address because the other
5837  * members of the RX descriptor are invariant.  See notes above
5838  * tg3_alloc_rx_data for full details.
5839  */
5840 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5841                            struct tg3_rx_prodring_set *dpr,
5842                            u32 opaque_key, int src_idx,
5843                            u32 dest_idx_unmasked)
5844 {
5845         struct tg3 *tp = tnapi->tp;
5846         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5847         struct ring_info *src_map, *dest_map;
5848         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5849         int dest_idx;
5850
5851         switch (opaque_key) {
5852         case RXD_OPAQUE_RING_STD:
5853                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5854                 dest_desc = &dpr->rx_std[dest_idx];
5855                 dest_map = &dpr->rx_std_buffers[dest_idx];
5856                 src_desc = &spr->rx_std[src_idx];
5857                 src_map = &spr->rx_std_buffers[src_idx];
5858                 break;
5859
5860         case RXD_OPAQUE_RING_JUMBO:
5861                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5862                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5863                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5864                 src_desc = &spr->rx_jmb[src_idx].std;
5865                 src_map = &spr->rx_jmb_buffers[src_idx];
5866                 break;
5867
5868         default:
5869                 return;
5870         }
5871
5872         dest_map->data = src_map->data;
5873         dma_unmap_addr_set(dest_map, mapping,
5874                            dma_unmap_addr(src_map, mapping));
5875         dest_desc->addr_hi = src_desc->addr_hi;
5876         dest_desc->addr_lo = src_desc->addr_lo;
5877
5878         /* Ensure that the update to the skb happens after the physical
5879          * addresses have been transferred to the new BD location.
5880          */
5881         smp_wmb();
5882
5883         src_map->data = NULL;
5884 }
5885
5886 /* The RX ring scheme is composed of multiple rings which post fresh
5887  * buffers to the chip, and one special ring the chip uses to report
5888  * status back to the host.
5889  *
5890  * The special ring reports the status of received packets to the
5891  * host.  The chip does not write into the original descriptor the
5892  * RX buffer was obtained from.  The chip simply takes the original
5893  * descriptor as provided by the host, updates the status and length
5894  * field, then writes this into the next status ring entry.
5895  *
5896  * Each ring the host uses to post buffers to the chip is described
5897  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5898  * it is first placed into the on-chip RAM.  When the packet's length
5899  * is known, it walks down the TG3_BDINFO entries to select the ring.
5900  * Each TG3_BDINFO specifies a MAXLEN field; the first TG3_BDINFO
5901  * whose MAXLEN covers the new packet's length is chosen.
5902  *
5903  * The "separate ring for rx status" scheme may sound queer, but it makes
5904  * sense from a cache coherency perspective.  If only the host writes
5905  * to the buffer post rings, and only the chip writes to the rx status
5906  * rings, then cache lines never move beyond shared-modified state.
5907  * If both the host and chip were to write into the same ring, cache line
5908  * eviction could occur since both entities want it in an exclusive state.
5909  */
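
/* Condensed picture of the scheme described above (sketch only):
 *
 *	host writes                      chip writes
 *	-----------                      -----------
 *	std/jumbo producer rings  --->   on-chip packet staging
 *	(fresh empty buffers)                   |
 *	                                        v
 *	                          <---   rx return (status) ring
 *	                                 (len, flags, opaque cookie)
 *
 * Each side only ever writes its own ring(s), so no cache line is
 * written by both the host and the chip.
 */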
5910 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5911 {
5912         struct tg3 *tp = tnapi->tp;
5913         u32 work_mask, rx_std_posted = 0;
5914         u32 std_prod_idx, jmb_prod_idx;
5915         u32 sw_idx = tnapi->rx_rcb_ptr;
5916         u16 hw_idx;
5917         int received;
5918         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5919
5920         hw_idx = *(tnapi->rx_rcb_prod_idx);
5921         /*
5922          * We need to order the read of hw_idx and the read of
5923          * the opaque cookie.
5924          */
5925         rmb();
5926         work_mask = 0;
5927         received = 0;
5928         std_prod_idx = tpr->rx_std_prod_idx;
5929         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5930         while (sw_idx != hw_idx && budget > 0) {
5931                 struct ring_info *ri;
5932                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5933                 unsigned int len;
5934                 struct sk_buff *skb;
5935                 dma_addr_t dma_addr;
5936                 u32 opaque_key, desc_idx, *post_ptr;
5937                 u8 *data;
5938
5939                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5940                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5941                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5942                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5943                         dma_addr = dma_unmap_addr(ri, mapping);
5944                         data = ri->data;
5945                         post_ptr = &std_prod_idx;
5946                         rx_std_posted++;
5947                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5948                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5949                         dma_addr = dma_unmap_addr(ri, mapping);
5950                         data = ri->data;
5951                         post_ptr = &jmb_prod_idx;
5952                 } else
5953                         goto next_pkt_nopost;
5954
5955                 work_mask |= opaque_key;
5956
5957                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5958                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5959                 drop_it:
5960                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5961                                        desc_idx, *post_ptr);
5962                 drop_it_no_recycle:
5963                         /* Other statistics are tracked by the card. */
5964                         tp->rx_dropped++;
5965                         goto next_pkt;
5966                 }
5967
5968                 prefetch(data + TG3_RX_OFFSET(tp));
5969                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5970                       ETH_FCS_LEN;
5971
5972                 if (len > TG3_RX_COPY_THRESH(tp)) {
5973                         int skb_size;
5974                         unsigned int frag_size;
5975
5976                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5977                                                     *post_ptr, &frag_size);
5978                         if (skb_size < 0)
5979                                 goto drop_it;
5980
5981                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5982                                          PCI_DMA_FROMDEVICE);
5983
5984                         skb = build_skb(data, frag_size);
5985                         if (!skb) {
5986                                 tg3_frag_free(frag_size != 0, data);
5987                                 goto drop_it_no_recycle;
5988                         }
5989                         skb_reserve(skb, TG3_RX_OFFSET(tp));
5990                         /* Ensure that the update to the data happens
5991                          * after the usage of the old DMA mapping.
5992                          */
5993                         smp_wmb();
5994
5995                         ri->data = NULL;
5996
5997                 } else {
5998                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5999                                        desc_idx, *post_ptr);
6000
6001                         skb = netdev_alloc_skb(tp->dev,
6002                                                len + TG3_RAW_IP_ALIGN);
6003                         if (skb == NULL)
6004                                 goto drop_it_no_recycle;
6005
6006                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6007                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6008                         memcpy(skb->data,
6009                                data + TG3_RX_OFFSET(tp),
6010                                len);
6011                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6012                 }
6013
6014                 skb_put(skb, len);
6015                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6016                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6017                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6018                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6019                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6020                 else
6021                         skb_checksum_none_assert(skb);
6022
6023                 skb->protocol = eth_type_trans(skb, tp->dev);
6024
6025                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6026                     skb->protocol != htons(ETH_P_8021Q)) {
6027                         dev_kfree_skb(skb);
6028                         goto drop_it_no_recycle;
6029                 }
6030
6031                 if (desc->type_flags & RXD_FLAG_VLAN &&
6032                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6033                         __vlan_hwaccel_put_tag(skb,
6034                                                desc->err_vlan & RXD_VLAN_MASK);
6035
6036                 napi_gro_receive(&tnapi->napi, skb);
6037
6038                 received++;
6039                 budget--;
6040
6041 next_pkt:
6042                 (*post_ptr)++;
6043
6044                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6045                         tpr->rx_std_prod_idx = std_prod_idx &
6046                                                tp->rx_std_ring_mask;
6047                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6048                                      tpr->rx_std_prod_idx);
6049                         work_mask &= ~RXD_OPAQUE_RING_STD;
6050                         rx_std_posted = 0;
6051                 }
6052 next_pkt_nopost:
6053                 sw_idx++;
6054                 sw_idx &= tp->rx_ret_ring_mask;
6055
6056                 /* Refresh hw_idx to see if there is new work */
6057                 if (sw_idx == hw_idx) {
6058                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6059                         rmb();
6060                 }
6061         }
6062
6063         /* ACK the status ring. */
6064         tnapi->rx_rcb_ptr = sw_idx;
6065         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6066
6067         /* Refill RX ring(s). */
6068         if (!tg3_flag(tp, ENABLE_RSS)) {
6069                 /* Sync BD data before updating mailbox */
6070                 wmb();
6071
6072                 if (work_mask & RXD_OPAQUE_RING_STD) {
6073                         tpr->rx_std_prod_idx = std_prod_idx &
6074                                                tp->rx_std_ring_mask;
6075                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6076                                      tpr->rx_std_prod_idx);
6077                 }
6078                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6079                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6080                                                tp->rx_jmb_ring_mask;
6081                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6082                                      tpr->rx_jmb_prod_idx);
6083                 }
6084                 mmiowb();
6085         } else if (work_mask) {
6086                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6087                  * updated before the producer indices can be updated.
6088                  */
6089                 smp_wmb();
6090
6091                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6092                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6093
6094                 if (tnapi != &tp->napi[1]) {
6095                         tp->rx_refill = true;
6096                         napi_schedule(&tp->napi[1].napi);
6097                 }
6098         }
6099
6100         return received;
6101 }
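
/* Copy-break recap (sketch of the paths above): packets longer than
 * TG3_RX_COPY_THRESH(tp) keep their DMA buffer and are wrapped with
 * build_skb() while the ring slot is refilled via tg3_alloc_rx_data().
 * Shorter packets are memcpy()'d into a small freshly allocated skb
 * and the original buffer is recycled back to the producer ring with
 * tg3_recycle_rx(), saving an allocation and a DMA mapping for small
 * frames.
 */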
6102
6103 static void tg3_poll_link(struct tg3 *tp)
6104 {
6105         /* handle link change and other phy events */
6106         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6107                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6108
6109                 if (sblk->status & SD_STATUS_LINK_CHG) {
6110                         sblk->status = SD_STATUS_UPDATED |
6111                                        (sblk->status & ~SD_STATUS_LINK_CHG);
6112                         spin_lock(&tp->lock);
6113                         if (tg3_flag(tp, USE_PHYLIB)) {
6114                                 tw32_f(MAC_STATUS,
6115                                      (MAC_STATUS_SYNC_CHANGED |
6116                                       MAC_STATUS_CFG_CHANGED |
6117                                       MAC_STATUS_MI_COMPLETION |
6118                                       MAC_STATUS_LNKSTATE_CHANGED));
6119                                 udelay(40);
6120                         } else
6121                                 tg3_setup_phy(tp, 0);
6122                         spin_unlock(&tp->lock);
6123                 }
6124         }
6125 }
6126
6127 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6128                                 struct tg3_rx_prodring_set *dpr,
6129                                 struct tg3_rx_prodring_set *spr)
6130 {
6131         u32 si, di, cpycnt, src_prod_idx;
6132         int i, err = 0;
6133
6134         while (1) {
6135                 src_prod_idx = spr->rx_std_prod_idx;
6136
6137                 /* Make sure updates to the rx_std_buffers[] entries and the
6138                  * standard producer index are seen in the correct order.
6139                  */
6140                 smp_rmb();
6141
6142                 if (spr->rx_std_cons_idx == src_prod_idx)
6143                         break;
6144
6145                 if (spr->rx_std_cons_idx < src_prod_idx)
6146                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6147                 else
6148                         cpycnt = tp->rx_std_ring_mask + 1 -
6149                                  spr->rx_std_cons_idx;
6150
6151                 cpycnt = min(cpycnt,
6152                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6153
6154                 si = spr->rx_std_cons_idx;
6155                 di = dpr->rx_std_prod_idx;
6156
6157                 for (i = di; i < di + cpycnt; i++) {
6158                         if (dpr->rx_std_buffers[i].data) {
6159                                 cpycnt = i - di;
6160                                 err = -ENOSPC;
6161                                 break;
6162                         }
6163                 }
6164
6165                 if (!cpycnt)
6166                         break;
6167
6168                 /* Ensure that updates to the rx_std_buffers ring and the
6169                  * shadowed hardware producer ring from tg3_recycle_rx() are
6170                  * ordered correctly WRT the skb check above.
6171                  */
6172                 smp_rmb();
6173
6174                 memcpy(&dpr->rx_std_buffers[di],
6175                        &spr->rx_std_buffers[si],
6176                        cpycnt * sizeof(struct ring_info));
6177
6178                 for (i = 0; i < cpycnt; i++, di++, si++) {
6179                         struct tg3_rx_buffer_desc *sbd, *dbd;
6180                         sbd = &spr->rx_std[si];
6181                         dbd = &dpr->rx_std[di];
6182                         dbd->addr_hi = sbd->addr_hi;
6183                         dbd->addr_lo = sbd->addr_lo;
6184                 }
6185
6186                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6187                                        tp->rx_std_ring_mask;
6188                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6189                                        tp->rx_std_ring_mask;
6190         }
6191
6192         while (1) {
6193                 src_prod_idx = spr->rx_jmb_prod_idx;
6194
6195                 /* Make sure updates to the rx_jmb_buffers[] entries and
6196                  * the jumbo producer index are seen in the correct order.
6197                  */
6198                 smp_rmb();
6199
6200                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6201                         break;
6202
6203                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6204                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6205                 else
6206                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6207                                  spr->rx_jmb_cons_idx;
6208
6209                 cpycnt = min(cpycnt,
6210                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6211
6212                 si = spr->rx_jmb_cons_idx;
6213                 di = dpr->rx_jmb_prod_idx;
6214
6215                 for (i = di; i < di + cpycnt; i++) {
6216                         if (dpr->rx_jmb_buffers[i].data) {
6217                                 cpycnt = i - di;
6218                                 err = -ENOSPC;
6219                                 break;
6220                         }
6221                 }
6222
6223                 if (!cpycnt)
6224                         break;
6225
6226                 /* Ensure that updates to the rx_jmb_buffers ring and the
6227                  * shadowed hardware producer ring from tg3_recycle_rx() are
6228                  * ordered correctly WRT the skb check above.
6229                  */
6230                 smp_rmb();
6231
6232                 memcpy(&dpr->rx_jmb_buffers[di],
6233                        &spr->rx_jmb_buffers[si],
6234                        cpycnt * sizeof(struct ring_info));
6235
6236                 for (i = 0; i < cpycnt; i++, di++, si++) {
6237                         struct tg3_rx_buffer_desc *sbd, *dbd;
6238                         sbd = &spr->rx_jmb[si].std;
6239                         dbd = &dpr->rx_jmb[di].std;
6240                         dbd->addr_hi = sbd->addr_hi;
6241                         dbd->addr_lo = sbd->addr_lo;
6242                 }
6243
6244                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6245                                        tp->rx_jmb_ring_mask;
6246                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6247                                        tp->rx_jmb_ring_mask;
6248         }
6249
6250         return err;
6251 }
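
/* Worked example for the copy-count logic above (illustrative values):
 * with rx_std_ring_mask == 511, rx_std_cons_idx == 500 and
 * rx_std_prod_idx == 10, the producer has wrapped, so the first pass
 * copies cpycnt = 512 - 500 = 12 entries and the next loop iteration
 * handles indices 0..9.  cpycnt is also clamped so the destination
 * ring never wraps within a single memcpy(), and is shrunk further
 * (with -ENOSPC) if an occupied destination slot is encountered.
 */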
6252
6253 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6254 {
6255         struct tg3 *tp = tnapi->tp;
6256
6257         /* run TX completion thread */
6258         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6259                 tg3_tx(tnapi);
6260                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6261                         return work_done;
6262         }
6263
6264         if (!tnapi->rx_rcb_prod_idx)
6265                 return work_done;
6266
6267         /* run RX thread, within the bounds set by NAPI.
6268          * All RX "locking" is done by ensuring outside
6269          * code synchronizes with tg3->napi.poll()
6270          */
6271         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6272                 work_done += tg3_rx(tnapi, budget - work_done);
6273
6274         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6275                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6276                 int i, err = 0;
6277                 u32 std_prod_idx = dpr->rx_std_prod_idx;
6278                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6279
6280                 tp->rx_refill = false;
6281                 for (i = 1; i <= tp->rxq_cnt; i++)
6282                         err |= tg3_rx_prodring_xfer(tp, dpr,
6283                                                     &tp->napi[i].prodring);
6284
6285                 wmb();
6286
6287                 if (std_prod_idx != dpr->rx_std_prod_idx)
6288                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6289                                      dpr->rx_std_prod_idx);
6290
6291                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6292                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6293                                      dpr->rx_jmb_prod_idx);
6294
6295                 mmiowb();
6296
6297                 if (err)
6298                         tw32_f(HOSTCC_MODE, tp->coal_now);
6299         }
6300
6301         return work_done;
6302 }
6303
6304 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6305 {
6306         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6307                 schedule_work(&tp->reset_task);
6308 }
6309
6310 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6311 {
6312         cancel_work_sync(&tp->reset_task);
6313         tg3_flag_clear(tp, RESET_TASK_PENDING);
6314         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6315 }
6316
6317 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6318 {
6319         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6320         struct tg3 *tp = tnapi->tp;
6321         int work_done = 0;
6322         struct tg3_hw_status *sblk = tnapi->hw_status;
6323
6324         while (1) {
6325                 work_done = tg3_poll_work(tnapi, work_done, budget);
6326
6327                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6328                         goto tx_recovery;
6329
6330                 if (unlikely(work_done >= budget))
6331                         break;
6332
6333                 /* tnapi->last_tag is used in the interrupt re-enable
6334                  * mailbox write below to tell the hw how much work has
6335                  * been processed, so read it before checking for more work.
6336                  */
6337                 tnapi->last_tag = sblk->status_tag;
6338                 tnapi->last_irq_tag = tnapi->last_tag;
6339                 rmb();
6340
6341                 /* check for RX/TX work to do */
6342                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6343                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6344
6345                         /* This test is not race-free, but it reduces
6346                          * the number of interrupts by looping again.
6347                          */
6348                         if (tnapi == &tp->napi[1] && tp->rx_refill)
6349                                 continue;
6350
6351                         napi_complete(napi);
6352                         /* Reenable interrupts. */
6353                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6354
6355                         /* This test is synchronized by napi_schedule()
6356                          * and napi_complete() to close the race condition.
6357                          */
6358                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6359                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
6360                                                   HOSTCC_MODE_ENABLE |
6361                                                   tnapi->coal_now);
6362                         }
6363                         mmiowb();
6364                         break;
6365                 }
6366         }
6367
6368         return work_done;
6369
6370 tx_recovery:
6371         /* work_done is guaranteed to be less than budget. */
6372         napi_complete(napi);
6373         tg3_reset_task_schedule(tp);
6374         return work_done;
6375 }
6376
6377 static void tg3_process_error(struct tg3 *tp)
6378 {
6379         u32 val;
6380         bool real_error = false;
6381
6382         if (tg3_flag(tp, ERROR_PROCESSED))
6383                 return;
6384
6385         /* Check Flow Attention register */
6386         val = tr32(HOSTCC_FLOW_ATTN);
6387         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6388                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6389                 real_error = true;
6390         }
6391
6392         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6393                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6394                 real_error = true;
6395         }
6396
6397         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6398                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6399                 real_error = true;
6400         }
6401
6402         if (!real_error)
6403                 return;
6404
6405         tg3_dump_state(tp);
6406
6407         tg3_flag_set(tp, ERROR_PROCESSED);
6408         tg3_reset_task_schedule(tp);
6409 }
6410
6411 static int tg3_poll(struct napi_struct *napi, int budget)
6412 {
6413         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6414         struct tg3 *tp = tnapi->tp;
6415         int work_done = 0;
6416         struct tg3_hw_status *sblk = tnapi->hw_status;
6417
6418         while (1) {
6419                 if (sblk->status & SD_STATUS_ERROR)
6420                         tg3_process_error(tp);
6421
6422                 tg3_poll_link(tp);
6423
6424                 work_done = tg3_poll_work(tnapi, work_done, budget);
6425
6426                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6427                         goto tx_recovery;
6428
6429                 if (unlikely(work_done >= budget))
6430                         break;
6431
6432                 if (tg3_flag(tp, TAGGED_STATUS)) {
6433                         /* tnapi->last_tag is used in tg3_int_reenable() below
6434                          * to tell the hw how much work has been processed,
6435                          * so we must read it before checking for more work.
6436                          */
6437                         tnapi->last_tag = sblk->status_tag;
6438                         tnapi->last_irq_tag = tnapi->last_tag;
6439                         rmb();
6440                 } else
6441                         sblk->status &= ~SD_STATUS_UPDATED;
6442
6443                 if (likely(!tg3_has_work(tnapi))) {
6444                         napi_complete(napi);
6445                         tg3_int_reenable(tnapi);
6446                         break;
6447                 }
6448         }
6449
6450         return work_done;
6451
6452 tx_recovery:
6453         /* work_done is guaranteed to be less than budget. */
6454         napi_complete(napi);
6455         tg3_reset_task_schedule(tp);
6456         return work_done;
6457 }
6458
6459 static void tg3_napi_disable(struct tg3 *tp)
6460 {
6461         int i;
6462
6463         for (i = tp->irq_cnt - 1; i >= 0; i--)
6464                 napi_disable(&tp->napi[i].napi);
6465 }
6466
6467 static void tg3_napi_enable(struct tg3 *tp)
6468 {
6469         int i;
6470
6471         for (i = 0; i < tp->irq_cnt; i++)
6472                 napi_enable(&tp->napi[i].napi);
6473 }
6474
6475 static void tg3_napi_init(struct tg3 *tp)
6476 {
6477         int i;
6478
6479         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6480         for (i = 1; i < tp->irq_cnt; i++)
6481                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6482 }
6483
6484 static void tg3_napi_fini(struct tg3 *tp)
6485 {
6486         int i;
6487
6488         for (i = 0; i < tp->irq_cnt; i++)
6489                 netif_napi_del(&tp->napi[i].napi);
6490 }
6491
6492 static inline void tg3_netif_stop(struct tg3 *tp)
6493 {
6494         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6495         tg3_napi_disable(tp);
6496         netif_tx_disable(tp->dev);
6497 }
6498
6499 static inline void tg3_netif_start(struct tg3 *tp)
6500 {
6501         /* NOTE: unconditional netif_tx_wake_all_queues is only
6502          * appropriate so long as all callers are assured to
6503          * have free tx slots (such as after tg3_init_hw)
6504          */
6505         netif_tx_wake_all_queues(tp->dev);
6506
6507         tg3_napi_enable(tp);
6508         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6509         tg3_enable_ints(tp);
6510 }
6511
6512 static void tg3_irq_quiesce(struct tg3 *tp)
6513 {
6514         int i;
6515
6516         BUG_ON(tp->irq_sync);
6517
6518         tp->irq_sync = 1;
6519         smp_mb();
6520
6521         for (i = 0; i < tp->irq_cnt; i++)
6522                 synchronize_irq(tp->napi[i].irq_vec);
6523 }
6524
6525 /* Fully shut down all tg3 driver activity elsewhere in the system.
6526  * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
6527  * Most of the time this is not necessary, except when shutting
6528  * down the device.
6529  */
6530 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6531 {
6532         spin_lock_bh(&tp->lock);
6533         if (irq_sync)
6534                 tg3_irq_quiesce(tp);
6535 }
6536
6537 static inline void tg3_full_unlock(struct tg3 *tp)
6538 {
6539         spin_unlock_bh(&tp->lock);
6540 }
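
/* Usage sketch (pattern used throughout this driver):
 *
 *	tg3_full_lock(tp, 1);	<-- irq_sync: also quiesce the IRQs
 *	... reset or reprogram the chip ...
 *	tg3_full_unlock(tp);
 *
 * Fast paths pass irq_sync == 0 and only take tp->lock.
 */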
6541
6542 /* One-shot MSI handler - Chip automatically disables interrupt
6543  * after sending MSI so driver doesn't have to do it.
6544  */
6545 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6546 {
6547         struct tg3_napi *tnapi = dev_id;
6548         struct tg3 *tp = tnapi->tp;
6549
6550         prefetch(tnapi->hw_status);
6551         if (tnapi->rx_rcb)
6552                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6553
6554         if (likely(!tg3_irq_sync(tp)))
6555                 napi_schedule(&tnapi->napi);
6556
6557         return IRQ_HANDLED;
6558 }
6559
6560 /* MSI ISR - No need to check for interrupt sharing and no need to
6561  * flush status block and interrupt mailbox. PCI ordering rules
6562  * guarantee that MSI will arrive after the status block.
6563  */
6564 static irqreturn_t tg3_msi(int irq, void *dev_id)
6565 {
6566         struct tg3_napi *tnapi = dev_id;
6567         struct tg3 *tp = tnapi->tp;
6568
6569         prefetch(tnapi->hw_status);
6570         if (tnapi->rx_rcb)
6571                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6572         /*
6573          * Writing any value to intr-mbox-0 clears PCI INTA# and
6574          * chip-internal interrupt pending events.
6575          * Writing non-zero to intr-mbox-0 additionally tells the
6576          * NIC to stop sending us irqs, engaging "in-intr-handler"
6577          * event coalescing.
6578          */
6579         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6580         if (likely(!tg3_irq_sync(tp)))
6581                 napi_schedule(&tnapi->napi);
6582
6583         return IRQ_RETVAL(1);
6584 }
6585
6586 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6587 {
6588         struct tg3_napi *tnapi = dev_id;
6589         struct tg3 *tp = tnapi->tp;
6590         struct tg3_hw_status *sblk = tnapi->hw_status;
6591         unsigned int handled = 1;
6592
6593         /* In INTx mode, it is possible for the interrupt to arrive at
6594          * the CPU before the status block that was posted prior to the interrupt.
6595          * Reading the PCI State register will confirm whether the
6596          * interrupt is ours and will flush the status block.
6597          */
6598         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6599                 if (tg3_flag(tp, CHIP_RESETTING) ||
6600                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6601                         handled = 0;
6602                         goto out;
6603                 }
6604         }
6605
6606         /*
6607          * Writing any value to intr-mbox-0 clears PCI INTA# and
6608          * chip-internal interrupt pending events.
6609          * Writing non-zero to intr-mbox-0 additionally tells the
6610          * NIC to stop sending us irqs, engaging "in-intr-handler"
6611          * event coalescing.
6612          *
6613          * Flush the mailbox to de-assert the IRQ immediately to prevent
6614          * spurious interrupts.  The flush impacts performance but
6615          * excessive spurious interrupts can be worse in some cases.
6616          */
6617         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6618         if (tg3_irq_sync(tp))
6619                 goto out;
6620         sblk->status &= ~SD_STATUS_UPDATED;
6621         if (likely(tg3_has_work(tnapi))) {
6622                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6623                 napi_schedule(&tnapi->napi);
6624         } else {
6625                 /* No work, shared interrupt perhaps?  re-enable
6626                  * interrupts, and flush that PCI write
6627                  */
6628                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6629                                0x00000000);
6630         }
6631 out:
6632         return IRQ_RETVAL(handled);
6633 }
6634
6635 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6636 {
6637         struct tg3_napi *tnapi = dev_id;
6638         struct tg3 *tp = tnapi->tp;
6639         struct tg3_hw_status *sblk = tnapi->hw_status;
6640         unsigned int handled = 1;
6641
6642         /* In INTx mode, it is possible for the interrupt to arrive at
6643          * the CPU before the status block that was posted prior to the interrupt.
6644          * Reading the PCI State register will confirm whether the
6645          * interrupt is ours and will flush the status block.
6646          */
6647         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6648                 if (tg3_flag(tp, CHIP_RESETTING) ||
6649                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6650                         handled = 0;
6651                         goto out;
6652                 }
6653         }
6654
6655         /*
6656          * Writing any value to intr-mbox-0 clears PCI INTA# and
6657          * chip-internal interrupt pending events.
6658          * Writing non-zero to intr-mbox-0 additionally tells the
6659          * NIC to stop sending us irqs, engaging "in-intr-handler"
6660          * event coalescing.
6661          *
6662          * Flush the mailbox to de-assert the IRQ immediately to prevent
6663          * spurious interrupts.  The flush impacts performance but
6664          * excessive spurious interrupts can be worse in some cases.
6665          */
6666         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6667
6668         /*
6669          * In a shared interrupt configuration, sometimes other devices'
6670          * interrupts will scream.  We record the current status tag here
6671          * so that the above check can report that the screaming interrupts
6672          * are unhandled.  Eventually they will be silenced.
6673          */
6674         tnapi->last_irq_tag = sblk->status_tag;
6675
6676         if (tg3_irq_sync(tp))
6677                 goto out;
6678
6679         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6680
6681         napi_schedule(&tnapi->napi);
6682
6683 out:
6684         return IRQ_RETVAL(handled);
6685 }
6686
6687 /* ISR for interrupt test */
6688 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6689 {
6690         struct tg3_napi *tnapi = dev_id;
6691         struct tg3 *tp = tnapi->tp;
6692         struct tg3_hw_status *sblk = tnapi->hw_status;
6693
6694         if ((sblk->status & SD_STATUS_UPDATED) ||
6695             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6696                 tg3_disable_ints(tp);
6697                 return IRQ_RETVAL(1);
6698         }
6699         return IRQ_RETVAL(0);
6700 }
6701
6702 #ifdef CONFIG_NET_POLL_CONTROLLER
6703 static void tg3_poll_controller(struct net_device *dev)
6704 {
6705         int i;
6706         struct tg3 *tp = netdev_priv(dev);
6707
6708         for (i = 0; i < tp->irq_cnt; i++)
6709                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6710 }
6711 #endif
6712
6713 static void tg3_tx_timeout(struct net_device *dev)
6714 {
6715         struct tg3 *tp = netdev_priv(dev);
6716
6717         if (netif_msg_tx_err(tp)) {
6718                 netdev_err(dev, "transmit timed out, resetting\n");
6719                 tg3_dump_state(tp);
6720         }
6721
6722         tg3_reset_task_schedule(tp);
6723 }
6724
6725 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
6726 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6727 {
6728         u32 base = (u32) mapping & 0xffffffff;
6729
6730         return (base > 0xffffdcc0) && (base + len + 8 < base);
6731 }
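
/* Worked example (illustrative): for base = 0xffffff00 and len = 0x200,
 * base + len + 8 = 0x100000108 truncates to 0x108 in 32 bits, which is
 * less than base, so the buffer crosses a 4GB boundary and the test
 * fires.  The base > 0xffffdcc0 pre-check cheaply rejects bases far
 * enough below the boundary (about 9 KB) that even a maximum-sized
 * jumbo buffer cannot wrap.
 */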
6732
6733 /* Test for DMA addresses > 40-bit */
6734 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6735                                           int len)
6736 {
6737 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6738         if (tg3_flag(tp, 40BIT_DMA_BUG))
6739                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6740         return 0;
6741 #else
6742         return 0;
6743 #endif
6744 }
6745
6746 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6747                                  dma_addr_t mapping, u32 len, u32 flags,
6748                                  u32 mss, u32 vlan)
6749 {
6750         txbd->addr_hi = ((u64) mapping >> 32);
6751         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6752         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6753         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6754 }
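
/* Packing sketch (illustrative values): a 1514-byte frame at DMA
 * address 0x123456000 with flags == TXD_FLAG_END, mss == 0 and
 * vlan == 0 becomes:
 *
 *	addr_hi   = 0x00000001
 *	addr_lo   = 0x23456000
 *	len_flags = (1514 << TXD_LEN_SHIFT) | TXD_FLAG_END
 *	vlan_tag  = 0
 */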
6755
6756 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6757                             dma_addr_t map, u32 len, u32 flags,
6758                             u32 mss, u32 vlan)
6759 {
6760         struct tg3 *tp = tnapi->tp;
6761         bool hwbug = false;
6762
6763         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6764                 hwbug = true;
6765
6766         if (tg3_4g_overflow_test(map, len))
6767                 hwbug = true;
6768
6769         if (tg3_40bit_overflow_test(tp, map, len))
6770                 hwbug = true;
6771
6772         if (tp->dma_limit) {
6773                 u32 prvidx = *entry;
6774                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6775                 while (len > tp->dma_limit && *budget) {
6776                         u32 frag_len = tp->dma_limit;
6777                         len -= tp->dma_limit;
6778
6779                         /* Avoid the 8-byte DMA problem */
6780                         if (len <= 8) {
6781                                 len += tp->dma_limit / 2;
6782                                 frag_len = tp->dma_limit / 2;
6783                         }
6784
6785                         tnapi->tx_buffers[*entry].fragmented = true;
6786
6787                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6788                                       frag_len, tmp_flag, mss, vlan);
6789                         *budget -= 1;
6790                         prvidx = *entry;
6791                         *entry = NEXT_TX(*entry);
6792
6793                         map += frag_len;
6794                 }
6795
6796                 if (len) {
6797                         if (*budget) {
6798                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6799                                               len, flags, mss, vlan);
6800                                 *budget -= 1;
6801                                 *entry = NEXT_TX(*entry);
6802                         } else {
6803                                 hwbug = true;
6804                                 tnapi->tx_buffers[prvidx].fragmented = false;
6805                         }
6806                 }
6807         } else {
6808                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6809                               len, flags, mss, vlan);
6810                 *entry = NEXT_TX(*entry);
6811         }
6812
6813         return hwbug;
6814 }
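
/* Split example for the dma_limit path above (illustrative numbers):
 * with tp->dma_limit == 4096, a 9000-byte fragment is emitted as BDs
 * of 4096, 4096 and 808 bytes.  If the tail would be <= 8 bytes, e.g.
 * len == 8196 (4096 + 4096 + 4), the last full chunk is halved so the
 * BDs become 4096, 2048 and 2052, dodging the 8-byte short-DMA bug.
 */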
6815
6816 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6817 {
6818         int i;
6819         struct sk_buff *skb;
6820         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6821
6822         skb = txb->skb;
6823         txb->skb = NULL;
6824
6825         pci_unmap_single(tnapi->tp->pdev,
6826                          dma_unmap_addr(txb, mapping),
6827                          skb_headlen(skb),
6828                          PCI_DMA_TODEVICE);
6829
6830         while (txb->fragmented) {
6831                 txb->fragmented = false;
6832                 entry = NEXT_TX(entry);
6833                 txb = &tnapi->tx_buffers[entry];
6834         }
6835
6836         for (i = 0; i <= last; i++) {
6837                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6838
6839                 entry = NEXT_TX(entry);
6840                 txb = &tnapi->tx_buffers[entry];
6841
6842                 pci_unmap_page(tnapi->tp->pdev,
6843                                dma_unmap_addr(txb, mapping),
6844                                skb_frag_size(frag), PCI_DMA_TODEVICE);
6845
6846                 while (txb->fragmented) {
6847                         txb->fragmented = false;
6848                         entry = NEXT_TX(entry);
6849                         txb = &tnapi->tx_buffers[entry];
6850                 }
6851         }
6852 }
6853
6854 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6855 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6856                                        struct sk_buff **pskb,
6857                                        u32 *entry, u32 *budget,
6858                                        u32 base_flags, u32 mss, u32 vlan)
6859 {
6860         struct tg3 *tp = tnapi->tp;
6861         struct sk_buff *new_skb, *skb = *pskb;
6862         dma_addr_t new_addr = 0;
6863         int ret = 0;
6864
6865         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6866                 new_skb = skb_copy(skb, GFP_ATOMIC);
6867         else {
6868                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6869
6870                 new_skb = skb_copy_expand(skb,
6871                                           skb_headroom(skb) + more_headroom,
6872                                           skb_tailroom(skb), GFP_ATOMIC);
6873         }
6874
6875         if (!new_skb) {
6876                 ret = -1;
6877         } else {
6878                 /* New SKB is guaranteed to be linear. */
6879                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6880                                           PCI_DMA_TODEVICE);
6881                 /* Make sure the mapping succeeded */
6882                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6883                         dev_kfree_skb(new_skb);
6884                         ret = -1;
6885                 } else {
6886                         u32 save_entry = *entry;
6887
6888                         base_flags |= TXD_FLAG_END;
6889
6890                         tnapi->tx_buffers[*entry].skb = new_skb;
6891                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6892                                            mapping, new_addr);
6893
6894                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6895                                             new_skb->len, base_flags,
6896                                             mss, vlan)) {
6897                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6898                                 dev_kfree_skb(new_skb);
6899                                 ret = -1;
6900                         }
6901                 }
6902         }
6903
6904         dev_kfree_skb(skb);
6905         *pskb = new_skb;
6906         return ret;
6907 }
6908
6909 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6910
6911 /* Use GSO to work around a rare TSO bug that may be triggered when the
6912  * TSO header is greater than 80 bytes.
6913  */
6914 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6915 {
6916         struct sk_buff *segs, *nskb;
6917         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6918
6919         /* Estimate the number of fragments in the worst case */
6920         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6921                 netif_stop_queue(tp->dev);
6922
6923                 /* netif_tx_stop_queue() must be done before checking
6924                  * tx index in tg3_tx_avail() below, because in
6925                  * tg3_tx(), we update tx index before checking for
6926                  * netif_tx_queue_stopped().
6927                  */
6928                 smp_mb();
6929                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6930                         return NETDEV_TX_BUSY;
6931
6932                 netif_wake_queue(tp->dev);
6933         }
6934
6935         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6936         if (IS_ERR(segs))
6937                 goto tg3_tso_bug_end;
6938
6939         do {
6940                 nskb = segs;
6941                 segs = segs->next;
6942                 nskb->next = NULL;
6943                 tg3_start_xmit(nskb, tp->dev);
6944         } while (segs);
6945
6946 tg3_tso_bug_end:
6947         dev_kfree_skb(skb);
6948
6949         return NETDEV_TX_OK;
6950 }
6951
6952 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6953  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6954  */
6955 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6956 {
6957         struct tg3 *tp = netdev_priv(dev);
6958         u32 len, entry, base_flags, mss, vlan = 0;
6959         u32 budget;
6960         int i = -1, would_hit_hwbug;
6961         dma_addr_t mapping;
6962         struct tg3_napi *tnapi;
6963         struct netdev_queue *txq;
6964         unsigned int last;
6965
6966         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6967         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6968         if (tg3_flag(tp, ENABLE_TSS))
6969                 tnapi++;
6970
6971         budget = tg3_tx_avail(tnapi);
6972
6973         /* We are running in BH disabled context with netif_tx_lock
6974          * and TX reclaim runs via tp->napi.poll inside of a software
6975          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6976          * no IRQ context deadlocks to worry about either.  Rejoice!
6977          */
6978         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6979                 if (!netif_tx_queue_stopped(txq)) {
6980                         netif_tx_stop_queue(txq);
6981
6982                         /* This is a hard error, log it. */
6983                         netdev_err(dev,
6984                                    "BUG! Tx Ring full when queue awake!\n");
6985                 }
6986                 return NETDEV_TX_BUSY;
6987         }
6988
6989         entry = tnapi->tx_prod;
6990         base_flags = 0;
6991         if (skb->ip_summed == CHECKSUM_PARTIAL)
6992                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6993
6994         mss = skb_shinfo(skb)->gso_size;
6995         if (mss) {
6996                 struct iphdr *iph;
6997                 u32 tcp_opt_len, hdr_len;
6998
6999                 if (skb_header_cloned(skb) &&
7000                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7001                         goto drop;
7002
7003                 iph = ip_hdr(skb);
7004                 tcp_opt_len = tcp_optlen(skb);
7005
7006                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7007
7008                 if (!skb_is_gso_v6(skb)) {
7009                         iph->check = 0;
7010                         iph->tot_len = htons(mss + hdr_len);
7011                 }
7012
7013                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7014                     tg3_flag(tp, TSO_BUG))
7015                         return tg3_tso_bug(tp, skb);
7016
7017                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7018                                TXD_FLAG_CPU_POST_DMA);
7019
7020                 if (tg3_flag(tp, HW_TSO_1) ||
7021                     tg3_flag(tp, HW_TSO_2) ||
7022                     tg3_flag(tp, HW_TSO_3)) {
7023                         tcp_hdr(skb)->check = 0;
7024                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7025                 } else
7026                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7027                                                                  iph->daddr, 0,
7028                                                                  IPPROTO_TCP,
7029                                                                  0);
7030
7031                 if (tg3_flag(tp, HW_TSO_3)) {
7032                         mss |= (hdr_len & 0xc) << 12;
7033                         if (hdr_len & 0x10)
7034                                 base_flags |= 0x00000010;
7035                         base_flags |= (hdr_len & 0x3e0) << 5;
7036                 } else if (tg3_flag(tp, HW_TSO_2))
7037                         mss |= hdr_len << 9;
7038                 else if (tg3_flag(tp, HW_TSO_1) ||
7039                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7040                         if (tcp_opt_len || iph->ihl > 5) {
7041                                 int tsflags;
7042
7043                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7044                                 mss |= (tsflags << 11);
7045                         }
7046                 } else {
7047                         if (tcp_opt_len || iph->ihl > 5) {
7048                                 int tsflags;
7049
7050                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7051                                 base_flags |= tsflags << 12;
7052                         }
7053                 }
7054         }
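
        /* A note on the encoding above: the HW_TSO_2/3 variants want the
         * full L3+L4 header length folded into the descriptor.  HW_TSO_2
         * carries it in the upper bits of the mss field (a plain
         * IPv4 + TCP header, 20 + 20 bytes, gives hdr_len = 40 and
         * mss |= 40 << 9), while HW_TSO_3 slices the same value across
         * spare bits of both mss and base_flags, hence the masking with
         * 0xc, 0x10 and 0x3e0 above.
         */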
7055
7056         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7057             !mss && skb->len > VLAN_ETH_FRAME_LEN)
7058                 base_flags |= TXD_FLAG_JMB_PKT;
7059
7060         if (vlan_tx_tag_present(skb)) {
7061                 base_flags |= TXD_FLAG_VLAN;
7062                 vlan = vlan_tx_tag_get(skb);
7063         }
7064
7065         len = skb_headlen(skb);
7066
7067         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7068         if (pci_dma_mapping_error(tp->pdev, mapping))
7069                 goto drop;
7070
7072         tnapi->tx_buffers[entry].skb = skb;
7073         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7074
7075         would_hit_hwbug = 0;
7076
7077         if (tg3_flag(tp, 5701_DMA_BUG))
7078                 would_hit_hwbug = 1;
7079
7080         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7081                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7082                             mss, vlan)) {
7083                 would_hit_hwbug = 1;
7084         } else if (skb_shinfo(skb)->nr_frags > 0) {
7085                 u32 tmp_mss = mss;
7086
7087                 if (!tg3_flag(tp, HW_TSO_1) &&
7088                     !tg3_flag(tp, HW_TSO_2) &&
7089                     !tg3_flag(tp, HW_TSO_3))
7090                         tmp_mss = 0;
7091
7092                 /* Now loop through additional data
7093                  * fragments, and queue them.
7094                  */
7095                 last = skb_shinfo(skb)->nr_frags - 1;
7096                 for (i = 0; i <= last; i++) {
7097                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7098
7099                         len = skb_frag_size(frag);
7100                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7101                                                    len, DMA_TO_DEVICE);
7102
7103                         tnapi->tx_buffers[entry].skb = NULL;
7104                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7105                                            mapping);
7106                         if (dma_mapping_error(&tp->pdev->dev, mapping))
7107                                 goto dma_error;
7108
7109                         if (!budget ||
7110                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7111                                             len, base_flags |
7112                                             ((i == last) ? TXD_FLAG_END : 0),
7113                                             tmp_mss, vlan)) {
7114                                 would_hit_hwbug = 1;
7115                                 break;
7116                         }
7117                 }
7118         }
7119
7120         if (would_hit_hwbug) {
7121                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7122
7123                 /* If the workaround fails due to memory/mapping
7124                  * failure, silently drop this packet.
7125                  */
7126                 entry = tnapi->tx_prod;
7127                 budget = tg3_tx_avail(tnapi);
7128                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7129                                                 base_flags, mss, vlan))
7130                         goto drop_nofree;
7131         }
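
        /* Past this point the packet either sits fully in the tx ring or,
         * if any mapping tripped a DMA erratum, it has been unmapped and
         * bounced through tigon3_dma_hwbug_workaround(), which copies the
         * data into a freshly allocated skb whose layout is safe for the
         * hardware, or it was silently dropped when that copy failed.
         */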
7132
7133         skb_tx_timestamp(skb);
7134         netdev_tx_sent_queue(txq, skb->len);
7135
7136         /* Sync BD data before updating mailbox */
7137         wmb();
7138
7139         /* Packets are ready, update Tx producer idx local and on card. */
7140         tw32_tx_mbox(tnapi->prodmbox, entry);
7141
7142         tnapi->tx_prod = entry;
7143         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7144                 netif_tx_stop_queue(txq);
7145
7146                 /* netif_tx_stop_queue() must be done before checking
7147                  * tx index in tg3_tx_avail() below, because in
7148                  * tg3_tx(), we update tx index before checking for
7149                  * netif_tx_queue_stopped().
7150                  */
7151                 smp_mb();
7152                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7153                         netif_tx_wake_queue(txq);
7154         }
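
        /* This pairs with the smp_mb() in tg3_tx(): the reclaim path
         * advances tx_cons and then tests netif_tx_queue_stopped(), while
         * this path stops the queue and then re-reads tg3_tx_avail().
         * With barriers on both sides, at least one path must observe the
         * other's update, so the queue cannot remain stopped forever.
         */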
7155
7156         mmiowb();
7157         return NETDEV_TX_OK;
7158
7159 dma_error:
7160         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7161         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7162 drop:
7163         dev_kfree_skb(skb);
7164 drop_nofree:
7165         tp->tx_dropped++;
7166         return NETDEV_TX_OK;
7167 }
7168
7169 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7170 {
7171         if (enable) {
7172                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7173                                   MAC_MODE_PORT_MODE_MASK);
7174
7175                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7176
7177                 if (!tg3_flag(tp, 5705_PLUS))
7178                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7179
7180                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7181                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7182                 else
7183                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7184         } else {
7185                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7186
7187                 if (tg3_flag(tp, 5705_PLUS) ||
7188                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7189                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7190                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7191         }
7192
7193         tw32(MAC_MODE, tp->mac_mode);
7194         udelay(40);
7195 }
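
/* Internal MAC loopback routes frames from the transmit MAC straight back
 * into the receive MAC, bypassing the PHY and the wire.  tg3_set_loopback()
 * below uses it for the NETIF_F_LOOPBACK feature, and the ethtool self-test
 * code uses the same mode to exercise the DMA and descriptor machinery
 * independently of link state.
 */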
7196
7197 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7198 {
7199         u32 val, bmcr, mac_mode, ptest = 0;
7200
7201         tg3_phy_toggle_apd(tp, false);
7202         tg3_phy_toggle_automdix(tp, 0);
7203
7204         if (extlpbk && tg3_phy_set_extloopbk(tp))
7205                 return -EIO;
7206
7207         bmcr = BMCR_FULLDPLX;
7208         switch (speed) {
7209         case SPEED_10:
7210                 break;
7211         case SPEED_100:
7212                 bmcr |= BMCR_SPEED100;
7213                 break;
7214         case SPEED_1000:
7215         default:
7216                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7217                         speed = SPEED_100;
7218                         bmcr |= BMCR_SPEED100;
7219                 } else {
7220                         speed = SPEED_1000;
7221                         bmcr |= BMCR_SPEED1000;
7222                 }
7223         }
7224
7225         if (extlpbk) {
7226                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7227                         tg3_readphy(tp, MII_CTRL1000, &val);
7228                         val |= CTL1000_AS_MASTER |
7229                                CTL1000_ENABLE_MASTER;
7230                         tg3_writephy(tp, MII_CTRL1000, val);
7231                 } else {
7232                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7233                                 MII_TG3_FET_PTEST_TRIM_2;
7234                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7235                 }
7236         } else
7237                 bmcr |= BMCR_LOOPBACK;
7238
7239         tg3_writephy(tp, MII_BMCR, bmcr);
7240
7241         /* The write needs to be flushed for the FETs */
7242         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7243                 tg3_readphy(tp, MII_BMCR, &bmcr);
7244
7245         udelay(40);
7246
7247         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7248             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7249                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7250                              MII_TG3_FET_PTEST_FRC_TX_LINK |
7251                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
7252
7253                 /* The write needs to be flushed for the AC131 */
7254                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7255         }
7256
7257         /* Reset to prevent losing 1st rx packet intermittently */
7258         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7259             tg3_flag(tp, 5780_CLASS)) {
7260                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7261                 udelay(10);
7262                 tw32_f(MAC_RX_MODE, tp->rx_mode);
7263         }
7264
7265         mac_mode = tp->mac_mode &
7266                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7267         if (speed == SPEED_1000)
7268                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7269         else
7270                 mac_mode |= MAC_MODE_PORT_MODE_MII;
7271
7272         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7273                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7274
7275                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7276                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7277                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7278                         mac_mode |= MAC_MODE_LINK_POLARITY;
7279
7280                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7281                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7282         }
7283
7284         tw32(MAC_MODE, mac_mode);
7285         udelay(40);
7286
7287         return 0;
7288 }
7289
7290 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7291 {
7292         struct tg3 *tp = netdev_priv(dev);
7293
7294         if (features & NETIF_F_LOOPBACK) {
7295                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7296                         return;
7297
7298                 spin_lock_bh(&tp->lock);
7299                 tg3_mac_loopback(tp, true);
7300                 netif_carrier_on(tp->dev);
7301                 spin_unlock_bh(&tp->lock);
7302                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7303         } else {
7304                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7305                         return;
7306
7307                 spin_lock_bh(&tp->lock);
7308                 tg3_mac_loopback(tp, false);
7309                 /* Force link status check */
7310                 tg3_setup_phy(tp, 1);
7311                 spin_unlock_bh(&tp->lock);
7312                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7313         }
7314 }
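
/* NETIF_F_LOOPBACK is normally flipped from user space via the netdev
 * features interface (e.g. "ethtool -K <dev> loopback on"); the feature
 * change is delivered through tg3_set_features() below, which calls
 * tg3_set_loopback() only while the interface is running.
 */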
7315
7316 static netdev_features_t tg3_fix_features(struct net_device *dev,
7317         netdev_features_t features)
7318 {
7319         struct tg3 *tp = netdev_priv(dev);
7320
7321         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7322                 features &= ~NETIF_F_ALL_TSO;
7323
7324         return features;
7325 }
7326
7327 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7328 {
7329         netdev_features_t changed = dev->features ^ features;
7330
7331         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7332                 tg3_set_loopback(dev, features);
7333
7334         return 0;
7335 }
7336
7337 static void tg3_rx_prodring_free(struct tg3 *tp,
7338                                  struct tg3_rx_prodring_set *tpr)
7339 {
7340         int i;
7341
7342         if (tpr != &tp->napi[0].prodring) {
7343                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7344                      i = (i + 1) & tp->rx_std_ring_mask)
7345                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7346                                         tp->rx_pkt_map_sz);
7347
7348                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7349                         for (i = tpr->rx_jmb_cons_idx;
7350                              i != tpr->rx_jmb_prod_idx;
7351                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7352                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7353                                                 TG3_RX_JMB_MAP_SZ);
7354                         }
7355                 }
7356
7357                 return;
7358         }
7359
7360         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7361                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7362                                 tp->rx_pkt_map_sz);
7363
7364         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7365                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7366                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7367                                         TG3_RX_JMB_MAP_SZ);
7368         }
7369 }
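
/* Only the window between the consumer and producer indices is swept for
 * the per-vector rings: those rings are staging areas that recycle buffers
 * back to the single hardware producer ring on napi[0].  The napi[0] ring
 * itself owns every slot and is therefore freed in full.
 */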
7370
7371 /* Initialize rx rings for packet processing.
7372  *
7373  * The chip has been shut down and the driver detached from
7374  * the networking, so no interrupts or new tx packets will
7375  * end up in the driver.  tp->{tx,}lock are held and thus
7376  * we may not sleep.
7377  */
7378 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7379                                  struct tg3_rx_prodring_set *tpr)
7380 {
7381         u32 i, rx_pkt_dma_sz;
7382
7383         tpr->rx_std_cons_idx = 0;
7384         tpr->rx_std_prod_idx = 0;
7385         tpr->rx_jmb_cons_idx = 0;
7386         tpr->rx_jmb_prod_idx = 0;
7387
7388         if (tpr != &tp->napi[0].prodring) {
7389                 memset(&tpr->rx_std_buffers[0], 0,
7390                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7391                 if (tpr->rx_jmb_buffers)
7392                         memset(&tpr->rx_jmb_buffers[0], 0,
7393                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7394                 goto done;
7395         }
7396
7397         /* Zero out all descriptors. */
7398         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7399
7400         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7401         if (tg3_flag(tp, 5780_CLASS) &&
7402             tp->dev->mtu > ETH_DATA_LEN)
7403                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7404         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7405
7406         /* Initialize invariants of the rings; we only set this
7407          * stuff once.  This works because the card does not
7408          * write into the rx buffer posting rings.
7409          */
7410         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7411                 struct tg3_rx_buffer_desc *rxd;
7412
7413                 rxd = &tpr->rx_std[i];
7414                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7415                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7416                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7417                                (i << RXD_OPAQUE_INDEX_SHIFT));
7418         }
7419
7420         /* Now allocate fresh SKBs for each rx ring. */
7421         for (i = 0; i < tp->rx_pending; i++) {
7422                 unsigned int frag_size;
7423
7424                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7425                                       &frag_size) < 0) {
7426                         netdev_warn(tp->dev,
7427                                     "Using a smaller RX standard ring. Only "
7428                                     "%d out of %d buffers were allocated "
7429                                     "successfully\n", i, tp->rx_pending);
7430                         if (i == 0)
7431                                 goto initfail;
7432                         tp->rx_pending = i;
7433                         break;
7434                 }
7435         }
7436
7437         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7438                 goto done;
7439
7440         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7441
7442         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7443                 goto done;
7444
7445         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7446                 struct tg3_rx_buffer_desc *rxd;
7447
7448                 rxd = &tpr->rx_jmb[i].std;
7449                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7450                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7451                                   RXD_FLAG_JUMBO;
7452                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7453                        (i << RXD_OPAQUE_INDEX_SHIFT));
7454         }
7455
7456         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7457                 unsigned int frag_size;
7458
7459                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7460                                       &frag_size) < 0) {
7461                         netdev_warn(tp->dev,
7462                                     "Using a smaller RX jumbo ring. Only %d "
7463                                     "out of %d buffers were allocated "
7464                                     "successfully\n", i, tp->rx_jumbo_pending);
7465                         if (i == 0)
7466                                 goto initfail;
7467                         tp->rx_jumbo_pending = i;
7468                         break;
7469                 }
7470         }
7471
7472 done:
7473         return 0;
7474
7475 initfail:
7476         tg3_rx_prodring_free(tp, tpr);
7477         return -ENOMEM;
7478 }
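
/* Buffer allocation failures above degrade gracefully: as long as at least
 * one buffer was posted, rx_pending/rx_jumbo_pending are trimmed to what
 * actually succeeded and the ring simply runs shorter.  Only a completely
 * empty ring aborts the bring-up with -ENOMEM.
 */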
7479
7480 static void tg3_rx_prodring_fini(struct tg3 *tp,
7481                                  struct tg3_rx_prodring_set *tpr)
7482 {
7483         kfree(tpr->rx_std_buffers);
7484         tpr->rx_std_buffers = NULL;
7485         kfree(tpr->rx_jmb_buffers);
7486         tpr->rx_jmb_buffers = NULL;
7487         if (tpr->rx_std) {
7488                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7489                                   tpr->rx_std, tpr->rx_std_mapping);
7490                 tpr->rx_std = NULL;
7491         }
7492         if (tpr->rx_jmb) {
7493                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7494                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7495                 tpr->rx_jmb = NULL;
7496         }
7497 }
7498
7499 static int tg3_rx_prodring_init(struct tg3 *tp,
7500                                 struct tg3_rx_prodring_set *tpr)
7501 {
7502         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7503                                       GFP_KERNEL);
7504         if (!tpr->rx_std_buffers)
7505                 return -ENOMEM;
7506
7507         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7508                                          TG3_RX_STD_RING_BYTES(tp),
7509                                          &tpr->rx_std_mapping,
7510                                          GFP_KERNEL);
7511         if (!tpr->rx_std)
7512                 goto err_out;
7513
7514         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7515                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7516                                               GFP_KERNEL);
7517                 if (!tpr->rx_jmb_buffers)
7518                         goto err_out;
7519
7520                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7521                                                  TG3_RX_JMB_RING_BYTES(tp),
7522                                                  &tpr->rx_jmb_mapping,
7523                                                  GFP_KERNEL);
7524                 if (!tpr->rx_jmb)
7525                         goto err_out;
7526         }
7527
7528         return 0;
7529
7530 err_out:
7531         tg3_rx_prodring_fini(tp, tpr);
7532         return -ENOMEM;
7533 }
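
/* Lifecycle summary: tg3_rx_prodring_init()/tg3_rx_prodring_fini() manage
 * the descriptor rings and bookkeeping arrays once per device open, while
 * tg3_rx_prodring_alloc()/tg3_rx_prodring_free() populate and drain the
 * actual data buffers, possibly many times across chip resets.
 */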
7534
7535 /* Free up pending packets in all rx/tx rings.
7536  *
7537  * The chip has been shut down and the driver detached from
7538  * the networking, so no interrupts or new tx packets will
7539  * end up in the driver.  tp->{tx,}lock is not held and we are not
7540  * in an interrupt context and thus may sleep.
7541  */
7542 static void tg3_free_rings(struct tg3 *tp)
7543 {
7544         int i, j;
7545
7546         for (j = 0; j < tp->irq_cnt; j++) {
7547                 struct tg3_napi *tnapi = &tp->napi[j];
7548
7549                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7550
7551                 if (!tnapi->tx_buffers)
7552                         continue;
7553
7554                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7555                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7556
7557                         if (!skb)
7558                                 continue;
7559
7560                         tg3_tx_skb_unmap(tnapi, i,
7561                                          skb_shinfo(skb)->nr_frags - 1);
7562
7563                         dev_kfree_skb_any(skb);
7564                 }
7565                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7566         }
7567 }
7568
7569 /* Initialize tx/rx rings for packet processing.
7570  *
7571  * The chip has been shut down and the driver detached from
7572  * the networking, so no interrupts or new tx packets will
7573  * end up in the driver.  tp->{tx,}lock are held and thus
7574  * we may not sleep.
7575  */
7576 static int tg3_init_rings(struct tg3 *tp)
7577 {
7578         int i;
7579
7580         /* Free up all the SKBs. */
7581         tg3_free_rings(tp);
7582
7583         for (i = 0; i < tp->irq_cnt; i++) {
7584                 struct tg3_napi *tnapi = &tp->napi[i];
7585
7586                 tnapi->last_tag = 0;
7587                 tnapi->last_irq_tag = 0;
7588                 tnapi->hw_status->status = 0;
7589                 tnapi->hw_status->status_tag = 0;
7590                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7591
7592                 tnapi->tx_prod = 0;
7593                 tnapi->tx_cons = 0;
7594                 if (tnapi->tx_ring)
7595                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7596
7597                 tnapi->rx_rcb_ptr = 0;
7598                 if (tnapi->rx_rcb)
7599                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7600
7601                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7602                         tg3_free_rings(tp);
7603                         return -ENOMEM;
7604                 }
7605         }
7606
7607         return 0;
7608 }
7609
7610 static void tg3_mem_tx_release(struct tg3 *tp)
7611 {
7612         int i;
7613
7614         for (i = 0; i < tp->irq_max; i++) {
7615                 struct tg3_napi *tnapi = &tp->napi[i];
7616
7617                 if (tnapi->tx_ring) {
7618                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7619                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7620                         tnapi->tx_ring = NULL;
7621                 }
7622
7623                 kfree(tnapi->tx_buffers);
7624                 tnapi->tx_buffers = NULL;
7625         }
7626 }
7627
7628 static int tg3_mem_tx_acquire(struct tg3 *tp)
7629 {
7630         int i;
7631         struct tg3_napi *tnapi = &tp->napi[0];
7632
7633         /* If multivector TSS is enabled, vector 0 does not handle
7634          * tx interrupts.  Don't allocate any resources for it.
7635          */
7636         if (tg3_flag(tp, ENABLE_TSS))
7637                 tnapi++;
7638
7639         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7640                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7641                                             TG3_TX_RING_SIZE, GFP_KERNEL);
7642                 if (!tnapi->tx_buffers)
7643                         goto err_out;
7644
7645                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7646                                                     TG3_TX_RING_BYTES,
7647                                                     &tnapi->tx_desc_mapping,
7648                                                     GFP_KERNEL);
7649                 if (!tnapi->tx_ring)
7650                         goto err_out;
7651         }
7652
7653         return 0;
7654
7655 err_out:
7656         tg3_mem_tx_release(tp);
7657         return -ENOMEM;
7658 }
7659
7660 static void tg3_mem_rx_release(struct tg3 *tp)
7661 {
7662         int i;
7663
7664         for (i = 0; i < tp->irq_max; i++) {
7665                 struct tg3_napi *tnapi = &tp->napi[i];
7666
7667                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7668
7669                 if (!tnapi->rx_rcb)
7670                         continue;
7671
7672                 dma_free_coherent(&tp->pdev->dev,
7673                                   TG3_RX_RCB_RING_BYTES(tp),
7674                                   tnapi->rx_rcb,
7675                                   tnapi->rx_rcb_mapping);
7676                 tnapi->rx_rcb = NULL;
7677         }
7678 }
7679
7680 static int tg3_mem_rx_acquire(struct tg3 *tp)
7681 {
7682         unsigned int i, limit;
7683
7684         limit = tp->rxq_cnt;
7685
7686         /* If RSS is enabled, we need a (dummy) producer ring
7687          * set on vector zero.  This is the true hw prodring.
7688          */
7689         if (tg3_flag(tp, ENABLE_RSS))
7690                 limit++;
7691
7692         for (i = 0; i < limit; i++) {
7693                 struct tg3_napi *tnapi = &tp->napi[i];
7694
7695                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7696                         goto err_out;
7697
7698                 /* If multivector RSS is enabled, vector 0
7699                  * does not handle rx or tx interrupts.
7700                  * Don't allocate any resources for it.
7701                  */
7702                 if (!i && tg3_flag(tp, ENABLE_RSS))
7703                         continue;
7704
7705                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7706                                                    TG3_RX_RCB_RING_BYTES(tp),
7707                                                    &tnapi->rx_rcb_mapping,
7708                                                    GFP_KERNEL);
7709                 if (!tnapi->rx_rcb)
7710                         goto err_out;
7711
7712                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7713         }
7714
7715         return 0;
7716
7717 err_out:
7718         tg3_mem_rx_release(tp);
7719         return -ENOMEM;
7720 }
7721
7722 /*
7723  * Must not be invoked with interrupt sources disabled and
7724  * the hardware shut down.
7725  */
7726 static void tg3_free_consistent(struct tg3 *tp)
7727 {
7728         int i;
7729
7730         for (i = 0; i < tp->irq_cnt; i++) {
7731                 struct tg3_napi *tnapi = &tp->napi[i];
7732
7733                 if (tnapi->hw_status) {
7734                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7735                                           tnapi->hw_status,
7736                                           tnapi->status_mapping);
7737                         tnapi->hw_status = NULL;
7738                 }
7739         }
7740
7741         tg3_mem_rx_release(tp);
7742         tg3_mem_tx_release(tp);
7743
7744         if (tp->hw_stats) {
7745                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7746                                   tp->hw_stats, tp->stats_mapping);
7747                 tp->hw_stats = NULL;
7748         }
7749 }
7750
7751 /*
7752  * Must not be invoked with interrupt sources disabled and
7753  * the hardware shut down.  Can sleep.
7754  */
7755 static int tg3_alloc_consistent(struct tg3 *tp)
7756 {
7757         int i;
7758
7759         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7760                                           sizeof(struct tg3_hw_stats),
7761                                           &tp->stats_mapping,
7762                                           GFP_KERNEL);
7763         if (!tp->hw_stats)
7764                 goto err_out;
7765
7766         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7767
7768         for (i = 0; i < tp->irq_cnt; i++) {
7769                 struct tg3_napi *tnapi = &tp->napi[i];
7770                 struct tg3_hw_status *sblk;
7771
7772                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7773                                                       TG3_HW_STATUS_SIZE,
7774                                                       &tnapi->status_mapping,
7775                                                       GFP_KERNEL);
7776                 if (!tnapi->hw_status)
7777                         goto err_out;
7778
7779                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7780                 sblk = tnapi->hw_status;
7781
7782                 if (tg3_flag(tp, ENABLE_RSS)) {
7783                         u16 *prodptr = NULL;
7784
7785                         /*
7786                          * When RSS is enabled, the status block format changes
7787                          * slightly.  The "rx_jumbo_consumer", "reserved",
7788                          * and "rx_mini_consumer" members get mapped to the
7789                          * other three rx return ring producer indexes.
7790                          */
7791                         switch (i) {
7792                         case 1:
7793                                 prodptr = &sblk->idx[0].rx_producer;
7794                                 break;
7795                         case 2:
7796                                 prodptr = &sblk->rx_jumbo_consumer;
7797                                 break;
7798                         case 3:
7799                                 prodptr = &sblk->reserved;
7800                                 break;
7801                         case 4:
7802                                 prodptr = &sblk->rx_mini_consumer;
7803                                 break;
7804                         }
7805                         tnapi->rx_rcb_prod_idx = prodptr;
7806                 } else {
7807                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7808                 }
7809         }
7810
7811         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
7812                 goto err_out;
7813
7814         return 0;
7815
7816 err_out:
7817         tg3_free_consistent(tp);
7818         return -ENOMEM;
7819 }
7820
7821 #define MAX_WAIT_CNT 1000
7822
7823 /* To stop a block, clear the enable bit and poll till it
7824  * clears.  tp->lock is held.
7825  */
7826 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7827 {
7828         unsigned int i;
7829         u32 val;
7830
7831         if (tg3_flag(tp, 5705_PLUS)) {
7832                 switch (ofs) {
7833                 case RCVLSC_MODE:
7834                 case DMAC_MODE:
7835                 case MBFREE_MODE:
7836                 case BUFMGR_MODE:
7837                 case MEMARB_MODE:
7838                         /* We can't enable/disable these bits of the
7839                          * 5705/5750, just say success.
7840                          */
7841                         return 0;
7842
7843                 default:
7844                         break;
7845                 }
7846         }
7847
7848         val = tr32(ofs);
7849         val &= ~enable_bit;
7850         tw32_f(ofs, val);
7851
7852         for (i = 0; i < MAX_WAIT_CNT; i++) {
7853                 udelay(100);
7854                 val = tr32(ofs);
7855                 if ((val & enable_bit) == 0)
7856                         break;
7857         }
7858
7859         if (i == MAX_WAIT_CNT && !silent) {
7860                 dev_err(&tp->pdev->dev,
7861                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7862                         ofs, enable_bit);
7863                 return -ENODEV;
7864         }
7865
7866         return 0;
7867 }
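
/* With MAX_WAIT_CNT polls of 100 usecs each, a block gets up to roughly
 * 1000 * 100 us = 100 ms to quiesce before tg3_stop_block() gives up
 * and returns -ENODEV.
 */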
7868
7869 /* tp->lock is held. */
7870 static int tg3_abort_hw(struct tg3 *tp, int silent)
7871 {
7872         int i, err;
7873
7874         tg3_disable_ints(tp);
7875
7876         tp->rx_mode &= ~RX_MODE_ENABLE;
7877         tw32_f(MAC_RX_MODE, tp->rx_mode);
7878         udelay(10);
7879
7880         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7881         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7882         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7883         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7884         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7885         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7886
7887         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7888         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7889         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7890         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7891         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7892         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7893         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7894
7895         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7896         tw32_f(MAC_MODE, tp->mac_mode);
7897         udelay(40);
7898
7899         tp->tx_mode &= ~TX_MODE_ENABLE;
7900         tw32_f(MAC_TX_MODE, tp->tx_mode);
7901
7902         for (i = 0; i < MAX_WAIT_CNT; i++) {
7903                 udelay(100);
7904                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7905                         break;
7906         }
7907         if (i >= MAX_WAIT_CNT) {
7908                 dev_err(&tp->pdev->dev,
7909                         "%s timed out, TX_MODE_ENABLE will not clear "
7910                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7911                 err |= -ENODEV;
7912         }
7913
7914         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7915         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7916         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7917
7918         tw32(FTQ_RESET, 0xffffffff);
7919         tw32(FTQ_RESET, 0x00000000);
7920
7921         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7922         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7923
7924         for (i = 0; i < tp->irq_cnt; i++) {
7925                 struct tg3_napi *tnapi = &tp->napi[i];
7926                 if (tnapi->hw_status)
7927                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7928         }
7929
7930         return err;
7931 }
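
/* The shutdown order above is deliberate: the receive MAC and the receive
 * list placement/selection blocks stop first so no new DMA is started,
 * the send side (including the read DMA engine) drains next, and host
 * coalescing, the write DMA engine, the buffer manager and the memory
 * arbiter go down last because the other blocks depend on them.
 */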
7932
7933 /* Save PCI command register before chip reset */
7934 static void tg3_save_pci_state(struct tg3 *tp)
7935 {
7936         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7937 }
7938
7939 /* Restore PCI state after chip reset */
7940 static void tg3_restore_pci_state(struct tg3 *tp)
7941 {
7942         u32 val;
7943
7944         /* Re-enable indirect register accesses. */
7945         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7946                                tp->misc_host_ctrl);
7947
7948         /* Set MAX PCI retry to zero. */
7949         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7950         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7951             tg3_flag(tp, PCIX_MODE))
7952                 val |= PCISTATE_RETRY_SAME_DMA;
7953         /* Allow reads and writes to the APE register and memory space. */
7954         if (tg3_flag(tp, ENABLE_APE))
7955                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7956                        PCISTATE_ALLOW_APE_SHMEM_WR |
7957                        PCISTATE_ALLOW_APE_PSPACE_WR;
7958         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7959
7960         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7961
7962         if (!tg3_flag(tp, PCI_EXPRESS)) {
7963                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7964                                       tp->pci_cacheline_sz);
7965                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7966                                       tp->pci_lat_timer);
7967         }
7968
7969         /* Make sure PCI-X relaxed ordering bit is clear. */
7970         if (tg3_flag(tp, PCIX_MODE)) {
7971                 u16 pcix_cmd;
7972
7973                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7974                                      &pcix_cmd);
7975                 pcix_cmd &= ~PCI_X_CMD_ERO;
7976                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7977                                       pcix_cmd);
7978         }
7979
7980         if (tg3_flag(tp, 5780_CLASS)) {
7981
7982                 /* Chip reset on 5780 will reset MSI enable bit,
7983                  * so need to restore it.
7984                  */
7985                 if (tg3_flag(tp, USING_MSI)) {
7986                         u16 ctrl;
7987
7988                         pci_read_config_word(tp->pdev,
7989                                              tp->msi_cap + PCI_MSI_FLAGS,
7990                                              &ctrl);
7991                         pci_write_config_word(tp->pdev,
7992                                               tp->msi_cap + PCI_MSI_FLAGS,
7993                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7994                         val = tr32(MSGINT_MODE);
7995                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7996                 }
7997         }
7998 }
7999
8000 /* tp->lock is held. */
8001 static int tg3_chip_reset(struct tg3 *tp)
8002 {
8003         u32 val;
8004         void (*write_op)(struct tg3 *, u32, u32);
8005         int i, err;
8006
8007         tg3_nvram_lock(tp);
8008
8009         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8010
8011         /* No matching tg3_nvram_unlock() after this because
8012          * chip reset below will undo the nvram lock.
8013          */
8014         tp->nvram_lock_cnt = 0;
8015
8016         /* GRC_MISC_CFG core clock reset will clear the memory
8017          * enable bit in PCI register 4 and the MSI enable bit
8018          * on some chips, so we save relevant registers here.
8019          */
8020         tg3_save_pci_state(tp);
8021
8022         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8023             tg3_flag(tp, 5755_PLUS))
8024                 tw32(GRC_FASTBOOT_PC, 0);
8025
8026         /*
8027          * We must avoid the readl() that normally takes place.
8028          * It locks machines, causes machine checks, and other
8029          * fun things.  So, temporarily disable the 5701
8030          * hardware workaround, while we do the reset.
8031          */
8032         write_op = tp->write32;
8033         if (write_op == tg3_write_flush_reg32)
8034                 tp->write32 = tg3_write32;
8035
8036         /* Prevent the irq handler from reading or writing PCI registers
8037          * during chip reset when the memory enable bit in the PCI command
8038          * register may be cleared.  The chip does not generate interrupt
8039          * at this time, but the irq handler may still be called due to irq
8040          * sharing or irqpoll.
8041          */
8042         tg3_flag_set(tp, CHIP_RESETTING);
8043         for (i = 0; i < tp->irq_cnt; i++) {
8044                 struct tg3_napi *tnapi = &tp->napi[i];
8045                 if (tnapi->hw_status) {
8046                         tnapi->hw_status->status = 0;
8047                         tnapi->hw_status->status_tag = 0;
8048                 }
8049                 tnapi->last_tag = 0;
8050                 tnapi->last_irq_tag = 0;
8051         }
8052         smp_mb();
8053
8054         for (i = 0; i < tp->irq_cnt; i++)
8055                 synchronize_irq(tp->napi[i].irq_vec);
8056
8057         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8058                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8059                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8060         }
8061
8062         /* do the reset */
8063         val = GRC_MISC_CFG_CORECLK_RESET;
8064
8065         if (tg3_flag(tp, PCI_EXPRESS)) {
8066                 /* Force PCIe 1.0a mode */
8067                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8068                     !tg3_flag(tp, 57765_PLUS) &&
8069                     tr32(TG3_PCIE_PHY_TSTCTL) ==
8070                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8071                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8072
8073                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
8074                         tw32(GRC_MISC_CFG, (1 << 29));
8075                         val |= (1 << 29);
8076                 }
8077         }
8078
8079         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8080                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8081                 tw32(GRC_VCPU_EXT_CTRL,
8082                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8083         }
8084
8085         /* Manage gphy power for all CPMU absent PCIe devices. */
8086         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8087                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8088
8089         tw32(GRC_MISC_CFG, val);
8090
8091         /* restore 5701 hardware bug workaround write method */
8092         tp->write32 = write_op;
8093
8094         /* Unfortunately, we have to delay before the PCI read back.
8095          * Some 575X chips will not even respond to a PCI cfg access
8096          * when the reset command is given to the chip.
8097          *
8098          * How do these hardware designers expect things to work
8099          * properly if the PCI write is posted for a long period
8100          * of time?  It is always necessary to have some method by
8101          * which a register read back can occur to push the write
8102          * out which does the reset.
8103          *
8104          * For most tg3 variants the trick below was working.
8105          * Ho hum...
8106          */
8107         udelay(120);
8108
8109         /* Flush PCI posted writes.  The normal MMIO registers
8110          * are inaccessible at this time so this is the only
8111          * way to do this reliably (actually, this is no longer
8112          * the case, see above).  I tried to use indirect
8113          * register read/write but this upset some 5701 variants.
8114          */
8115         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8116
8117         udelay(120);
8118
8119         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
8120                 u16 val16;
8121
8122                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
8123                         int i;
8124                         u32 cfg_val;
8125
8126                         /* Wait for link training to complete.  */
8127                         for (i = 0; i < 5000; i++)
8128                                 udelay(100);
8129
8130                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8131                         pci_write_config_dword(tp->pdev, 0xc4,
8132                                                cfg_val | (1 << 15));
8133                 }
8134
8135                 /* Clear the "no snoop" and "relaxed ordering" bits. */
8136                 pci_read_config_word(tp->pdev,
8137                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
8138                                      &val16);
8139                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
8140                            PCI_EXP_DEVCTL_NOSNOOP_EN);
8141                 /*
8142                  * Older PCIe devices only support the 128 byte
8143                  * MPS setting.  Enforce the restriction.
8144                  */
8145                 if (!tg3_flag(tp, CPMU_PRESENT))
8146                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
8147                 pci_write_config_word(tp->pdev,
8148                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
8149                                       val16);
8150
8151                 /* Clear error status */
8152                 pci_write_config_word(tp->pdev,
8153                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
8154                                       PCI_EXP_DEVSTA_CED |
8155                                       PCI_EXP_DEVSTA_NFED |
8156                                       PCI_EXP_DEVSTA_FED |
8157                                       PCI_EXP_DEVSTA_URD);
8158         }
8159
8160         tg3_restore_pci_state(tp);
8161
8162         tg3_flag_clear(tp, CHIP_RESETTING);
8163         tg3_flag_clear(tp, ERROR_PROCESSED);
8164
8165         val = 0;
8166         if (tg3_flag(tp, 5780_CLASS))
8167                 val = tr32(MEMARB_MODE);
8168         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8169
8170         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
8171                 tg3_stop_fw(tp);
8172                 tw32(0x5000, 0x400);
8173         }
8174
8175         tw32(GRC_MODE, tp->grc_mode);
8176
8177         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
8178                 val = tr32(0xc4);
8179
8180                 tw32(0xc4, val | (1 << 15));
8181         }
8182
8183         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8184             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8185                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8186                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8187                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8188                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8189         }
8190
8191         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8192                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8193                 val = tp->mac_mode;
8194         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8195                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8196                 val = tp->mac_mode;
8197         } else
8198                 val = 0;
8199
8200         tw32_f(MAC_MODE, val);
8201         udelay(40);
8202
8203         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8204
8205         err = tg3_poll_fw(tp);
8206         if (err)
8207                 return err;
8208
8209         tg3_mdio_start(tp);
8210
8211         if (tg3_flag(tp, PCI_EXPRESS) &&
8212             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8213             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8214             !tg3_flag(tp, 57765_PLUS)) {
8215                 val = tr32(0x7c00);
8216
8217                 tw32(0x7c00, val | (1 << 25));
8218         }
8219
8220         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8221                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8222                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8223         }
8224
8225         /* Reprobe ASF enable state.  */
8226         tg3_flag_clear(tp, ENABLE_ASF);
8227         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8228         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8229         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8230                 u32 nic_cfg;
8231
8232                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8233                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8234                         tg3_flag_set(tp, ENABLE_ASF);
8235                         tp->last_event_jiffies = jiffies;
8236                         if (tg3_flag(tp, 5750_PLUS))
8237                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8238                 }
8239         }
8240
8241         return 0;
8242 }
8243
8244 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8245 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8246
8247 /* tp->lock is held. */
8248 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8249 {
8250         int err;
8251
8252         tg3_stop_fw(tp);
8253
8254         tg3_write_sig_pre_reset(tp, kind);
8255
8256         tg3_abort_hw(tp, silent);
8257         err = tg3_chip_reset(tp);
8258
8259         __tg3_set_mac_addr(tp, 0);
8260
8261         tg3_write_sig_legacy(tp, kind);
8262         tg3_write_sig_post_reset(tp, kind);
8263
8264         if (tp->hw_stats) {
8265                 /* Save the stats across chip resets... */
8266                 tg3_get_nstats(tp, &tp->net_stats_prev);
8267                 tg3_get_estats(tp, &tp->estats_prev);
8268
8269                 /* And make sure the next sample is new data */
8270                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8271         }
8272
8273         if (err)
8274                 return err;
8275
8276         return 0;
8277 }
8278
8279 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8280 {
8281         struct tg3 *tp = netdev_priv(dev);
8282         struct sockaddr *addr = p;
8283         int err = 0, skip_mac_1 = 0;
8284
8285         if (!is_valid_ether_addr(addr->sa_data))
8286                 return -EADDRNOTAVAIL;
8287
8288         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8289
8290         if (!netif_running(dev))
8291                 return 0;
8292
8293         if (tg3_flag(tp, ENABLE_ASF)) {
8294                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8295
8296                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8297                 addr0_low = tr32(MAC_ADDR_0_LOW);
8298                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8299                 addr1_low = tr32(MAC_ADDR_1_LOW);
8300
8301                 /* Skip MAC addr 1 if ASF is using it. */
8302                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8303                     !(addr1_high == 0 && addr1_low == 0))
8304                         skip_mac_1 = 1;
8305         }
8306         spin_lock_bh(&tp->lock);
8307         __tg3_set_mac_addr(tp, skip_mac_1);
8308         spin_unlock_bh(&tp->lock);
8309
8310         return err;
8311 }
8312
8313 /* tp->lock is held. */
8314 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8315                            dma_addr_t mapping, u32 maxlen_flags,
8316                            u32 nic_addr)
8317 {
8318         tg3_write_mem(tp,
8319                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8320                       ((u64) mapping >> 32));
8321         tg3_write_mem(tp,
8322                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8323                       ((u64) mapping & 0xffffffff));
8324         tg3_write_mem(tp,
8325                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8326                        maxlen_flags);
8327
8328         if (!tg3_flag(tp, 5705_PLUS))
8329                 tg3_write_mem(tp,
8330                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8331                               nic_addr);
8332 }
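
/* Each ring control block ("bdinfo") in NIC SRAM has a small fixed layout:
 * a 64-bit host DMA address, a combined max-length/flags word and, on
 * chips before the 5705 generation, a NIC-local ring address.  Writing
 * BDINFO_FLAGS_DISABLED into the maxlen/flags word, as tg3_rings_reset()
 * does below, is how unused rings are switched off.
 */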
8333
8334
8335 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8336 {
8337         int i = 0;
8338
8339         if (!tg3_flag(tp, ENABLE_TSS)) {
8340                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8341                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8342                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8343         } else {
8344                 tw32(HOSTCC_TXCOL_TICKS, 0);
8345                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8346                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8347
8348                 for (; i < tp->txq_cnt; i++) {
8349                         u32 reg;
8350
8351                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8352                         tw32(reg, ec->tx_coalesce_usecs);
8353                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8354                         tw32(reg, ec->tx_max_coalesced_frames);
8355                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8356                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8357                 }
8358         }
8359
8360         for (; i < tp->irq_max - 1; i++) {
8361                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8362                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8363                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8364         }
8365 }
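
/* The per-vector coalescing registers sit at a fixed 0x18-byte stride, so
 * queue i's registers live at the _VEC1 base plus i * 0x18.  The trailing
 * loop zeroes the registers of vectors that exist in hardware but are
 * unused in the current configuration.
 */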
8366
8367 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8368 {
8369         int i = 0;
8370         u32 limit = tp->rxq_cnt;
8371
8372         if (!tg3_flag(tp, ENABLE_RSS)) {
8373                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8374                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8375                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8376                 limit--;
8377         } else {
8378                 tw32(HOSTCC_RXCOL_TICKS, 0);
8379                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8380                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8381         }
8382
8383         for (; i < limit; i++) {
8384                 u32 reg;
8385
8386                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8387                 tw32(reg, ec->rx_coalesce_usecs);
8388                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8389                 tw32(reg, ec->rx_max_coalesced_frames);
8390                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8391                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8392         }
8393
8394         for (; i < tp->irq_max - 1; i++) {
8395                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8396                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8397                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8398         }
8399 }
8400
8401 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8402 {
8403         tg3_coal_tx_init(tp, ec);
8404         tg3_coal_rx_init(tp, ec);
8405
8406         if (!tg3_flag(tp, 5705_PLUS)) {
8407                 u32 val = ec->stats_block_coalesce_usecs;
8408
8409                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8410                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8411
8412                 if (!netif_carrier_ok(tp->dev))
8413                         val = 0;
8414
8415                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8416         }
8417 }
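
/* Statistics-block coalescing only exists on pre-5705_PLUS parts.  The
 * tick value is forced to zero while the carrier is down, presumably to
 * stop the chip from periodically DMA-ing statistics snapshots while the
 * link can produce nothing new to report.
 */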
8418
8419 /* tp->lock is held. */
8420 static void tg3_rings_reset(struct tg3 *tp)
8421 {
8422         int i;
8423         u32 stblk, txrcb, rxrcb, limit;
8424         struct tg3_napi *tnapi = &tp->napi[0];
8425
8426         /* Disable all transmit rings but the first. */
8427         if (!tg3_flag(tp, 5705_PLUS))
8428                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8429         else if (tg3_flag(tp, 5717_PLUS))
8430                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8431         else if (tg3_flag(tp, 57765_CLASS))
8432                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8433         else
8434                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8435
8436         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8437              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8438                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8439                               BDINFO_FLAGS_DISABLED);
8440
8441
8442         /* Disable all receive return rings but the first. */
8443         if (tg3_flag(tp, 5717_PLUS))
8444                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8445         else if (!tg3_flag(tp, 5705_PLUS))
8446                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8447         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8448                  tg3_flag(tp, 57765_CLASS))
8449                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8450         else
8451                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8452
8453         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8454              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8455                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8456                               BDINFO_FLAGS_DISABLED);
8457
8458         /* Disable interrupts */
8459         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8460         tp->napi[0].chk_msi_cnt = 0;
8461         tp->napi[0].last_rx_cons = 0;
8462         tp->napi[0].last_tx_cons = 0;
8463
8464         /* Zero mailbox registers. */
8465         if (tg3_flag(tp, SUPPORT_MSIX)) {
8466                 for (i = 1; i < tp->irq_max; i++) {
8467                         tp->napi[i].tx_prod = 0;
8468                         tp->napi[i].tx_cons = 0;
8469                         if (tg3_flag(tp, ENABLE_TSS))
8470                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8471                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8472                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8473                         tp->napi[i].chk_msi_cnt = 0;
8474                         tp->napi[i].last_rx_cons = 0;
8475                         tp->napi[i].last_tx_cons = 0;
8476                 }
8477                 if (!tg3_flag(tp, ENABLE_TSS))
8478                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8479         } else {
8480                 tp->napi[0].tx_prod = 0;
8481                 tp->napi[0].tx_cons = 0;
8482                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8483                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8484         }
8485
8486         /* Make sure the NIC-based send BD rings are disabled. */
8487         if (!tg3_flag(tp, 5705_PLUS)) {
8488                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8489                 for (i = 0; i < 16; i++)
8490                         tw32_tx_mbox(mbox + i * 8, 0);
8491         }
8492
8493         txrcb = NIC_SRAM_SEND_RCB;
8494         rxrcb = NIC_SRAM_RCV_RET_RCB;
8495
8496         /* Clear status block in ram. */
8497         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8498
8499         /* Set status block DMA address */
8500         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8501              ((u64) tnapi->status_mapping >> 32));
8502         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8503              ((u64) tnapi->status_mapping & 0xffffffff));
8504
8505         if (tnapi->tx_ring) {
8506                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8507                                (TG3_TX_RING_SIZE <<
8508                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8509                                NIC_SRAM_TX_BUFFER_DESC);
8510                 txrcb += TG3_BDINFO_SIZE;
8511         }
8512
8513         if (tnapi->rx_rcb) {
8514                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8515                                (tp->rx_ret_ring_mask + 1) <<
8516                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8517                 rxrcb += TG3_BDINFO_SIZE;
8518         }
8519
8520         stblk = HOSTCC_STATBLCK_RING1;
8521
8522         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8523                 u64 mapping = (u64)tnapi->status_mapping;
8524                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8525                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8526
8527                 /* Clear status block in ram. */
8528                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8529
8530                 if (tnapi->tx_ring) {
8531                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8532                                        (TG3_TX_RING_SIZE <<
8533                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8534                                        NIC_SRAM_TX_BUFFER_DESC);
8535                         txrcb += TG3_BDINFO_SIZE;
8536                 }
8537
8538                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8539                                ((tp->rx_ret_ring_mask + 1) <<
8540                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8541
8542                 stblk += 8;
8543                 rxrcb += TG3_BDINFO_SIZE;
8544         }
8545 }
8546
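/* Program the RX buffer descriptor replenish thresholds.  The NIC-side
 * threshold is the smaller of half the chip family's on-chip BD cache
 * and rx_std_max_post; the host-side threshold scales with the
 * configured ring size (rx_pending / 8, minimum 1).  The smaller of
 * the two is what gets programmed.
 */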
8547 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8548 {
8549         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8550
8551         if (!tg3_flag(tp, 5750_PLUS) ||
8552             tg3_flag(tp, 5780_CLASS) ||
8553             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8554             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8555             tg3_flag(tp, 57765_PLUS))
8556                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8557         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8558                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8559                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8560         else
8561                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8562
8563         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8564         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8565
8566         val = min(nic_rep_thresh, host_rep_thresh);
8567         tw32(RCVBDI_STD_THRESH, val);
8568
8569         if (tg3_flag(tp, 57765_PLUS))
8570                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8571
8572         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8573                 return;
8574
8575         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8576
8577         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8578
8579         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8580         tw32(RCVBDI_JUMBO_THRESH, val);
8581
8582         if (tg3_flag(tp, 57765_PLUS))
8583                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8584 }
8585
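/* Bit-by-bit CRC-32 (reflected polynomial 0xedb88320, the same CRC
 * Ethernet uses) over a buffer.  The multicast filter below hashes
 * each address with this and uses the low 7 bits of the bitwise-
 * inverted result to select one of the 128 bits spread across the
 * four MAC_HASH_REG_* registers.
 */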
8586 static inline u32 calc_crc(unsigned char *buf, int len)
8587 {
8588         u32 reg;
8589         u32 tmp;
8590         int j, k;
8591
8592         reg = 0xffffffff;
8593
8594         for (j = 0; j < len; j++) {
8595                 reg ^= buf[j];
8596
8597                 for (k = 0; k < 8; k++) {
8598                         tmp = reg & 0x01;
8599
8600                         reg >>= 1;
8601
8602                         if (tmp)
8603                                 reg ^= 0xedb88320;
8604                 }
8605         }
8606
8607         return ~reg;
8608 }
8609
8610 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8611 {
8612         /* accept or reject all multicast frames */
8613         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8614         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8615         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8616         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8617 }
8618
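/* Rebuild MAC_RX_MODE and the multicast hash filter from the current
 * net_device flags: promiscuous mode passes everything, IFF_ALLMULTI
 * opens the hash filter completely, an empty multicast list closes it,
 * and otherwise each multicast address sets one bit in the 128-bit CRC
 * hash.
 */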
8619 static void __tg3_set_rx_mode(struct net_device *dev)
8620 {
8621         struct tg3 *tp = netdev_priv(dev);
8622         u32 rx_mode;
8623
8624         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8625                                   RX_MODE_KEEP_VLAN_TAG);
8626
8627 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8628         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8629          * flag clear.
8630          */
8631         if (!tg3_flag(tp, ENABLE_ASF))
8632                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8633 #endif
8634
8635         if (dev->flags & IFF_PROMISC) {
8636                 /* Promiscuous mode. */
8637                 rx_mode |= RX_MODE_PROMISC;
8638         } else if (dev->flags & IFF_ALLMULTI) {
8639                 /* Accept all multicast. */
8640                 tg3_set_multi(tp, 1);
8641         } else if (netdev_mc_empty(dev)) {
8642                 /* Reject all multicast. */
8643                 tg3_set_multi(tp, 0);
8644         } else {
8645                 /* Accept one or more multicast(s). */
8646                 struct netdev_hw_addr *ha;
8647                 u32 mc_filter[4] = { 0, };
8648                 u32 regidx;
8649                 u32 bit;
8650                 u32 crc;
8651
8652                 netdev_for_each_mc_addr(ha, dev) {
8653                         crc = calc_crc(ha->addr, ETH_ALEN);
8654                         bit = ~crc & 0x7f;
8655                         regidx = (bit & 0x60) >> 5;
8656                         bit &= 0x1f;
8657                         mc_filter[regidx] |= (1 << bit);
8658                 }
8659
8660                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8661                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8662                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8663                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8664         }
8665
8666         if (rx_mode != tp->rx_mode) {
8667                 tp->rx_mode = rx_mode;
8668                 tw32_f(MAC_RX_MODE, rx_mode);
8669                 udelay(10);
8670         }
8671 }
8672
8673 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
8674 {
8675         int i;
8676
8677         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8678                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
8679 }
8680
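/* Sanity-check the RSS indirection table against the current interrupt
 * count; if any entry points at an RX queue that no longer exists,
 * regenerate the table with the ethtool default spread.
 */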
8681 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8682 {
8683         int i;
8684
8685         if (!tg3_flag(tp, SUPPORT_MSIX))
8686                 return;
8687
8688         if (tp->irq_cnt <= 2) {
8689                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8690                 return;
8691         }
8692
8693         /* Validate table against current IRQ count */
8694         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8695                 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8696                         break;
8697         }
8698
8699         if (i != TG3_RSS_INDIR_TBL_SIZE)
8700                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
8701 }
8702
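/* Pack the indirection table into hardware format: eight 4-bit queue
 * indices per 32-bit MAC_RSS_INDIR_TBL_* register, first entry in the
 * most significant nibble.
 */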
8703 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8704 {
8705         int i = 0;
8706         u32 reg = MAC_RSS_INDIR_TBL_0;
8707
8708         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8709                 u32 val = tp->rss_ind_tbl[i];
8710                 i++;
8711                 for (; i % 8; i++) {
8712                         val <<= 4;
8713                         val |= tp->rss_ind_tbl[i];
8714                 }
8715                 tw32(reg, val);
8716                 reg += 4;
8717         }
8718 }
8719
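/* Bring the chip from reset to a fully programmed state: stop the
 * firmware and MAC, issue the core reset, then reprogram clocks, DMA
 * engines, the buffer manager, rings, coalescing, MAC/TX/RX modes and
 * receive rules in the order the hardware requires.
 */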
8720 /* tp->lock is held. */
8721 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8722 {
8723         u32 val, rdmac_mode;
8724         int i, err, limit;
8725         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8726
8727         tg3_disable_ints(tp);
8728
8729         tg3_stop_fw(tp);
8730
8731         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8732
8733         if (tg3_flag(tp, INIT_COMPLETE))
8734                 tg3_abort_hw(tp, 1);
8735
8736         /* Enable MAC control of LPI */
8737         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8738                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8739                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8740                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8741
8742                 tw32_f(TG3_CPMU_EEE_CTRL,
8743                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8744
8745                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8746                       TG3_CPMU_EEEMD_LPI_IN_TX |
8747                       TG3_CPMU_EEEMD_LPI_IN_RX |
8748                       TG3_CPMU_EEEMD_EEE_ENABLE;
8749
8750                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8751                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8752
8753                 if (tg3_flag(tp, ENABLE_APE))
8754                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8755
8756                 tw32_f(TG3_CPMU_EEE_MODE, val);
8757
8758                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8759                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8760                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8761
8762                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8763                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8764                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8765         }
8766
8767         if (reset_phy)
8768                 tg3_phy_reset(tp);
8769
8770         err = tg3_chip_reset(tp);
8771         if (err)
8772                 return err;
8773
8774         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8775
8776         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8777                 val = tr32(TG3_CPMU_CTRL);
8778                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8779                 tw32(TG3_CPMU_CTRL, val);
8780
8781                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8782                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8783                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8784                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8785
8786                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8787                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8788                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8789                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8790
8791                 val = tr32(TG3_CPMU_HST_ACC);
8792                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8793                 val |= CPMU_HST_ACC_MACCLK_6_25;
8794                 tw32(TG3_CPMU_HST_ACC, val);
8795         }
8796
8797         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8798                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8799                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8800                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8801                 tw32(PCIE_PWR_MGMT_THRESH, val);
8802
8803                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8804                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8805
8806                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8807
8808                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8809                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8810         }
8811
8812         if (tg3_flag(tp, L1PLLPD_EN)) {
8813                 u32 grc_mode = tr32(GRC_MODE);
8814
8815                 /* Access the lower 1K of PL PCIE block registers. */
8816                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8817                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8818
8819                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8820                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8821                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8822
8823                 tw32(GRC_MODE, grc_mode);
8824         }
8825
8826         if (tg3_flag(tp, 57765_CLASS)) {
8827                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8828                         u32 grc_mode = tr32(GRC_MODE);
8829
8830                         /* Access the lower 1K of PL PCIE block registers. */
8831                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8832                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8833
8834                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8835                                    TG3_PCIE_PL_LO_PHYCTL5);
8836                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8837                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8838
8839                         tw32(GRC_MODE, grc_mode);
8840                 }
8841
8842                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8843                         u32 grc_mode = tr32(GRC_MODE);
8844
8845                         /* Access the lower 1K of DL PCIE block registers. */
8846                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8847                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8848
8849                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8850                                    TG3_PCIE_DL_LO_FTSMAX);
8851                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8852                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8853                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8854
8855                         tw32(GRC_MODE, grc_mode);
8856                 }
8857
8858                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8859                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8860                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8861                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8862         }
8863
8864         /* This works around an issue with Athlon chipsets on
8865          * B3 tigon3 silicon.  This bit has no effect on any
8866          * other revision.  But do not set this on PCI Express
8867          * chips and don't even touch the clocks if the CPMU is present.
8868          */
8869         if (!tg3_flag(tp, CPMU_PRESENT)) {
8870                 if (!tg3_flag(tp, PCI_EXPRESS))
8871                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8872                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8873         }
8874
8875         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8876             tg3_flag(tp, PCIX_MODE)) {
8877                 val = tr32(TG3PCI_PCISTATE);
8878                 val |= PCISTATE_RETRY_SAME_DMA;
8879                 tw32(TG3PCI_PCISTATE, val);
8880         }
8881
8882         if (tg3_flag(tp, ENABLE_APE)) {
8883                 /* Allow reads and writes to the
8884                  * APE register and memory space.
8885                  */
8886                 val = tr32(TG3PCI_PCISTATE);
8887                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8888                        PCISTATE_ALLOW_APE_SHMEM_WR |
8889                        PCISTATE_ALLOW_APE_PSPACE_WR;
8890                 tw32(TG3PCI_PCISTATE, val);
8891         }
8892
8893         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8894                 /* Enable some hw fixes.  */
8895                 val = tr32(TG3PCI_MSI_DATA);
8896                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8897                 tw32(TG3PCI_MSI_DATA, val);
8898         }
8899
8900         /* Descriptor ring init may access the NIC SRAM area
8901          * to set up the TX descriptors, so we can only do
8902          * this after the hardware has been successfully
8903          * reset.
8904          */
8905         err = tg3_init_rings(tp);
8906         if (err)
8907                 return err;
8908
8909         if (tg3_flag(tp, 57765_PLUS)) {
8910                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8911                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8912                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8913                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8914                 if (!tg3_flag(tp, 57765_CLASS) &&
8915                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8916                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8917                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8918         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8919                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8920                 /* This value is determined by the probe-time DMA
8921                  * engine test, tg3_test_dma.
8922                  */
8923                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8924         }
8925
8926         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8927                           GRC_MODE_4X_NIC_SEND_RINGS |
8928                           GRC_MODE_NO_TX_PHDR_CSUM |
8929                           GRC_MODE_NO_RX_PHDR_CSUM);
8930         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8931
8932         /* Pseudo-header checksum is done by hardware logic and not
8933          * the offload processors, so make the chip do the pseudo-
8934          * header checksums on receive.  For transmit it is more
8935          * convenient to do the pseudo-header checksum in software
8936          * as Linux does that on transmit for us in all cases.
8937          */
8938         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8939
8940         tw32(GRC_MODE,
8941              tp->grc_mode |
8942              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8943
8944         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
8945         val = tr32(GRC_MISC_CFG);
8946         val &= ~0xff;
8947         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8948         tw32(GRC_MISC_CFG, val);
8949
8950         /* Initialize MBUF/DESC pool. */
8951         if (tg3_flag(tp, 5750_PLUS)) {
8952                 /* Do nothing.  */
8953         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8954                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8955                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8956                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8957                 else
8958                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8959                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8960                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8961         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8962                 int fw_len;
8963
8964                 fw_len = tp->fw_len;
8965                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8966                 tw32(BUFMGR_MB_POOL_ADDR,
8967                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8968                 tw32(BUFMGR_MB_POOL_SIZE,
8969                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8970         }
8971
8972         if (tp->dev->mtu <= ETH_DATA_LEN) {
8973                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8974                      tp->bufmgr_config.mbuf_read_dma_low_water);
8975                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8976                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8977                 tw32(BUFMGR_MB_HIGH_WATER,
8978                      tp->bufmgr_config.mbuf_high_water);
8979         } else {
8980                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8981                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8982                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8983                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8984                 tw32(BUFMGR_MB_HIGH_WATER,
8985                      tp->bufmgr_config.mbuf_high_water_jumbo);
8986         }
8987         tw32(BUFMGR_DMA_LOW_WATER,
8988              tp->bufmgr_config.dma_low_water);
8989         tw32(BUFMGR_DMA_HIGH_WATER,
8990              tp->bufmgr_config.dma_high_water);
8991
8992         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8993         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8994                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8995         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8996             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8997             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8998                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8999         tw32(BUFMGR_MODE, val);
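        /* The buffer manager latches the enable bit asynchronously;
         * poll for up to 20ms (2000 * 10us) for it to read back as set
         * before declaring the device unusable.
         */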
9000         for (i = 0; i < 2000; i++) {
9001                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9002                         break;
9003                 udelay(10);
9004         }
9005         if (i >= 2000) {
9006                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9007                 return -ENODEV;
9008         }
9009
9010         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
9011                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9012
9013         tg3_setup_rxbd_thresholds(tp);
9014
9015         /* Initialize TG3_BDINFOs at:
9016          *  RCVDBDI_STD_BD:     standard eth size rx ring
9017          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9018          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
9019          *
9020          * like so:
9021          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9022          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9023          *                              ring attribute flags
9024          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9025          *
9026          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9027          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9028          *
9029          * The size of each ring is fixed in the firmware, but the location is
9030          * configurable.
9031          */
9032         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9033              ((u64) tpr->rx_std_mapping >> 32));
9034         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9035              ((u64) tpr->rx_std_mapping & 0xffffffff));
9036         if (!tg3_flag(tp, 5717_PLUS))
9037                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9038                      NIC_SRAM_RX_BUFFER_DESC);
9039
9040         /* Disable the mini ring */
9041         if (!tg3_flag(tp, 5705_PLUS))
9042                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9043                      BDINFO_FLAGS_DISABLED);
9044
9045         /* Program the jumbo buffer descriptor ring control
9046          * blocks on those devices that have them.
9047          */
9048         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9049             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9050
9051                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9052                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9053                              ((u64) tpr->rx_jmb_mapping >> 32));
9054                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9055                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9056                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9057                               BDINFO_FLAGS_MAXLEN_SHIFT;
9058                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9059                              val | BDINFO_FLAGS_USE_EXT_RECV);
9060                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9061                             tg3_flag(tp, 57765_CLASS))
9062                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9063                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9064                 } else {
9065                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9066                              BDINFO_FLAGS_DISABLED);
9067                 }
9068
9069                 if (tg3_flag(tp, 57765_PLUS)) {
9070                         val = TG3_RX_STD_RING_SIZE(tp);
9071                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9072                         val |= (TG3_RX_STD_DMA_SZ << 2);
9073                 } else
9074                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9075         } else
9076                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9077
9078         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9079
9080         tpr->rx_std_prod_idx = tp->rx_pending;
9081         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9082
9083         tpr->rx_jmb_prod_idx =
9084                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9085         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9086
9087         tg3_rings_reset(tp);
9088
9089         /* Initialize MAC address and backoff seed. */
9090         __tg3_set_mac_addr(tp, 0);
9091
9092         /* MTU + ethernet header + FCS + optional VLAN tag */
9093         tw32(MAC_RX_MTU_SIZE,
9094              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9095
9096         /* The slot time is changed by tg3_setup_phy if we
9097          * run at gigabit with half duplex.
9098          */
9099         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9100               (6 << TX_LENGTHS_IPG_SHIFT) |
9101               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9102
9103         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9104                 val |= tr32(MAC_TX_LENGTHS) &
9105                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9106                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9107
9108         tw32(MAC_TX_LENGTHS, val);
9109
9110         /* Receive rules. */
9111         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9112         tw32(RCVLPC_CONFIG, 0x0181);
9113
9114         /* Calculate the RDMAC_MODE setting early; we need it to
9115          * determine the RCVLPC_STATS_ENABLE mask.
9116          */
9117         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9118                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9119                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9120                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9121                       RDMAC_MODE_LNGREAD_ENAB);
9122
9123         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9124                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9125
9126         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9127             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9128             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9129                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9130                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9131                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9132
9133         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9134             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9135                 if (tg3_flag(tp, TSO_CAPABLE) &&
9136                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
9137                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9138                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9139                            !tg3_flag(tp, IS_5788)) {
9140                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9141                 }
9142         }
9143
9144         if (tg3_flag(tp, PCI_EXPRESS))
9145                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9146
9147         if (tg3_flag(tp, HW_TSO_1) ||
9148             tg3_flag(tp, HW_TSO_2) ||
9149             tg3_flag(tp, HW_TSO_3))
9150                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9151
9152         if (tg3_flag(tp, 57765_PLUS) ||
9153             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9154             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9155                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9156
9157         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9158                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9159
9160         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9161             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9162             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9163             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9164             tg3_flag(tp, 57765_PLUS)) {
9165                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
9166                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
9167                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9168                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9169                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9170                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9171                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9172                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9173                 }
9174                 tw32(TG3_RDMA_RSRVCTRL_REG,
9175                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9176         }
9177
9178         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9179             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9180                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9181                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
9182                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9183                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9184         }
9185
9186         /* Receive/send statistics. */
9187         if (tg3_flag(tp, 5750_PLUS)) {
9188                 val = tr32(RCVLPC_STATS_ENABLE);
9189                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9190                 tw32(RCVLPC_STATS_ENABLE, val);
9191         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9192                    tg3_flag(tp, TSO_CAPABLE)) {
9193                 val = tr32(RCVLPC_STATS_ENABLE);
9194                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9195                 tw32(RCVLPC_STATS_ENABLE, val);
9196         } else {
9197                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9198         }
9199         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9200         tw32(SNDDATAI_STATSENAB, 0xffffff);
9201         tw32(SNDDATAI_STATSCTRL,
9202              (SNDDATAI_SCTRL_ENABLE |
9203               SNDDATAI_SCTRL_FASTUPD));
9204
9205         /* Set up the host coalescing engine. */
9206         tw32(HOSTCC_MODE, 0);
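        /* The coalescing engine must be idle before it is reprogrammed;
         * wait up to 20ms (2000 * 10us) for the enable bit to clear.
         */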
9207         for (i = 0; i < 2000; i++) {
9208                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9209                         break;
9210                 udelay(10);
9211         }
9212
9213         __tg3_set_coalesce(tp, &tp->coal);
9214
9215         if (!tg3_flag(tp, 5705_PLUS)) {
9216                 /* Status/statistics block address.  See tg3_timer,
9217                  * the tg3_periodic_fetch_stats call there, and
9218                  * tg3_get_stats to see how this works for 5705/5750 chips.
9219                  */
9220                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9221                      ((u64) tp->stats_mapping >> 32));
9222                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9223                      ((u64) tp->stats_mapping & 0xffffffff));
9224                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9225
9226                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9227
9228                 /* Clear statistics and status block memory areas */
9229                 for (i = NIC_SRAM_STATS_BLK;
9230                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9231                      i += sizeof(u32)) {
9232                         tg3_write_mem(tp, i, 0);
9233                         udelay(40);
9234                 }
9235         }
9236
9237         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9238
9239         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9240         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9241         if (!tg3_flag(tp, 5705_PLUS))
9242                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9243
9244         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9245                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9246                 /* Reset to avoid intermittently losing the first rx packet. */
9247                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9248                 udelay(10);
9249         }
9250
9251         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9252                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9253                         MAC_MODE_FHDE_ENABLE;
9254         if (tg3_flag(tp, ENABLE_APE))
9255                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9256         if (!tg3_flag(tp, 5705_PLUS) &&
9257             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9258             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9259                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9260         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9261         udelay(40);
9262
9263         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9264          * If TG3_FLAG_IS_NIC is zero, we should read the
9265          * register to preserve the GPIO settings for LOMs. The GPIOs,
9266          * whether used as inputs or outputs, are set by boot code after
9267          * reset.
9268          */
9269         if (!tg3_flag(tp, IS_NIC)) {
9270                 u32 gpio_mask;
9271
9272                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9273                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9274                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9275
9276                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9277                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9278                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9279
9280                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9281                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9282
9283                 tp->grc_local_ctrl &= ~gpio_mask;
9284                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9285
9286                 /* GPIO1 must be driven high for EEPROM write protect. */
9287                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9288                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9289                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9290         }
9291         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9292         udelay(100);
9293
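        /* With MSI-X, enable the message interrupt block; multi-vector
         * routing is only switched on when more than one vector is in
         * use, and one-shot mode stays disabled unless the 1SHOT_MSI
         * flag says this chip handles it.
         */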
9294         if (tg3_flag(tp, USING_MSIX)) {
9295                 val = tr32(MSGINT_MODE);
9296                 val |= MSGINT_MODE_ENABLE;
9297                 if (tp->irq_cnt > 1)
9298                         val |= MSGINT_MODE_MULTIVEC_EN;
9299                 if (!tg3_flag(tp, 1SHOT_MSI))
9300                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9301                 tw32(MSGINT_MODE, val);
9302         }
9303
9304         if (!tg3_flag(tp, 5705_PLUS)) {
9305                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9306                 udelay(40);
9307         }
9308
9309         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9310                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9311                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9312                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9313                WDMAC_MODE_LNGREAD_ENAB);
9314
9315         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9316             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9317                 if (tg3_flag(tp, TSO_CAPABLE) &&
9318                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9319                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9320                         /* nothing */
9321                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9322                            !tg3_flag(tp, IS_5788)) {
9323                         val |= WDMAC_MODE_RX_ACCEL;
9324                 }
9325         }
9326
9327         /* Enable host coalescing bug fix */
9328         if (tg3_flag(tp, 5755_PLUS))
9329                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9330
9331         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9332                 val |= WDMAC_MODE_BURST_ALL_DATA;
9333
9334         tw32_f(WDMAC_MODE, val);
9335         udelay(40);
9336
9337         if (tg3_flag(tp, PCIX_MODE)) {
9338                 u16 pcix_cmd;
9339
9340                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9341                                      &pcix_cmd);
9342                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9343                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9344                         pcix_cmd |= PCI_X_CMD_READ_2K;
9345                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9346                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9347                         pcix_cmd |= PCI_X_CMD_READ_2K;
9348                 }
9349                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9350                                       pcix_cmd);
9351         }
9352
9353         tw32_f(RDMAC_MODE, rdmac_mode);
9354         udelay(40);
9355
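        /* 5719 workaround: if any read-DMA channel length register is
         * already above the MTU, enable the TX length fix and remember
         * it via the 5719_RDMA_BUG flag, which the statistics poller
         * clears again once enough packets have been transmitted.
         */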
9356         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9357                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9358                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9359                                 break;
9360                 }
9361                 if (i < TG3_NUM_RDMA_CHANNELS) {
9362                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9363                         val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9364                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9365                         tg3_flag_set(tp, 5719_RDMA_BUG);
9366                 }
9367         }
9368
9369         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9370         if (!tg3_flag(tp, 5705_PLUS))
9371                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9372
9373         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9374                 tw32(SNDDATAC_MODE,
9375                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9376         else
9377                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9378
9379         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9380         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9381         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9382         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9383                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9384         tw32(RCVDBDI_MODE, val);
9385         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9386         if (tg3_flag(tp, HW_TSO_1) ||
9387             tg3_flag(tp, HW_TSO_2) ||
9388             tg3_flag(tp, HW_TSO_3))
9389                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9390         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9391         if (tg3_flag(tp, ENABLE_TSS))
9392                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9393         tw32(SNDBDI_MODE, val);
9394         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9395
9396         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9397                 err = tg3_load_5701_a0_firmware_fix(tp);
9398                 if (err)
9399                         return err;
9400         }
9401
9402         if (tg3_flag(tp, TSO_CAPABLE)) {
9403                 err = tg3_load_tso_firmware(tp);
9404                 if (err)
9405                         return err;
9406         }
9407
9408         tp->tx_mode = TX_MODE_ENABLE;
9409
9410         if (tg3_flag(tp, 5755_PLUS) ||
9411             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9412                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9413
9414         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9415                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9416                 tp->tx_mode &= ~val;
9417                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9418         }
9419
9420         tw32_f(MAC_TX_MODE, tp->tx_mode);
9421         udelay(100);
9422
9423         if (tg3_flag(tp, ENABLE_RSS)) {
9424                 tg3_rss_write_indir_tbl(tp);
9425
9426                 /* Set up the "secret" hash key. */
9427                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9428                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9429                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9430                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9431                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9432                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9433                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9434                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9435                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9436                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9437         }
9438
9439         tp->rx_mode = RX_MODE_ENABLE;
9440         if (tg3_flag(tp, 5755_PLUS))
9441                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9442
9443         if (tg3_flag(tp, ENABLE_RSS))
9444                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9445                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9446                                RX_MODE_RSS_IPV6_HASH_EN |
9447                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9448                                RX_MODE_RSS_IPV4_HASH_EN |
9449                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9450
9451         tw32_f(MAC_RX_MODE, tp->rx_mode);
9452         udelay(10);
9453
9454         tw32(MAC_LED_CTRL, tp->led_ctrl);
9455
9456         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9457         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9458                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9459                 udelay(10);
9460         }
9461         tw32_f(MAC_RX_MODE, tp->rx_mode);
9462         udelay(10);
9463
9464         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9465                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9466                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9467                         /* Set drive transmission level to 1.2V, but only
9468                          * if the signal pre-emphasis bit is not set. */
9469                         val = tr32(MAC_SERDES_CFG);
9470                         val &= 0xfffff000;
9471                         val |= 0x880;
9472                         tw32(MAC_SERDES_CFG, val);
9473                 }
9474                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9475                         tw32(MAC_SERDES_CFG, 0x616000);
9476         }
9477
9478         /* Prevent chip from dropping frames when flow control
9479          * is enabled.
9480          */
9481         if (tg3_flag(tp, 57765_CLASS))
9482                 val = 1;
9483         else
9484                 val = 2;
9485         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9486
9487         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9488             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9489                 /* Use hardware link auto-negotiation */
9490                 tg3_flag_set(tp, HW_AUTONEG);
9491         }
9492
9493         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9494             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9495                 u32 tmp;
9496
9497                 tmp = tr32(SERDES_RX_CTRL);
9498                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9499                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9500                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9501                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9502         }
9503
9504         if (!tg3_flag(tp, USE_PHYLIB)) {
9505                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9506                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9507
9508                 err = tg3_setup_phy(tp, 0);
9509                 if (err)
9510                         return err;
9511
9512                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9513                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9514                         u32 tmp;
9515
9516                         /* Clear CRC stats. */
9517                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9518                                 tg3_writephy(tp, MII_TG3_TEST1,
9519                                              tmp | MII_TG3_TEST1_CRC_EN);
9520                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9521                         }
9522                 }
9523         }
9524
9525         __tg3_set_rx_mode(tp->dev);
9526
9527         /* Initialize receive rules. */
9528         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9529         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9530         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9531         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9532
9533         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9534                 limit = 8;
9535         else
9536                 limit = 16;
9537         if (tg3_flag(tp, ENABLE_ASF))
9538                 limit -= 4;
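        /* The cases below deliberately fall through: starting at the
         * computed limit, every rule/value pair down to slot 4 is
         * cleared.  Rules 0 and 1 were just programmed above; slots 2
         * and 3 are intentionally left alone.
         */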
9539         switch (limit) {
9540         case 16:
9541                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9542         case 15:
9543                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9544         case 14:
9545                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9546         case 13:
9547                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9548         case 12:
9549                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9550         case 11:
9551                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9552         case 10:
9553                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9554         case 9:
9555                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9556         case 8:
9557                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9558         case 7:
9559                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9560         case 6:
9561                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9562         case 5:
9563                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9564         case 4:
9565                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9566         case 3:
9567                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9568         case 2:
9569         case 1:
9570
9571         default:
9572                 break;
9573         }
9574
9575         if (tg3_flag(tp, ENABLE_APE))
9576                 /* Write our heartbeat update interval to the APE (disabled here). */
9577                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9578                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9579
9580         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9581
9582         return 0;
9583 }
9584
9585 /* Called at device open time to get the chip ready for
9586  * packet processing.  Invoked with tp->lock held.
9587  */
9588 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9589 {
9590         tg3_switch_clocks(tp);
9591
9592         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9593
9594         return tg3_reset_hw(tp, reset_phy);
9595 }
9596
9597 #if IS_ENABLED(CONFIG_HWMON)
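/* Walk the sensor data records in the APE scratchpad, zeroing any
 * record whose signature or active flag does not check out.
 */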
9598 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9599 {
9600         int i;
9601
9602         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9603                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9604
9605                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9606                 off += len;
9607
9608                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9609                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9610                         memset(ocir, 0, TG3_OCIR_LEN);
9611         }
9612 }
9613
9614 /* sysfs attributes for hwmon */
9615 static ssize_t tg3_show_temp(struct device *dev,
9616                              struct device_attribute *devattr, char *buf)
9617 {
9618         struct pci_dev *pdev = to_pci_dev(dev);
9619         struct net_device *netdev = pci_get_drvdata(pdev);
9620         struct tg3 *tp = netdev_priv(netdev);
9621         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9622         u32 temperature;
9623
9624         spin_lock_bh(&tp->lock);
9625         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9626                                 sizeof(temperature));
9627         spin_unlock_bh(&tp->lock);
9628         return sprintf(buf, "%u\n", temperature);
9629 }
9630
9631
9632 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
9633                           TG3_TEMP_SENSOR_OFFSET);
9634 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
9635                           TG3_TEMP_CAUTION_OFFSET);
9636 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
9637                           TG3_TEMP_MAX_OFFSET);
9638
9639 static struct attribute *tg3_attributes[] = {
9640         &sensor_dev_attr_temp1_input.dev_attr.attr,
9641         &sensor_dev_attr_temp1_crit.dev_attr.attr,
9642         &sensor_dev_attr_temp1_max.dev_attr.attr,
9643         NULL
9644 };
9645
9646 static const struct attribute_group tg3_group = {
9647         .attrs = tg3_attributes,
9648 };
9649
9650 #endif
9651
9652 static void tg3_hwmon_close(struct tg3 *tp)
9653 {
9654 #if IS_ENABLED(CONFIG_HWMON)
9655         if (tp->hwmon_dev) {
9656                 hwmon_device_unregister(tp->hwmon_dev);
9657                 tp->hwmon_dev = NULL;
9658                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9659         }
9660 #endif
9661 }
9662
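/* Register the hwmon temperature interface, but only when the APE
 * scratchpad actually carries sensor records; devices without sensor
 * data skip registration entirely.
 */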
9663 static void tg3_hwmon_open(struct tg3 *tp)
9664 {
9665 #if IS_ENABLED(CONFIG_HWMON)
9666         int i, err;
9667         u32 size = 0;
9668         struct pci_dev *pdev = tp->pdev;
9669         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9670
9671         tg3_sd_scan_scratchpad(tp, ocirs);
9672
9673         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9674                 if (!ocirs[i].src_data_length)
9675                         continue;
9676
9677                 size += ocirs[i].src_hdr_length;
9678                 size += ocirs[i].src_data_length;
9679         }
9680
9681         if (!size)
9682                 return;
9683
9684         /* Register hwmon sysfs hooks */
9685         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9686         if (err) {
9687                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9688                 return;
9689         }
9690
9691         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9692         if (IS_ERR(tp->hwmon_dev)) {
9693                 tp->hwmon_dev = NULL;
9694                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9695                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9696         }
9697 #endif
9698 }
9699
9700
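/* Add the value read from a 32-bit hardware statistics register into a
 * 64-bit high/low software counter, carrying into .high when .low
 * wraps.
 */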
9701 #define TG3_STAT_ADD32(PSTAT, REG) \
9702 do {    u32 __val = tr32(REG); \
9703         (PSTAT)->low += __val; \
9704         if ((PSTAT)->low < __val) \
9705                 (PSTAT)->high += 1; \
9706 } while (0)
9707
9708 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9709 {
9710         struct tg3_hw_stats *sp = tp->hw_stats;
9711
9712         if (!netif_carrier_ok(tp->dev))
9713                 return;
9714
9715         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9716         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9717         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9718         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9719         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9720         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9721         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9722         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9723         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9724         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9725         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9726         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9727         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9728         if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
9729                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
9730                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
9731                 u32 val;
9732
9733                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9734                 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
9735                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9736                 tg3_flag_clear(tp, 5719_RDMA_BUG);
9737         }
9738
9739         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9740         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9741         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9742         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9743         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9744         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9745         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9746         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9747         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9748         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9749         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9750         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9751         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9752         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9753
9754         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9755         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9756             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9757             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9758                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9759         } else {
9760                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9761                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9762                 if (val) {
9763                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9764                         sp->rx_discards.low += val;
9765                         if (sp->rx_discards.low < val)
9766                                 sp->rx_discards.high += 1;
9767                 }
9768                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9769         }
9770         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9771 }
9772
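/* Work around lost MSIs: if a NAPI context reports pending work but
 * neither its rx nor tx consumer index has moved since the previous
 * timer tick, the interrupt was most likely missed.  One tick of grace
 * is allowed (chk_msi_cnt) before the handler is invoked by hand.
 */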
9773 static void tg3_chk_missed_msi(struct tg3 *tp)
9774 {
9775         u32 i;
9776
9777         for (i = 0; i < tp->irq_cnt; i++) {
9778                 struct tg3_napi *tnapi = &tp->napi[i];
9779
9780                 if (tg3_has_work(tnapi)) {
9781                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9782                             tnapi->last_tx_cons == tnapi->tx_cons) {
9783                                 if (tnapi->chk_msi_cnt < 1) {
9784                                         tnapi->chk_msi_cnt++;
9785                                         return;
9786                                 }
9787                                 tg3_msi(0, tnapi);
9788                         }
9789                 }
9790                 tnapi->chk_msi_cnt = 0;
9791                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9792                 tnapi->last_tx_cons = tnapi->tx_cons;
9793         }
9794 }
9795
9796 static void tg3_timer(unsigned long __opaque)
9797 {
9798         struct tg3 *tp = (struct tg3 *) __opaque;
9799
9800         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9801                 goto restart_timer;
9802
9803         spin_lock(&tp->lock);
9804
9805         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9806             tg3_flag(tp, 57765_CLASS))
9807                 tg3_chk_missed_msi(tp);
9808
9809         if (!tg3_flag(tp, TAGGED_STATUS)) {
9810         /* All of this garbage is because, when using non-tagged
9811          * IRQ status, the mailbox/status_block protocol the chip
9812          * uses with the CPU is race prone.
9813          */
9814                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9815                         tw32(GRC_LOCAL_CTRL,
9816                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9817                 } else {
9818                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9819                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9820                 }
9821
9822                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9823                         spin_unlock(&tp->lock);
9824                         tg3_reset_task_schedule(tp);
9825                         goto restart_timer;
9826                 }
9827         }
9828
9829         /* This part only runs once per second. */
9830         if (!--tp->timer_counter) {
9831                 if (tg3_flag(tp, 5705_PLUS))
9832                         tg3_periodic_fetch_stats(tp);
9833
9834                 if (tp->setlpicnt && !--tp->setlpicnt)
9835                         tg3_phy_eee_enable(tp);
9836
9837                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9838                         u32 mac_stat;
9839                         int phy_event;
9840
9841                         mac_stat = tr32(MAC_STATUS);
9842
9843                         phy_event = 0;
9844                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9845                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9846                                         phy_event = 1;
9847                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9848                                 phy_event = 1;
9849
9850                         if (phy_event)
9851                                 tg3_setup_phy(tp, 0);
9852                 } else if (tg3_flag(tp, POLL_SERDES)) {
9853                         u32 mac_stat = tr32(MAC_STATUS);
9854                         int need_setup = 0;
9855
9856                         if (netif_carrier_ok(tp->dev) &&
9857                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9858                                 need_setup = 1;
9859                         }
9860                         if (!netif_carrier_ok(tp->dev) &&
9861                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9862                                          MAC_STATUS_SIGNAL_DET))) {
9863                                 need_setup = 1;
9864                         }
9865                         if (need_setup) {
9866                                 if (!tp->serdes_counter) {
9867                                         tw32_f(MAC_MODE,
9868                                              (tp->mac_mode &
9869                                               ~MAC_MODE_PORT_MODE_MASK));
9870                                         udelay(40);
9871                                         tw32_f(MAC_MODE, tp->mac_mode);
9872                                         udelay(40);
9873                                 }
9874                                 tg3_setup_phy(tp, 0);
9875                         }
9876                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9877                            tg3_flag(tp, 5780_CLASS)) {
9878                         tg3_serdes_parallel_detect(tp);
9879                 }
9880
9881                 tp->timer_counter = tp->timer_multiplier;
9882         }
9883
9884         /* Heartbeat is only sent once every 2 seconds.
9885          *
9886          * The heartbeat is to tell the ASF firmware that the host
9887          * driver is still alive.  In the event that the OS crashes,
9888          * ASF needs to reset the hardware to free up the FIFO space
9889          * that may be filled with rx packets destined for the host.
9890          * If the FIFO is full, ASF will no longer function properly.
9891          *
9892          * Unintended resets have been reported on real-time kernels,
9893          * where the timer doesn't run on time.  Netpoll will also have
9894          * the same problem.
9895          *
9896          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9897          * to check the ring condition when the heartbeat is expiring
9898          * before doing the reset.  This will prevent most unintended
9899          * resets.
9900          */
9901         if (!--tp->asf_counter) {
9902                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9903                         tg3_wait_for_event_ack(tp);
9904
9905                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9906                                       FWCMD_NICDRV_ALIVE3);
9907                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9908                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9909                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9910
9911                         tg3_generate_fw_event(tp);
9912                 }
9913                 tp->asf_counter = tp->asf_multiplier;
9914         }
9915
9916         spin_unlock(&tp->lock);
9917
9918 restart_timer:
9919         tp->timer.expires = jiffies + tp->timer_offset;
9920         add_timer(&tp->timer);
9921 }
9922
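/* The timer fires every timer_offset jiffies: HZ (1s) when tagged status
 * makes the race checks in tg3_timer() unnecessary, HZ/10 (100ms)
 * otherwise.  timer_multiplier converts ticks back into a once-per-second
 * count (1 or 10 decrements), and asf_multiplier scales that by
 * TG3_FW_UPDATE_FREQ_SEC for the firmware heartbeat period.
 */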
9923 static void __devinit tg3_timer_init(struct tg3 *tp)
9924 {
9925         if (tg3_flag(tp, TAGGED_STATUS) &&
9926             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9927             !tg3_flag(tp, 57765_CLASS))
9928                 tp->timer_offset = HZ;
9929         else
9930                 tp->timer_offset = HZ / 10;
9931
9932         BUG_ON(tp->timer_offset > HZ);
9933
9934         tp->timer_multiplier = (HZ / tp->timer_offset);
9935         tp->asf_multiplier = (HZ / tp->timer_offset) *
9936                              TG3_FW_UPDATE_FREQ_SEC;
9937
9938         init_timer(&tp->timer);
9939         tp->timer.data = (unsigned long) tp;
9940         tp->timer.function = tg3_timer;
9941 }
9942
9943 static void tg3_timer_start(struct tg3 *tp)
9944 {
9945         tp->asf_counter   = tp->asf_multiplier;
9946         tp->timer_counter = tp->timer_multiplier;
9947
9948         tp->timer.expires = jiffies + tp->timer_offset;
9949         add_timer(&tp->timer);
9950 }
9951
9952 static void tg3_timer_stop(struct tg3 *tp)
9953 {
9954         del_timer_sync(&tp->timer);
9955 }
9956
9957 /* Restart hardware after configuration changes, self-test, etc.
9958  * Invoked with tp->lock held.
9959  */
9960 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9961         __releases(tp->lock)
9962         __acquires(tp->lock)
9963 {
9964         int err;
9965
9966         err = tg3_init_hw(tp, reset_phy);
9967         if (err) {
9968                 netdev_err(tp->dev,
9969                            "Failed to re-initialize device, aborting\n");
9970                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9971                 tg3_full_unlock(tp);
9972                 tg3_timer_stop(tp);
9973                 tp->irq_sync = 0;
9974                 tg3_napi_enable(tp);
9975                 dev_close(tp->dev);
9976                 tg3_full_lock(tp, 0);
9977         }
9978         return err;
9979 }
9980
9981 static void tg3_reset_task(struct work_struct *work)
9982 {
9983         struct tg3 *tp = container_of(work, struct tg3, reset_task);
9984         int err;
9985
9986         tg3_full_lock(tp, 0);
9987
9988         if (!netif_running(tp->dev)) {
9989                 tg3_flag_clear(tp, RESET_TASK_PENDING);
9990                 tg3_full_unlock(tp);
9991                 return;
9992         }
9993
9994         tg3_full_unlock(tp);
9995
9996         tg3_phy_stop(tp);
9997
9998         tg3_netif_stop(tp);
9999
10000         tg3_full_lock(tp, 1);
10001
10002         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10003                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10004                 tp->write32_rx_mbox = tg3_write_flush_reg32;
10005                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10006                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10007         }
10008
10009         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10010         err = tg3_init_hw(tp, 1);
10011         if (err)
10012                 goto out;
10013
10014         tg3_netif_start(tp);
10015
10016 out:
10017         tg3_full_unlock(tp);
10018
10019         if (!err)
10020                 tg3_phy_start(tp);
10021
10022         tg3_flag_clear(tp, RESET_TASK_PENDING);
10023 }
10024
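/* Select the ISR for a vector: 1-shot MSI, plain MSI/MSI-X, tagged-status
 * INTx, or legacy INTx.  Only the INTx variants may share the line, so
 * IRQF_SHARED is passed for them alone.  With multiple vectors, each one
 * beyond the first is labelled "<ifname>-<n>" (e.g. "eth0-1"; interface
 * name illustrative).
 */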
10025 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10026 {
10027         irq_handler_t fn;
10028         unsigned long flags;
10029         char *name;
10030         struct tg3_napi *tnapi = &tp->napi[irq_num];
10031
10032         if (tp->irq_cnt == 1)
10033                 name = tp->dev->name;
10034         else {
10035                 name = &tnapi->irq_lbl[0];
10036                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10037                 name[IFNAMSIZ-1] = 0;
10038         }
10039
10040         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10041                 fn = tg3_msi;
10042                 if (tg3_flag(tp, 1SHOT_MSI))
10043                         fn = tg3_msi_1shot;
10044                 flags = 0;
10045         } else {
10046                 fn = tg3_interrupt;
10047                 if (tg3_flag(tp, TAGGED_STATUS))
10048                         fn = tg3_interrupt_tagged;
10049                 flags = IRQF_SHARED;
10050         }
10051
10052         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10053 }
10054
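/* Prove that interrupt delivery works at all: swap in a minimal test ISR,
 * kick the coalescing engine with HOSTCC_MODE_NOW, then poll the interrupt
 * mailbox for up to ~50ms (5 x 10ms).  A nonzero mailbox value, or the PCI
 * interrupt being masked in MISC_HOST_CTRL, both count as evidence that
 * the interrupt arrived.
 */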
10055 static int tg3_test_interrupt(struct tg3 *tp)
10056 {
10057         struct tg3_napi *tnapi = &tp->napi[0];
10058         struct net_device *dev = tp->dev;
10059         int err, i, intr_ok = 0;
10060         u32 val;
10061
10062         if (!netif_running(dev))
10063                 return -ENODEV;
10064
10065         tg3_disable_ints(tp);
10066
10067         free_irq(tnapi->irq_vec, tnapi);
10068
10069         /*
10070          * Turn off MSI one shot mode.  Otherwise this test has no
10071          * observable way to know whether the interrupt was delivered.
10072          */
10073         if (tg3_flag(tp, 57765_PLUS)) {
10074                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10075                 tw32(MSGINT_MODE, val);
10076         }
10077
10078         err = request_irq(tnapi->irq_vec, tg3_test_isr,
10079                           IRQF_SHARED, dev->name, tnapi);
10080         if (err)
10081                 return err;
10082
10083         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10084         tg3_enable_ints(tp);
10085
10086         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10087                tnapi->coal_now);
10088
10089         for (i = 0; i < 5; i++) {
10090                 u32 int_mbox, misc_host_ctrl;
10091
10092                 int_mbox = tr32_mailbox(tnapi->int_mbox);
10093                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10094
10095                 if ((int_mbox != 0) ||
10096                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10097                         intr_ok = 1;
10098                         break;
10099                 }
10100
10101                 if (tg3_flag(tp, 57765_PLUS) &&
10102                     tnapi->hw_status->status_tag != tnapi->last_tag)
10103                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10104
10105                 msleep(10);
10106         }
10107
10108         tg3_disable_ints(tp);
10109
10110         free_irq(tnapi->irq_vec, tnapi);
10111
10112         err = tg3_request_irq(tp, 0);
10113
10114         if (err)
10115                 return err;
10116
10117         if (intr_ok) {
10118                 /* Reenable MSI one shot mode. */
10119                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10120                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10121                         tw32(MSGINT_MODE, val);
10122                 }
10123                 return 0;
10124         }
10125
10126         return -EIO;
10127 }
10128
10129 /* Returns 0 if the MSI test succeeds, or if the MSI test fails and
10130  * INTx mode is successfully restored.
10131  */
10132 static int tg3_test_msi(struct tg3 *tp)
10133 {
10134         int err;
10135         u16 pci_cmd;
10136
10137         if (!tg3_flag(tp, USING_MSI))
10138                 return 0;
10139
10140         /* Turn off SERR reporting in case MSI terminates with Master
10141          * Abort.
10142          */
10143         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10144         pci_write_config_word(tp->pdev, PCI_COMMAND,
10145                               pci_cmd & ~PCI_COMMAND_SERR);
10146
10147         err = tg3_test_interrupt(tp);
10148
10149         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10150
10151         if (!err)
10152                 return 0;
10153
10154         /* other failures */
10155         if (err != -EIO)
10156                 return err;
10157
10158         /* MSI test failed, go back to INTx mode */
10159         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10160                     "to INTx mode. Please report this failure to the PCI "
10161                     "maintainer and include system chipset information\n");
10162
10163         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10164
10165         pci_disable_msi(tp->pdev);
10166
10167         tg3_flag_clear(tp, USING_MSI);
10168         tp->napi[0].irq_vec = tp->pdev->irq;
10169
10170         err = tg3_request_irq(tp, 0);
10171         if (err)
10172                 return err;
10173
10174         /* Need to reset the chip because the MSI cycle may have terminated
10175          * with Master Abort.
10176          */
10177         tg3_full_lock(tp, 1);
10178
10179         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10180         err = tg3_init_hw(tp, 1);
10181
10182         tg3_full_unlock(tp);
10183
10184         if (err)
10185                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10186
10187         return err;
10188 }
10189
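/* Expected firmware image layout (big-endian 32-bit words):
 *
 *     fw_data[0]  version
 *     fw_data[1]  start address
 *     fw_data[2]  full length, including BSS
 *     fw_data[3]  first data word ...
 *
 * The 12-byte header is why fw_len is checked against tp->fw->size - 12.
 */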
10190 static int tg3_request_firmware(struct tg3 *tp)
10191 {
10192         const __be32 *fw_data;
10193
10194         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10195                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10196                            tp->fw_needed);
10197                 return -ENOENT;
10198         }
10199
10200         fw_data = (void *)tp->fw->data;
10201
10202         /* The firmware blob starts with version numbers, followed by
10203          * the start address and the _full_ length including BSS sections
10204          * (which must be longer than the actual data, of course).
10205          */
10206
10207         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
10208         if (tp->fw_len < (tp->fw->size - 12)) {
10209                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10210                            tp->fw_len, tp->fw_needed);
10211                 release_firmware(tp->fw);
10212                 tp->fw = NULL;
10213                 return -EINVAL;
10214         }
10215
10216         /* We no longer need firmware; we have it. */
10217         tp->fw_needed = NULL;
10218         return 0;
10219 }
10220
10221 static u32 tg3_irq_count(struct tg3 *tp)
10222 {
10223         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10224
10225         if (irq_cnt > 1) {
10226         /* We want as many rx rings enabled as there are CPUs.
10227          * In multiqueue MSI-X mode, the first MSI-X vector
10228          * only deals with link interrupts, etc., so we add
10229          * one to the number of vectors we are requesting.
10230          */
10231                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10232         }
10233
10234         return irq_cnt;
10235 }
10236
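/* pci_enable_msix() returns 0 on success, a negative errno on failure, or
 * a positive count of the vectors actually available.  In the positive
 * case, retry with the reduced count and shrink the rx/tx queue plan to
 * match (vector 0 is reserved for link and other misc events).
 */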
10237 static bool tg3_enable_msix(struct tg3 *tp)
10238 {
10239         int i, rc;
10240         struct msix_entry msix_ent[tp->irq_max];
10241
10242         tp->txq_cnt = tp->txq_req;
10243         tp->rxq_cnt = tp->rxq_req;
10244         if (!tp->rxq_cnt)
10245                 tp->rxq_cnt = netif_get_num_default_rss_queues();
10246         if (tp->rxq_cnt > tp->rxq_max)
10247                 tp->rxq_cnt = tp->rxq_max;
10248
10249         /* Disable multiple TX rings by default.  Simple round-robin hardware
10250          * scheduling of the TX rings can cause starvation of rings with
10251          * small packets when other rings have TSO or jumbo packets.
10252          */
10253         if (!tp->txq_req)
10254                 tp->txq_cnt = 1;
10255
10256         tp->irq_cnt = tg3_irq_count(tp);
10257
10258         for (i = 0; i < tp->irq_max; i++) {
10259                 msix_ent[i].entry  = i;
10260                 msix_ent[i].vector = 0;
10261         }
10262
10263         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10264         if (rc < 0) {
10265                 return false;
10266         } else if (rc != 0) {
10267                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10268                         return false;
10269                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10270                               tp->irq_cnt, rc);
10271                 tp->irq_cnt = rc;
10272                 tp->rxq_cnt = max(rc - 1, 1);
10273                 if (tp->txq_cnt)
10274                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10275         }
10276
10277         for (i = 0; i < tp->irq_max; i++)
10278                 tp->napi[i].irq_vec = msix_ent[i].vector;
10279
10280         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10281                 pci_disable_msix(tp->pdev);
10282                 return false;
10283         }
10284
10285         if (tp->irq_cnt == 1)
10286                 return true;
10287
10288         tg3_flag_set(tp, ENABLE_RSS);
10289
10290         if (tp->txq_cnt > 1)
10291                 tg3_flag_set(tp, ENABLE_TSS);
10292
10293         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10294
10295         return true;
10296 }
10297
10298 static void tg3_ints_init(struct tg3 *tp)
10299 {
10300         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10301             !tg3_flag(tp, TAGGED_STATUS)) {
10302                 /* All MSI-supporting chips should support tagged
10303                  * status; warn and fall back to INTx otherwise.
10304                  */
10305                 netdev_warn(tp->dev,
10306                             "MSI without TAGGED_STATUS? Not using MSI\n");
10307                 goto defcfg;
10308         }
10309
10310         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10311                 tg3_flag_set(tp, USING_MSIX);
10312         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10313                 tg3_flag_set(tp, USING_MSI);
10314
10315         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10316                 u32 msi_mode = tr32(MSGINT_MODE);
10317                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10318                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10319                 if (!tg3_flag(tp, 1SHOT_MSI))
10320                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10321                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10322         }
10323 defcfg:
10324         if (!tg3_flag(tp, USING_MSIX)) {
10325                 tp->irq_cnt = 1;
10326                 tp->napi[0].irq_vec = tp->pdev->irq;
10327         }
10328
10329         if (tp->irq_cnt == 1) {
10330                 tp->txq_cnt = 1;
10331                 tp->rxq_cnt = 1;
10332                 netif_set_real_num_tx_queues(tp->dev, 1);
10333                 netif_set_real_num_rx_queues(tp->dev, 1);
10334         }
10335 }
10336
10337 static void tg3_ints_fini(struct tg3 *tp)
10338 {
10339         if (tg3_flag(tp, USING_MSIX))
10340                 pci_disable_msix(tp->pdev);
10341         else if (tg3_flag(tp, USING_MSI))
10342                 pci_disable_msi(tp->pdev);
10343         tg3_flag_clear(tp, USING_MSI);
10344         tg3_flag_clear(tp, USING_MSIX);
10345         tg3_flag_clear(tp, ENABLE_RSS);
10346         tg3_flag_clear(tp, ENABLE_TSS);
10347 }
10348
10349 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq)
10350 {
10351         struct net_device *dev = tp->dev;
10352         int i, err;
10353
10354         /*
10355          * Set up interrupts first so we know how
10356          * many NAPI resources to allocate.
10357          */
10358         tg3_ints_init(tp);
10359
10360         tg3_rss_check_indir_tbl(tp);
10361
10362         /* The placement of this call is tied
10363          * to the setup and use of Host TX descriptors.
10364          */
10365         err = tg3_alloc_consistent(tp);
10366         if (err)
10367                 goto err_out1;
10368
10369         tg3_napi_init(tp);
10370
10371         tg3_napi_enable(tp);
10372
10373         for (i = 0; i < tp->irq_cnt; i++) {
10374                 struct tg3_napi *tnapi = &tp->napi[i];
10375                 err = tg3_request_irq(tp, i);
10376                 if (err) {
10377                         for (i--; i >= 0; i--) {
10378                                 tnapi = &tp->napi[i];
10379                                 free_irq(tnapi->irq_vec, tnapi);
10380                         }
10381                         goto err_out2;
10382                 }
10383         }
10384
10385         tg3_full_lock(tp, 0);
10386
10387         err = tg3_init_hw(tp, reset_phy);
10388         if (err) {
10389                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10390                 tg3_free_rings(tp);
10391         }
10392
10393         tg3_full_unlock(tp);
10394
10395         if (err)
10396                 goto err_out3;
10397
10398         if (test_irq && tg3_flag(tp, USING_MSI)) {
10399                 err = tg3_test_msi(tp);
10400
10401                 if (err) {
10402                         tg3_full_lock(tp, 0);
10403                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10404                         tg3_free_rings(tp);
10405                         tg3_full_unlock(tp);
10406
10407                         goto err_out2;
10408                 }
10409
10410                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10411                         u32 val = tr32(PCIE_TRANSACTION_CFG);
10412
10413                         tw32(PCIE_TRANSACTION_CFG,
10414                              val | PCIE_TRANS_CFG_1SHOT_MSI);
10415                 }
10416         }
10417
10418         tg3_phy_start(tp);
10419
10420         tg3_hwmon_open(tp);
10421
10422         tg3_full_lock(tp, 0);
10423
10424         tg3_timer_start(tp);
10425         tg3_flag_set(tp, INIT_COMPLETE);
10426         tg3_enable_ints(tp);
10427
10428         tg3_full_unlock(tp);
10429
10430         netif_tx_start_all_queues(dev);
10431
10432         /*
10433          * Reset the loopback feature if it was turned on while the device
10434          * was down; make sure that it's installed properly now.
10435          */
10436         if (dev->features & NETIF_F_LOOPBACK)
10437                 tg3_set_loopback(dev, dev->features);
10438
10439         return 0;
10440
10441 err_out3:
10442         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10443                 struct tg3_napi *tnapi = &tp->napi[i];
10444                 free_irq(tnapi->irq_vec, tnapi);
10445         }
10446
10447 err_out2:
10448         tg3_napi_disable(tp);
10449         tg3_napi_fini(tp);
10450         tg3_free_consistent(tp);
10451
10452 err_out1:
10453         tg3_ints_fini(tp);
10454
10455         return err;
10456 }
10457
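/* Teardown mirrors tg3_start() in reverse: quiesce NAPI, the reset task
 * and the timer, halt the chip under the full lock, then release the IRQ
 * vectors, NAPI contexts and DMA memory.
 */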
10458 static void tg3_stop(struct tg3 *tp)
10459 {
10460         int i;
10461
10462         tg3_napi_disable(tp);
10463         tg3_reset_task_cancel(tp);
10464
10465         netif_tx_disable(tp->dev);
10466
10467         tg3_timer_stop(tp);
10468
10469         tg3_hwmon_close(tp);
10470
10471         tg3_phy_stop(tp);
10472
10473         tg3_full_lock(tp, 1);
10474
10475         tg3_disable_ints(tp);
10476
10477         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10478         tg3_free_rings(tp);
10479         tg3_flag_clear(tp, INIT_COMPLETE);
10480
10481         tg3_full_unlock(tp);
10482
10483         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10484                 struct tg3_napi *tnapi = &tp->napi[i];
10485                 free_irq(tnapi->irq_vec, tnapi);
10486         }
10487
10488         tg3_ints_fini(tp);
10489
10490         tg3_napi_fini(tp);
10491
10492         tg3_free_consistent(tp);
10493 }
10494
10495 static int tg3_open(struct net_device *dev)
10496 {
10497         struct tg3 *tp = netdev_priv(dev);
10498         int err;
10499
10500         if (tp->fw_needed) {
10501                 err = tg3_request_firmware(tp);
10502                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10503                         if (err)
10504                                 return err;
10505                 } else if (err) {
10506                         netdev_warn(tp->dev, "TSO capability disabled\n");
10507                         tg3_flag_clear(tp, TSO_CAPABLE);
10508                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10509                         netdev_notice(tp->dev, "TSO capability restored\n");
10510                         tg3_flag_set(tp, TSO_CAPABLE);
10511                 }
10512         }
10513
10514         netif_carrier_off(tp->dev);
10515
10516         err = tg3_power_up(tp);
10517         if (err)
10518                 return err;
10519
10520         tg3_full_lock(tp, 0);
10521
10522         tg3_disable_ints(tp);
10523         tg3_flag_clear(tp, INIT_COMPLETE);
10524
10525         tg3_full_unlock(tp);
10526
10527         err = tg3_start(tp, true, true);
10528         if (err) {
10529                 tg3_frob_aux_power(tp, false);
10530                 pci_set_power_state(tp->pdev, PCI_D3hot);
10531         }
10532         return err;
10533 }
10534
10535 static int tg3_close(struct net_device *dev)
10536 {
10537         struct tg3 *tp = netdev_priv(dev);
10538
10539         tg3_stop(tp);
10540
10541         /* Clear stats across close / open calls */
10542         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10543         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10544
10545         tg3_power_down(tp);
10546
10547         netif_carrier_off(tp->dev);
10548
10549         return 0;
10550 }
10551
10552 static inline u64 get_stat64(tg3_stat64_t *val)
10553 {
10554         return ((u64)val->high << 32) | ((u64)val->low);
10555 }
10556
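/* On 5700/5701 with a copper PHY, CRC errors are counted by reading the
 * PHY's own receive error counter (enabled via MII_TG3_TEST1_CRC_EN) and
 * accumulating it in software, rather than using the MAC's rx_fcs_errors
 * statistic on those chips.
 */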
10557 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10558 {
10559         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10560
10561         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10562             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10563              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10564                 u32 val;
10565
10566                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10567                         tg3_writephy(tp, MII_TG3_TEST1,
10568                                      val | MII_TG3_TEST1_CRC_EN);
10569                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10570                 } else
10571                         val = 0;
10572
10573                 tp->phy_crc_errors += val;
10574
10575                 return tp->phy_crc_errors;
10576         }
10577
10578         return get_stat64(&hw_stats->rx_fcs_errors);
10579 }
10580
10581 #define ESTAT_ADD(member) \
10582         estats->member =        old_estats->member + \
10583                                 get_stat64(&hw_stats->member)
10584
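/* The hardware counters reset to zero on every chip reset, so each stat
 * is presented as old_estats (totals carried over from before the last
 * reset) plus the current hardware value; tg3_close() clears the saved
 * totals, so counts restart across close/open cycles.
 */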
10585 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10586 {
10587         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10588         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10589
10590         ESTAT_ADD(rx_octets);
10591         ESTAT_ADD(rx_fragments);
10592         ESTAT_ADD(rx_ucast_packets);
10593         ESTAT_ADD(rx_mcast_packets);
10594         ESTAT_ADD(rx_bcast_packets);
10595         ESTAT_ADD(rx_fcs_errors);
10596         ESTAT_ADD(rx_align_errors);
10597         ESTAT_ADD(rx_xon_pause_rcvd);
10598         ESTAT_ADD(rx_xoff_pause_rcvd);
10599         ESTAT_ADD(rx_mac_ctrl_rcvd);
10600         ESTAT_ADD(rx_xoff_entered);
10601         ESTAT_ADD(rx_frame_too_long_errors);
10602         ESTAT_ADD(rx_jabbers);
10603         ESTAT_ADD(rx_undersize_packets);
10604         ESTAT_ADD(rx_in_length_errors);
10605         ESTAT_ADD(rx_out_length_errors);
10606         ESTAT_ADD(rx_64_or_less_octet_packets);
10607         ESTAT_ADD(rx_65_to_127_octet_packets);
10608         ESTAT_ADD(rx_128_to_255_octet_packets);
10609         ESTAT_ADD(rx_256_to_511_octet_packets);
10610         ESTAT_ADD(rx_512_to_1023_octet_packets);
10611         ESTAT_ADD(rx_1024_to_1522_octet_packets);
10612         ESTAT_ADD(rx_1523_to_2047_octet_packets);
10613         ESTAT_ADD(rx_2048_to_4095_octet_packets);
10614         ESTAT_ADD(rx_4096_to_8191_octet_packets);
10615         ESTAT_ADD(rx_8192_to_9022_octet_packets);
10616
10617         ESTAT_ADD(tx_octets);
10618         ESTAT_ADD(tx_collisions);
10619         ESTAT_ADD(tx_xon_sent);
10620         ESTAT_ADD(tx_xoff_sent);
10621         ESTAT_ADD(tx_flow_control);
10622         ESTAT_ADD(tx_mac_errors);
10623         ESTAT_ADD(tx_single_collisions);
10624         ESTAT_ADD(tx_mult_collisions);
10625         ESTAT_ADD(tx_deferred);
10626         ESTAT_ADD(tx_excessive_collisions);
10627         ESTAT_ADD(tx_late_collisions);
10628         ESTAT_ADD(tx_collide_2times);
10629         ESTAT_ADD(tx_collide_3times);
10630         ESTAT_ADD(tx_collide_4times);
10631         ESTAT_ADD(tx_collide_5times);
10632         ESTAT_ADD(tx_collide_6times);
10633         ESTAT_ADD(tx_collide_7times);
10634         ESTAT_ADD(tx_collide_8times);
10635         ESTAT_ADD(tx_collide_9times);
10636         ESTAT_ADD(tx_collide_10times);
10637         ESTAT_ADD(tx_collide_11times);
10638         ESTAT_ADD(tx_collide_12times);
10639         ESTAT_ADD(tx_collide_13times);
10640         ESTAT_ADD(tx_collide_14times);
10641         ESTAT_ADD(tx_collide_15times);
10642         ESTAT_ADD(tx_ucast_packets);
10643         ESTAT_ADD(tx_mcast_packets);
10644         ESTAT_ADD(tx_bcast_packets);
10645         ESTAT_ADD(tx_carrier_sense_errors);
10646         ESTAT_ADD(tx_discards);
10647         ESTAT_ADD(tx_errors);
10648
10649         ESTAT_ADD(dma_writeq_full);
10650         ESTAT_ADD(dma_write_prioq_full);
10651         ESTAT_ADD(rxbds_empty);
10652         ESTAT_ADD(rx_discards);
10653         ESTAT_ADD(rx_errors);
10654         ESTAT_ADD(rx_threshold_hit);
10655
10656         ESTAT_ADD(dma_readq_full);
10657         ESTAT_ADD(dma_read_prioq_full);
10658         ESTAT_ADD(tx_comp_queue_full);
10659
10660         ESTAT_ADD(ring_set_send_prod_index);
10661         ESTAT_ADD(ring_status_update);
10662         ESTAT_ADD(nic_irqs);
10663         ESTAT_ADD(nic_avoided_irqs);
10664         ESTAT_ADD(nic_tx_threshold_hit);
10665
10666         ESTAT_ADD(mbuf_lwm_thresh_hit);
10667 }
10668
10669 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10670 {
10671         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10672         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10673
10674         stats->rx_packets = old_stats->rx_packets +
10675                 get_stat64(&hw_stats->rx_ucast_packets) +
10676                 get_stat64(&hw_stats->rx_mcast_packets) +
10677                 get_stat64(&hw_stats->rx_bcast_packets);
10678
10679         stats->tx_packets = old_stats->tx_packets +
10680                 get_stat64(&hw_stats->tx_ucast_packets) +
10681                 get_stat64(&hw_stats->tx_mcast_packets) +
10682                 get_stat64(&hw_stats->tx_bcast_packets);
10683
10684         stats->rx_bytes = old_stats->rx_bytes +
10685                 get_stat64(&hw_stats->rx_octets);
10686         stats->tx_bytes = old_stats->tx_bytes +
10687                 get_stat64(&hw_stats->tx_octets);
10688
10689         stats->rx_errors = old_stats->rx_errors +
10690                 get_stat64(&hw_stats->rx_errors);
10691         stats->tx_errors = old_stats->tx_errors +
10692                 get_stat64(&hw_stats->tx_errors) +
10693                 get_stat64(&hw_stats->tx_mac_errors) +
10694                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10695                 get_stat64(&hw_stats->tx_discards);
10696
10697         stats->multicast = old_stats->multicast +
10698                 get_stat64(&hw_stats->rx_mcast_packets);
10699         stats->collisions = old_stats->collisions +
10700                 get_stat64(&hw_stats->tx_collisions);
10701
10702         stats->rx_length_errors = old_stats->rx_length_errors +
10703                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10704                 get_stat64(&hw_stats->rx_undersize_packets);
10705
10706         stats->rx_over_errors = old_stats->rx_over_errors +
10707                 get_stat64(&hw_stats->rxbds_empty);
10708         stats->rx_frame_errors = old_stats->rx_frame_errors +
10709                 get_stat64(&hw_stats->rx_align_errors);
10710         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10711                 get_stat64(&hw_stats->tx_discards);
10712         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10713                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10714
10715         stats->rx_crc_errors = old_stats->rx_crc_errors +
10716                 tg3_calc_crc_errors(tp);
10717
10718         stats->rx_missed_errors = old_stats->rx_missed_errors +
10719                 get_stat64(&hw_stats->rx_discards);
10720
10721         stats->rx_dropped = tp->rx_dropped;
10722         stats->tx_dropped = tp->tx_dropped;
10723 }
10724
10725 static int tg3_get_regs_len(struct net_device *dev)
10726 {
10727         return TG3_REG_BLK_SIZE;
10728 }
10729
10730 static void tg3_get_regs(struct net_device *dev,
10731                 struct ethtool_regs *regs, void *_p)
10732 {
10733         struct tg3 *tp = netdev_priv(dev);
10734
10735         regs->version = 0;
10736
10737         memset(_p, 0, TG3_REG_BLK_SIZE);
10738
10739         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10740                 return;
10741
10742         tg3_full_lock(tp, 0);
10743
10744         tg3_dump_legacy_regs(tp, (u32 *)_p);
10745
10746         tg3_full_unlock(tp);
10747 }
10748
10749 static int tg3_get_eeprom_len(struct net_device *dev)
10750 {
10751         struct tg3 *tp = netdev_priv(dev);
10752
10753         return tp->nvram_size;
10754 }
10755
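/* NVRAM is accessed in big-endian 4-byte words, so an arbitrary
 * (offset, len) request is served in up to three pieces: a leading
 * partial word, the aligned middle, and a trailing partial word.
 * Illustrative example: offset=2, len=9 reads the words at offsets
 * 0, 4 and 8, copying 2 + 4 + 3 bytes out of them respectively.
 */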
10756 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10757 {
10758         struct tg3 *tp = netdev_priv(dev);
10759         int ret;
10760         u8  *pd;
10761         u32 i, offset, len, b_offset, b_count;
10762         __be32 val;
10763
10764         if (tg3_flag(tp, NO_NVRAM))
10765                 return -EINVAL;
10766
10767         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10768                 return -EAGAIN;
10769
10770         offset = eeprom->offset;
10771         len = eeprom->len;
10772         eeprom->len = 0;
10773
10774         eeprom->magic = TG3_EEPROM_MAGIC;
10775
10776         if (offset & 3) {
10777                 /* adjustments to start on required 4 byte boundary */
10778                 b_offset = offset & 3;
10779                 b_count = 4 - b_offset;
10780                 if (b_count > len) {
10781                         /* i.e. offset=1 len=2 */
10782                         b_count = len;
10783                 }
10784                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10785                 if (ret)
10786                         return ret;
10787                 memcpy(data, ((char *)&val) + b_offset, b_count);
10788                 len -= b_count;
10789                 offset += b_count;
10790                 eeprom->len += b_count;
10791         }
10792
10793         /* read bytes up to the last 4 byte boundary */
10794         pd = &data[eeprom->len];
10795         for (i = 0; i < (len - (len & 3)); i += 4) {
10796                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10797                 if (ret) {
10798                         eeprom->len += i;
10799                         return ret;
10800                 }
10801                 memcpy(pd + i, &val, 4);
10802         }
10803         eeprom->len += i;
10804
10805         if (len & 3) {
10806                 /* read last bytes not ending on 4 byte boundary */
10807                 pd = &data[eeprom->len];
10808                 b_count = len & 3;
10809                 b_offset = offset + len - b_count;
10810                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10811                 if (ret)
10812                         return ret;
10813                 memcpy(pd, &val, b_count);
10814                 eeprom->len += b_count;
10815         }
10816         return 0;
10817 }
10818
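/* Unaligned writes are widened to whole words: any partial word at the
 * head or tail is first read back from NVRAM, merged with the caller's
 * data in a temporary buffer, and the aligned span is then written with
 * a single tg3_nvram_write_block() call.
 */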
10819 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10820 {
10821         struct tg3 *tp = netdev_priv(dev);
10822         int ret;
10823         u32 offset, len, b_offset, odd_len;
10824         u8 *buf;
10825         __be32 start, end;
10826
10827         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10828                 return -EAGAIN;
10829
10830         if (tg3_flag(tp, NO_NVRAM) ||
10831             eeprom->magic != TG3_EEPROM_MAGIC)
10832                 return -EINVAL;
10833
10834         offset = eeprom->offset;
10835         len = eeprom->len;
10836
10837         if ((b_offset = (offset & 3))) {
10838                 /* adjustments to start on required 4 byte boundary */
10839                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10840                 if (ret)
10841                         return ret;
10842                 len += b_offset;
10843                 offset &= ~3;
10844                 if (len < 4)
10845                         len = 4;
10846         }
10847
10848         odd_len = 0;
10849         if (len & 3) {
10850                 /* adjustments to end on required 4 byte boundary */
10851                 odd_len = 1;
10852                 len = (len + 3) & ~3;
10853                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10854                 if (ret)
10855                         return ret;
10856         }
10857
10858         buf = data;
10859         if (b_offset || odd_len) {
10860                 buf = kmalloc(len, GFP_KERNEL);
10861                 if (!buf)
10862                         return -ENOMEM;
10863                 if (b_offset)
10864                         memcpy(buf, &start, 4);
10865                 if (odd_len)
10866                         memcpy(buf+len-4, &end, 4);
10867                 memcpy(buf + b_offset, data, eeprom->len);
10868         }
10869
10870         ret = tg3_nvram_write_block(tp, offset, len, buf);
10871
10872         if (buf != data)
10873                 kfree(buf);
10874
10875         return ret;
10876 }
10877
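/* ethtool get_settings backend (e.g. "ethtool eth0").  When phylib owns
 * the PHY the query is delegated to it wholesale; otherwise the report is
 * synthesized from tp->link_config and the PHY capability flags.
 */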
10878 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10879 {
10880         struct tg3 *tp = netdev_priv(dev);
10881
10882         if (tg3_flag(tp, USE_PHYLIB)) {
10883                 struct phy_device *phydev;
10884                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10885                         return -EAGAIN;
10886                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10887                 return phy_ethtool_gset(phydev, cmd);
10888         }
10889
10890         cmd->supported = (SUPPORTED_Autoneg);
10891
10892         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10893                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10894                                    SUPPORTED_1000baseT_Full);
10895
10896         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10897                 cmd->supported |= (SUPPORTED_100baseT_Half |
10898                                    SUPPORTED_100baseT_Full |
10899                                    SUPPORTED_10baseT_Half |
10900                                    SUPPORTED_10baseT_Full |
10901                                    SUPPORTED_TP);
10902                 cmd->port = PORT_TP;
10903         } else {
10904                 cmd->supported |= SUPPORTED_FIBRE;
10905                 cmd->port = PORT_FIBRE;
10906         }
10907
10908         cmd->advertising = tp->link_config.advertising;
10909         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10910                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10911                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10912                                 cmd->advertising |= ADVERTISED_Pause;
10913                         } else {
10914                                 cmd->advertising |= ADVERTISED_Pause |
10915                                                     ADVERTISED_Asym_Pause;
10916                         }
10917                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10918                         cmd->advertising |= ADVERTISED_Asym_Pause;
10919                 }
10920         }
10921         if (netif_running(dev) && netif_carrier_ok(dev)) {
10922                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10923                 cmd->duplex = tp->link_config.active_duplex;
10924                 cmd->lp_advertising = tp->link_config.rmt_adv;
10925                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10926                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10927                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10928                         else
10929                                 cmd->eth_tp_mdix = ETH_TP_MDI;
10930                 }
10931         } else {
10932                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10933                 cmd->duplex = DUPLEX_UNKNOWN;
10934                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10935         }
10936         cmd->phy_address = tp->phy_addr;
10937         cmd->transceiver = XCVR_INTERNAL;
10938         cmd->autoneg = tp->link_config.autoneg;
10939         cmd->maxtxpkt = 0;
10940         cmd->maxrxpkt = 0;
10941         return 0;
10942 }
10943
10944 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10945 {
10946         struct tg3 *tp = netdev_priv(dev);
10947         u32 speed = ethtool_cmd_speed(cmd);
10948
10949         if (tg3_flag(tp, USE_PHYLIB)) {
10950                 struct phy_device *phydev;
10951                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10952                         return -EAGAIN;
10953                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10954                 return phy_ethtool_sset(phydev, cmd);
10955         }
10956
10957         if (cmd->autoneg != AUTONEG_ENABLE &&
10958             cmd->autoneg != AUTONEG_DISABLE)
10959                 return -EINVAL;
10960
10961         if (cmd->autoneg == AUTONEG_DISABLE &&
10962             cmd->duplex != DUPLEX_FULL &&
10963             cmd->duplex != DUPLEX_HALF)
10964                 return -EINVAL;
10965
10966         if (cmd->autoneg == AUTONEG_ENABLE) {
10967                 u32 mask = ADVERTISED_Autoneg |
10968                            ADVERTISED_Pause |
10969                            ADVERTISED_Asym_Pause;
10970
10971                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10972                         mask |= ADVERTISED_1000baseT_Half |
10973                                 ADVERTISED_1000baseT_Full;
10974
10975                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10976                         mask |= ADVERTISED_100baseT_Half |
10977                                 ADVERTISED_100baseT_Full |
10978                                 ADVERTISED_10baseT_Half |
10979                                 ADVERTISED_10baseT_Full |
10980                                 ADVERTISED_TP;
10981                 else
10982                         mask |= ADVERTISED_FIBRE;
10983
10984                 if (cmd->advertising & ~mask)
10985                         return -EINVAL;
10986
10987                 mask &= (ADVERTISED_1000baseT_Half |
10988                          ADVERTISED_1000baseT_Full |
10989                          ADVERTISED_100baseT_Half |
10990                          ADVERTISED_100baseT_Full |
10991                          ADVERTISED_10baseT_Half |
10992                          ADVERTISED_10baseT_Full);
10993
10994                 cmd->advertising &= mask;
10995         } else {
10996                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10997                         if (speed != SPEED_1000)
10998                                 return -EINVAL;
10999
11000                         if (cmd->duplex != DUPLEX_FULL)
11001                                 return -EINVAL;
11002                 } else {
11003                         if (speed != SPEED_100 &&
11004                             speed != SPEED_10)
11005                                 return -EINVAL;
11006                 }
11007         }
11008
11009         tg3_full_lock(tp, 0);
11010
11011         tp->link_config.autoneg = cmd->autoneg;
11012         if (cmd->autoneg == AUTONEG_ENABLE) {
11013                 tp->link_config.advertising = (cmd->advertising |
11014                                               ADVERTISED_Autoneg);
11015                 tp->link_config.speed = SPEED_UNKNOWN;
11016                 tp->link_config.duplex = DUPLEX_UNKNOWN;
11017         } else {
11018                 tp->link_config.advertising = 0;
11019                 tp->link_config.speed = speed;
11020                 tp->link_config.duplex = cmd->duplex;
11021         }
11022
11023         if (netif_running(dev))
11024                 tg3_setup_phy(tp, 1);
11025
11026         tg3_full_unlock(tp);
11027
11028         return 0;
11029 }
11030
11031 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11032 {
11033         struct tg3 *tp = netdev_priv(dev);
11034
11035         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11036         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11037         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11038         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11039 }
11040
11041 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11042 {
11043         struct tg3 *tp = netdev_priv(dev);
11044
11045         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11046                 wol->supported = WAKE_MAGIC;
11047         else
11048                 wol->supported = 0;
11049         wol->wolopts = 0;
11050         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11051                 wol->wolopts = WAKE_MAGIC;
11052         memset(&wol->sopass, 0, sizeof(wol->sopass));
11053 }
11054
11055 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11056 {
11057         struct tg3 *tp = netdev_priv(dev);
11058         struct device *dp = &tp->pdev->dev;
11059
11060         if (wol->wolopts & ~WAKE_MAGIC)
11061                 return -EINVAL;
11062         if ((wol->wolopts & WAKE_MAGIC) &&
11063             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11064                 return -EINVAL;
11065
11066         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11067
11068         spin_lock_bh(&tp->lock);
11069         if (device_may_wakeup(dp))
11070                 tg3_flag_set(tp, WOL_ENABLE);
11071         else
11072                 tg3_flag_clear(tp, WOL_ENABLE);
11073         spin_unlock_bh(&tp->lock);
11074
11075         return 0;
11076 }
11077
11078 static u32 tg3_get_msglevel(struct net_device *dev)
11079 {
11080         struct tg3 *tp = netdev_priv(dev);
11081         return tp->msg_enable;
11082 }
11083
11084 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11085 {
11086         struct tg3 *tp = netdev_priv(dev);
11087         tp->msg_enable = value;
11088 }
11089
11090 static int tg3_nway_reset(struct net_device *dev)
11091 {
11092         struct tg3 *tp = netdev_priv(dev);
11093         int r;
11094
11095         if (!netif_running(dev))
11096                 return -EAGAIN;
11097
11098         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11099                 return -EINVAL;
11100
11101         if (tg3_flag(tp, USE_PHYLIB)) {
11102                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11103                         return -EAGAIN;
11104                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11105         } else {
11106                 u32 bmcr;
11107
11108                 spin_lock_bh(&tp->lock);
11109                 r = -EINVAL;
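                /* BMCR is read twice; only the second, checked read is
                 * used (the first is presumably a dummy read to flush a
                 * stale value; the duplication is deliberate here).
                 */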
11110                 tg3_readphy(tp, MII_BMCR, &bmcr);
11111                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11112                     ((bmcr & BMCR_ANENABLE) ||
11113                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11114                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11115                                                    BMCR_ANENABLE);
11116                         r = 0;
11117                 }
11118                 spin_unlock_bh(&tp->lock);
11119         }
11120
11121         return r;
11122 }
11123
11124 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11125 {
11126         struct tg3 *tp = netdev_priv(dev);
11127
11128         ering->rx_max_pending = tp->rx_std_ring_mask;
11129         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11130                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11131         else
11132                 ering->rx_jumbo_max_pending = 0;
11133
11134         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11135
11136         ering->rx_pending = tp->rx_pending;
11137         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11138                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11139         else
11140                 ering->rx_jumbo_pending = 0;
11141
11142         ering->tx_pending = tp->napi[0].tx_pending;
11143 }
11144
11145 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11146 {
11147         struct tg3 *tp = netdev_priv(dev);
11148         int i, irq_sync = 0, err = 0;
11149
11150         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11151             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11152             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11153             (ering->tx_pending <= MAX_SKB_FRAGS) ||
11154             (tg3_flag(tp, TSO_BUG) &&
11155              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11156                 return -EINVAL;
11157
11158         if (netif_running(dev)) {
11159                 tg3_phy_stop(tp);
11160                 tg3_netif_stop(tp);
11161                 irq_sync = 1;
11162         }
11163
11164         tg3_full_lock(tp, irq_sync);
11165
11166         tp->rx_pending = ering->rx_pending;
11167
11168         if (tg3_flag(tp, MAX_RXPEND_64) &&
11169             tp->rx_pending > 63)
11170                 tp->rx_pending = 63;
11171         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11172
11173         for (i = 0; i < tp->irq_max; i++)
11174                 tp->napi[i].tx_pending = ering->tx_pending;
11175
11176         if (netif_running(dev)) {
11177                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11178                 err = tg3_restart_hw(tp, 1);
11179                 if (!err)
11180                         tg3_netif_start(tp);
11181         }
11182
11183         tg3_full_unlock(tp);
11184
11185         if (irq_sync && !err)
11186                 tg3_phy_start(tp);
11187
11188         return err;
11189 }
11190
11191 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11192 {
11193         struct tg3 *tp = netdev_priv(dev);
11194
11195         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11196
11197         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11198                 epause->rx_pause = 1;
11199         else
11200                 epause->rx_pause = 0;
11201
11202         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11203                 epause->tx_pause = 1;
11204         else
11205                 epause->tx_pause = 0;
11206 }
11207
11208 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11209 {
11210         struct tg3 *tp = netdev_priv(dev);
11211         int err = 0;
11212
11213         if (tg3_flag(tp, USE_PHYLIB)) {
11214                 u32 newadv;
11215                 struct phy_device *phydev;
11216
11217                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11218
11219                 if (!(phydev->supported & SUPPORTED_Pause) ||
11220                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11221                      (epause->rx_pause != epause->tx_pause)))
11222                         return -EINVAL;
11223
11224                 tp->link_config.flowctrl = 0;
11225                 if (epause->rx_pause) {
11226                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11227
11228                         if (epause->tx_pause) {
11229                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11230                                 newadv = ADVERTISED_Pause;
11231                         } else
11232                                 newadv = ADVERTISED_Pause |
11233                                          ADVERTISED_Asym_Pause;
11234                 } else if (epause->tx_pause) {
11235                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11236                         newadv = ADVERTISED_Asym_Pause;
11237                 } else
11238                         newadv = 0;
11239
11240                 if (epause->autoneg)
11241                         tg3_flag_set(tp, PAUSE_AUTONEG);
11242                 else
11243                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11244
11245                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11246                         u32 oldadv = phydev->advertising &
11247                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11248                         if (oldadv != newadv) {
11249                                 phydev->advertising &=
11250                                         ~(ADVERTISED_Pause |
11251                                           ADVERTISED_Asym_Pause);
11252                                 phydev->advertising |= newadv;
11253                                 if (phydev->autoneg) {
11254                                         /*
11255                                          * Always renegotiate the link to
11256                                          * inform our link partner of our
11257                                          * flow control settings, even if the
11258                                          * flow control is forced.  Let
11259                                          * tg3_adjust_link() do the final
11260                                          * flow control setup.
11261                                          */
11262                                         return phy_start_aneg(phydev);
11263                                 }
11264                         }
11265
11266                         if (!epause->autoneg)
11267                                 tg3_setup_flow_control(tp, 0, 0);
11268                 } else {
11269                         tp->link_config.advertising &=
11270                                         ~(ADVERTISED_Pause |
11271                                           ADVERTISED_Asym_Pause);
11272                         tp->link_config.advertising |= newadv;
11273                 }
11274         } else {
11275                 int irq_sync = 0;
11276
11277                 if (netif_running(dev)) {
11278                         tg3_netif_stop(tp);
11279                         irq_sync = 1;
11280                 }
11281
11282                 tg3_full_lock(tp, irq_sync);
11283
11284                 if (epause->autoneg)
11285                         tg3_flag_set(tp, PAUSE_AUTONEG);
11286                 else
11287                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11288                 if (epause->rx_pause)
11289                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11290                 else
11291                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11292                 if (epause->tx_pause)
11293                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11294                 else
11295                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11296
11297                 if (netif_running(dev)) {
11298                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11299                         err = tg3_restart_hw(tp, 1);
11300                         if (!err)
11301                                 tg3_netif_start(tp);
11302                 }
11303
11304                 tg3_full_unlock(tp);
11305         }
11306
11307         return err;
11308 }
11309
11310 static int tg3_get_sset_count(struct net_device *dev, int sset)
11311 {
11312         switch (sset) {
11313         case ETH_SS_TEST:
11314                 return TG3_NUM_TEST;
11315         case ETH_SS_STATS:
11316                 return TG3_NUM_STATS;
11317         default:
11318                 return -EOPNOTSUPP;
11319         }
11320 }
11321
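/* Only ETHTOOL_GRXRINGS is supported: report how many RSS rings are (or
 * would be) in use, less one because the first interrupt vector handles
 * only link interrupts.  Requires MSI-X support.
 */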
11322 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11323                          u32 *rules __always_unused)
11324 {
11325         struct tg3 *tp = netdev_priv(dev);
11326
11327         if (!tg3_flag(tp, SUPPORT_MSIX))
11328                 return -EOPNOTSUPP;
11329
11330         switch (info->cmd) {
11331         case ETHTOOL_GRXRINGS:
11332                 if (netif_running(tp->dev))
11333                         info->data = tp->rxq_cnt;
11334                 else {
11335                         info->data = num_online_cpus();
11336                         if (info->data > TG3_RSS_MAX_NUM_QS)
11337                                 info->data = TG3_RSS_MAX_NUM_QS;
11338                 }
11339
11340                 /* The first interrupt vector only
11341                  * handles link interrupts.
11342                  */
11343                 info->data -= 1;
11344                 return 0;
11345
11346         default:
11347                 return -EOPNOTSUPP;
11348         }
11349 }
11350
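/* RSS indirection table size: a fixed TG3_RSS_INDIR_TBL_SIZE entries
 * when MSI-X is supported, zero otherwise.
 */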
11351 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11352 {
11353         u32 size = 0;
11354         struct tg3 *tp = netdev_priv(dev);
11355
11356         if (tg3_flag(tp, SUPPORT_MSIX))
11357                 size = TG3_RSS_INDIR_TBL_SIZE;
11358
11359         return size;
11360 }
11361
11362 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11363 {
11364         struct tg3 *tp = netdev_priv(dev);
11365         int i;
11366
11367         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11368                 indir[i] = tp->rss_ind_tbl[i];
11369
11370         return 0;
11371 }
11372
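/* Install a new RSS indirection table (e.g. ethtool -X).  The cached
 * copy is always updated; the hardware table is only rewritten, under
 * the full lock, when the device is up with RSS enabled.
 */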
11373 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11374 {
11375         struct tg3 *tp = netdev_priv(dev);
11376         size_t i;
11377
11378         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11379                 tp->rss_ind_tbl[i] = indir[i];
11380
11381         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11382                 return 0;
11383
11384         /* It is legal to write the indirection
11385          * table while the device is running.
11386          */
11387         tg3_full_lock(tp, 0);
11388         tg3_rss_write_indir_tbl(tp);
11389         tg3_full_unlock(tp);
11390
11391         return 0;
11392 }
11393
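/* ethtool get_channels handler (e.g. ethtool -l).  While the device is
 * down, report the user-requested counts or, failing that, the default
 * RSS queue count clamped to the hardware maxima.
 */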
11394 static void tg3_get_channels(struct net_device *dev,
11395                              struct ethtool_channels *channel)
11396 {
11397         struct tg3 *tp = netdev_priv(dev);
11398         u32 deflt_qs = netif_get_num_default_rss_queues();
11399
11400         channel->max_rx = tp->rxq_max;
11401         channel->max_tx = tp->txq_max;
11402
11403         if (netif_running(dev)) {
11404                 channel->rx_count = tp->rxq_cnt;
11405                 channel->tx_count = tp->txq_cnt;
11406         } else {
11407                 if (tp->rxq_req)
11408                         channel->rx_count = tp->rxq_req;
11409                 else
11410                         channel->rx_count = min(deflt_qs, tp->rxq_max);
11411
11412                 if (tp->txq_req)
11413                         channel->tx_count = tp->txq_req;
11414                 else
11415                         channel->tx_count = min(deflt_qs, tp->txq_max);
11416         }
11417 }
11418
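/* ethtool set_channels handler (e.g. ethtool -L).  New queue counts are
 * only recorded while the device is down; on a running interface they
 * take effect immediately via a full stop/start cycle.
 */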
11419 static int tg3_set_channels(struct net_device *dev,
11420                             struct ethtool_channels *channel)
11421 {
11422         struct tg3 *tp = netdev_priv(dev);
11423
11424         if (!tg3_flag(tp, SUPPORT_MSIX))
11425                 return -EOPNOTSUPP;
11426
11427         if (channel->rx_count > tp->rxq_max ||
11428             channel->tx_count > tp->txq_max)
11429                 return -EINVAL;
11430
11431         tp->rxq_req = channel->rx_count;
11432         tp->txq_req = channel->tx_count;
11433
11434         if (!netif_running(dev))
11435                 return 0;
11436
11437         tg3_stop(tp);
11438
11439         netif_carrier_off(dev);
11440
11441         tg3_start(tp, true, false);
11442
11443         return 0;
11444 }
11445
11446 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11447 {
11448         switch (stringset) {
11449         case ETH_SS_STATS:
11450                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11451                 break;
11452         case ETH_SS_TEST:
11453                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11454                 break;
11455         default:
11456                 WARN_ON(1);     /* unknown stringset; we still need a WARN() that takes a message */
11457                 break;
11458         }
11459 }
11460
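/* Blink the port LED for identification (e.g. ethtool -p).  Returning 1
 * for ETHTOOL_ID_ACTIVE asks the ethtool core to call back once per
 * second with ID_ON/ID_OFF; ID_INACTIVE restores tp->led_ctrl.
 */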
11461 static int tg3_set_phys_id(struct net_device *dev,
11462                             enum ethtool_phys_id_state state)
11463 {
11464         struct tg3 *tp = netdev_priv(dev);
11465
11466         if (!netif_running(tp->dev))
11467                 return -EAGAIN;
11468
11469         switch (state) {
11470         case ETHTOOL_ID_ACTIVE:
11471                 return 1;       /* cycle on/off once per second */
11472
11473         case ETHTOOL_ID_ON:
11474                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11475                      LED_CTRL_1000MBPS_ON |
11476                      LED_CTRL_100MBPS_ON |
11477                      LED_CTRL_10MBPS_ON |
11478                      LED_CTRL_TRAFFIC_OVERRIDE |
11479                      LED_CTRL_TRAFFIC_BLINK |
11480                      LED_CTRL_TRAFFIC_LED);
11481                 break;
11482
11483         case ETHTOOL_ID_OFF:
11484                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11485                      LED_CTRL_TRAFFIC_OVERRIDE);
11486                 break;
11487
11488         case ETHTOOL_ID_INACTIVE:
11489                 tw32(MAC_LED_CTRL, tp->led_ctrl);
11490                 break;
11491         }
11492
11493         return 0;
11494 }
11495
11496 static void tg3_get_ethtool_stats(struct net_device *dev,
11497                                    struct ethtool_stats *estats, u64 *tmp_stats)
11498 {
11499         struct tg3 *tp = netdev_priv(dev);
11500
11501         if (tp->hw_stats)
11502                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11503         else
11504                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11505 }
11506
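/* Read the VPD block into a kmalloc'd buffer (caller frees), returning
 * its length via *vpdlen.  EEPROM-style parts may point at an extended
 * VPD area through the NVRAM directory and fall back to the fixed
 * offset; parts without the EEPROM magic are read through the PCI VPD
 * capability instead.
 */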
11507 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11508 {
11509         int i;
11510         __be32 *buf;
11511         u32 offset = 0, len = 0;
11512         u32 magic, val;
11513
11514         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11515                 return NULL;
11516
11517         if (magic == TG3_EEPROM_MAGIC) {
11518                 for (offset = TG3_NVM_DIR_START;
11519                      offset < TG3_NVM_DIR_END;
11520                      offset += TG3_NVM_DIRENT_SIZE) {
11521                         if (tg3_nvram_read(tp, offset, &val))
11522                                 return NULL;
11523
11524                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11525                             TG3_NVM_DIRTYPE_EXTVPD)
11526                                 break;
11527                 }
11528
11529                 if (offset != TG3_NVM_DIR_END) {
11530                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11531                         if (tg3_nvram_read(tp, offset + 4, &offset))
11532                                 return NULL;
11533
11534                         offset = tg3_nvram_logical_addr(tp, offset);
11535                 }
11536         }
11537
11538         if (!offset || !len) {
11539                 offset = TG3_NVM_VPD_OFF;
11540                 len = TG3_NVM_VPD_LEN;
11541         }
11542
11543         buf = kmalloc(len, GFP_KERNEL);
11544         if (buf == NULL)
11545                 return NULL;
11546
11547         if (magic == TG3_EEPROM_MAGIC) {
11548                 for (i = 0; i < len; i += 4) {
11549                         /* The data is in little-endian format in NVRAM.
11550                          * Use the big-endian read routines to preserve
11551                          * the byte order as it exists in NVRAM.
11552                          */
11553                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11554                                 goto error;
11555                 }
11556         } else {
11557                 u8 *ptr;
11558                 ssize_t cnt;
11559                 unsigned int pos = 0;
11560
11561                 ptr = (u8 *)&buf[0];
11562                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11563                         cnt = pci_read_vpd(tp->pdev, pos,
11564                                            len - pos, ptr);
11565                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
11566                                 cnt = 0;
11567                         else if (cnt < 0)
11568                                 goto error;
11569                 }
11570                 if (pos != len)
11571                         goto error;
11572         }
11573
11574         *vpdlen = len;
11575
11576         return buf;
11577
11578 error:
11579         kfree(buf);
11580         return NULL;
11581 }
11582
11583 #define NVRAM_TEST_SIZE 0x100
11584 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
11585 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
11586 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
11587 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
11588 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
11589 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
11590 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11591 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11592
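/* NVRAM self-test: read the image and verify its integrity.  Selfboot
 * firmware images use a simple byte checksum (skipping the MBA field on
 * format 1 rev 2), hardware selfboot images use per-byte parity bits,
 * and legacy images carry CRCs over the bootstrap and manufacturing
 * blocks plus an optional VPD checksum keyword.
 */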
11593 static int tg3_test_nvram(struct tg3 *tp)
11594 {
11595         u32 csum, magic, len;
11596         __be32 *buf;
11597         int i, j, k, err = 0, size;
11598
11599         if (tg3_flag(tp, NO_NVRAM))
11600                 return 0;
11601
11602         if (tg3_nvram_read(tp, 0, &magic) != 0)
11603                 return -EIO;
11604
11605         if (magic == TG3_EEPROM_MAGIC)
11606                 size = NVRAM_TEST_SIZE;
11607         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11608                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11609                     TG3_EEPROM_SB_FORMAT_1) {
11610                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11611                         case TG3_EEPROM_SB_REVISION_0:
11612                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11613                                 break;
11614                         case TG3_EEPROM_SB_REVISION_2:
11615                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11616                                 break;
11617                         case TG3_EEPROM_SB_REVISION_3:
11618                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11619                                 break;
11620                         case TG3_EEPROM_SB_REVISION_4:
11621                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11622                                 break;
11623                         case TG3_EEPROM_SB_REVISION_5:
11624                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11625                                 break;
11626                         case TG3_EEPROM_SB_REVISION_6:
11627                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11628                                 break;
11629                         default:
11630                                 return -EIO;
11631                         }
11632                 } else
11633                         return 0;
11634         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11635                 size = NVRAM_SELFBOOT_HW_SIZE;
11636         else
11637                 return -EIO;
11638
11639         buf = kmalloc(size, GFP_KERNEL);
11640         if (buf == NULL)
11641                 return -ENOMEM;
11642
11643         err = -EIO;
11644         for (i = 0, j = 0; i < size; i += 4, j++) {
11645                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11646                 if (err)
11647                         break;
11648         }
11649         if (i < size)
11650                 goto out;
11651
11652         /* Selfboot format */
11653         magic = be32_to_cpu(buf[0]);
11654         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11655             TG3_EEPROM_MAGIC_FW) {
11656                 u8 *buf8 = (u8 *) buf, csum8 = 0;
11657
11658                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11659                     TG3_EEPROM_SB_REVISION_2) {
11660                         /* For rev 2, the csum doesn't include the MBA. */
11661                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11662                                 csum8 += buf8[i];
11663                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11664                                 csum8 += buf8[i];
11665                 } else {
11666                         for (i = 0; i < size; i++)
11667                                 csum8 += buf8[i];
11668                 }
11669
11670                 if (csum8 == 0) {
11671                         err = 0;
11672                         goto out;
11673                 }
11674
11675                 err = -EIO;
11676                 goto out;
11677         }
11678
11679         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11680             TG3_EEPROM_MAGIC_HW) {
11681                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11682                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11683                 u8 *buf8 = (u8 *) buf;
11684
11685                 /* Separate the parity bits and the data bytes.  */
11686                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11687                         if ((i == 0) || (i == 8)) {
11688                                 int l;
11689                                 u8 msk;
11690
11691                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11692                                         parity[k++] = buf8[i] & msk;
11693                                 i++;
11694                         } else if (i == 16) {
11695                                 int l;
11696                                 u8 msk;
11697
11698                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11699                                         parity[k++] = buf8[i] & msk;
11700                                 i++;
11701
11702                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11703                                         parity[k++] = buf8[i] & msk;
11704                                 i++;
11705                         }
11706                         data[j++] = buf8[i];
11707                 }
11708
11709                 err = -EIO;
11710                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11711                         u8 hw8 = hweight8(data[i]);
11712
11713                         if ((hw8 & 0x1) && parity[i])
11714                                 goto out;
11715                         else if (!(hw8 & 0x1) && !parity[i])
11716                                 goto out;
11717                 }
11718                 err = 0;
11719                 goto out;
11720         }
11721
11722         err = -EIO;
11723
11724         /* Bootstrap checksum at offset 0x10 */
11725         csum = calc_crc((unsigned char *) buf, 0x10);
11726         if (csum != le32_to_cpu(buf[0x10/4]))
11727                 goto out;
11728
11729         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11730         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11731         if (csum != le32_to_cpu(buf[0xfc/4]))
11732                 goto out;
11733
11734         kfree(buf);
11735
11736         buf = tg3_vpd_readblock(tp, &len);
11737         if (!buf)
11738                 return -ENOMEM;
11739
11740         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11741         if (i > 0) {
11742                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11743                 if (j < 0)
11744                         goto out;
11745
11746                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11747                         goto out;
11748
11749                 i += PCI_VPD_LRDT_TAG_SIZE;
11750                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11751                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11752                 if (j > 0) {
11753                         u8 csum8 = 0;
11754
11755                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11756
11757                         for (i = 0; i <= j; i++)
11758                                 csum8 += ((u8 *)buf)[i];
11759
11760                         if (csum8)
11761                                 goto out;
11762                 }
11763         }
11764
11765         err = 0;
11766
11767 out:
11768         kfree(buf);
11769         return err;
11770 }
11771
11772 #define TG3_SERDES_TIMEOUT_SEC  2
11773 #define TG3_COPPER_TIMEOUT_SEC  6
11774
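/* Link self-test: poll for carrier once a second, for up to the SerDes
 * or copper timeout defined above.
 */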
11775 static int tg3_test_link(struct tg3 *tp)
11776 {
11777         int i, max;
11778
11779         if (!netif_running(tp->dev))
11780                 return -ENODEV;
11781
11782         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11783                 max = TG3_SERDES_TIMEOUT_SEC;
11784         else
11785                 max = TG3_COPPER_TIMEOUT_SEC;
11786
11787         for (i = 0; i < max; i++) {
11788                 if (netif_carrier_ok(tp->dev))
11789                         return 0;
11790
11791                 if (msleep_interruptible(1000))
11792                         break;
11793         }
11794
11795         return -EIO;
11796 }
11797
11798 /* Only test the commonly used registers */
11799 static int tg3_test_registers(struct tg3 *tp)
11800 {
11801         int i, is_5705, is_5750;
11802         u32 offset, read_mask, write_mask, val, save_val, read_val;
11803         static struct {
11804                 u16 offset;
11805                 u16 flags;
11806 #define TG3_FL_5705     0x1
11807 #define TG3_FL_NOT_5705 0x2
11808 #define TG3_FL_NOT_5788 0x4
11809 #define TG3_FL_NOT_5750 0x8
11810                 u32 read_mask;
11811                 u32 write_mask;
11812         } reg_tbl[] = {
11813                 /* MAC Control Registers */
11814                 { MAC_MODE, TG3_FL_NOT_5705,
11815                         0x00000000, 0x00ef6f8c },
11816                 { MAC_MODE, TG3_FL_5705,
11817                         0x00000000, 0x01ef6b8c },
11818                 { MAC_STATUS, TG3_FL_NOT_5705,
11819                         0x03800107, 0x00000000 },
11820                 { MAC_STATUS, TG3_FL_5705,
11821                         0x03800100, 0x00000000 },
11822                 { MAC_ADDR_0_HIGH, 0x0000,
11823                         0x00000000, 0x0000ffff },
11824                 { MAC_ADDR_0_LOW, 0x0000,
11825                         0x00000000, 0xffffffff },
11826                 { MAC_RX_MTU_SIZE, 0x0000,
11827                         0x00000000, 0x0000ffff },
11828                 { MAC_TX_MODE, 0x0000,
11829                         0x00000000, 0x00000070 },
11830                 { MAC_TX_LENGTHS, 0x0000,
11831                         0x00000000, 0x00003fff },
11832                 { MAC_RX_MODE, TG3_FL_NOT_5705,
11833                         0x00000000, 0x000007fc },
11834                 { MAC_RX_MODE, TG3_FL_5705,
11835                         0x00000000, 0x000007dc },
11836                 { MAC_HASH_REG_0, 0x0000,
11837                         0x00000000, 0xffffffff },
11838                 { MAC_HASH_REG_1, 0x0000,
11839                         0x00000000, 0xffffffff },
11840                 { MAC_HASH_REG_2, 0x0000,
11841                         0x00000000, 0xffffffff },
11842                 { MAC_HASH_REG_3, 0x0000,
11843                         0x00000000, 0xffffffff },
11844
11845                 /* Receive Data and Receive BD Initiator Control Registers. */
11846                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11847                         0x00000000, 0xffffffff },
11848                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11849                         0x00000000, 0xffffffff },
11850                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11851                         0x00000000, 0x00000003 },
11852                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11853                         0x00000000, 0xffffffff },
11854                 { RCVDBDI_STD_BD+0, 0x0000,
11855                         0x00000000, 0xffffffff },
11856                 { RCVDBDI_STD_BD+4, 0x0000,
11857                         0x00000000, 0xffffffff },
11858                 { RCVDBDI_STD_BD+8, 0x0000,
11859                         0x00000000, 0xffff0002 },
11860                 { RCVDBDI_STD_BD+0xc, 0x0000,
11861                         0x00000000, 0xffffffff },
11862
11863                 /* Receive BD Initiator Control Registers. */
11864                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11865                         0x00000000, 0xffffffff },
11866                 { RCVBDI_STD_THRESH, TG3_FL_5705,
11867                         0x00000000, 0x000003ff },
11868                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11869                         0x00000000, 0xffffffff },
11870
11871                 /* Host Coalescing Control Registers. */
11872                 { HOSTCC_MODE, TG3_FL_NOT_5705,
11873                         0x00000000, 0x00000004 },
11874                 { HOSTCC_MODE, TG3_FL_5705,
11875                         0x00000000, 0x000000f6 },
11876                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11877                         0x00000000, 0xffffffff },
11878                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11879                         0x00000000, 0x000003ff },
11880                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11881                         0x00000000, 0xffffffff },
11882                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11883                         0x00000000, 0x000003ff },
11884                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11885                         0x00000000, 0xffffffff },
11886                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11887                         0x00000000, 0x000000ff },
11888                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11889                         0x00000000, 0xffffffff },
11890                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11891                         0x00000000, 0x000000ff },
11892                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11893                         0x00000000, 0xffffffff },
11894                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11895                         0x00000000, 0xffffffff },
11896                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11897                         0x00000000, 0xffffffff },
11898                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11899                         0x00000000, 0x000000ff },
11900                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11901                         0x00000000, 0xffffffff },
11902                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11903                         0x00000000, 0x000000ff },
11904                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11905                         0x00000000, 0xffffffff },
11906                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11907                         0x00000000, 0xffffffff },
11908                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11909                         0x00000000, 0xffffffff },
11910                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11911                         0x00000000, 0xffffffff },
11912                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11913                         0x00000000, 0xffffffff },
11914                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11915                         0xffffffff, 0x00000000 },
11916                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11917                         0xffffffff, 0x00000000 },
11918
11919                 /* Buffer Manager Control Registers. */
11920                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11921                         0x00000000, 0x007fff80 },
11922                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11923                         0x00000000, 0x007fffff },
11924                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11925                         0x00000000, 0x0000003f },
11926                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11927                         0x00000000, 0x000001ff },
11928                 { BUFMGR_MB_HIGH_WATER, 0x0000,
11929                         0x00000000, 0x000001ff },
11930                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11931                         0xffffffff, 0x00000000 },
11932                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11933                         0xffffffff, 0x00000000 },
11934
11935                 /* Mailbox Registers */
11936                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11937                         0x00000000, 0x000001ff },
11938                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11939                         0x00000000, 0x000001ff },
11940                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11941                         0x00000000, 0x000007ff },
11942                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11943                         0x00000000, 0x000001ff },
11944
11945                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11946         };
11947
11948         is_5705 = is_5750 = 0;
11949         if (tg3_flag(tp, 5705_PLUS)) {
11950                 is_5705 = 1;
11951                 if (tg3_flag(tp, 5750_PLUS))
11952                         is_5750 = 1;
11953         }
11954
11955         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11956                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11957                         continue;
11958
11959                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11960                         continue;
11961
11962                 if (tg3_flag(tp, IS_5788) &&
11963                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11964                         continue;
11965
11966                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11967                         continue;
11968
11969                 offset = (u32) reg_tbl[i].offset;
11970                 read_mask = reg_tbl[i].read_mask;
11971                 write_mask = reg_tbl[i].write_mask;
11972
11973                 /* Save the original register content */
11974                 save_val = tr32(offset);
11975
11976                 /* Determine the read-only value. */
11977                 read_val = save_val & read_mask;
11978
11979                 /* Write zero to the register, then make sure the read-only bits
11980                  * are not changed and the read/write bits are all zeros.
11981                  */
11982                 tw32(offset, 0);
11983
11984                 val = tr32(offset);
11985
11986                 /* Test the read-only and read/write bits. */
11987                 if (((val & read_mask) != read_val) || (val & write_mask))
11988                         goto out;
11989
11990                 /* Write ones to all the bits defined by RdMask and WrMask, then
11991                  * make sure the read-only bits are not changed and the
11992                  * read/write bits are all ones.
11993                  */
11994                 tw32(offset, read_mask | write_mask);
11995
11996                 val = tr32(offset);
11997
11998                 /* Test the read-only bits. */
11999                 if ((val & read_mask) != read_val)
12000                         goto out;
12001
12002                 /* Test the read/write bits. */
12003                 if ((val & write_mask) != write_mask)
12004                         goto out;
12005
12006                 tw32(offset, save_val);
12007         }
12008
12009         return 0;
12010
12011 out:
12012         if (netif_msg_hw(tp))
12013                 netdev_err(tp->dev,
12014                            "Register test failed at offset %x\n", offset);
12015         tw32(offset, save_val);
12016         return -EIO;
12017 }
12018
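/* Walk a region of on-chip memory, writing each test pattern one word
 * at a time and reading it back.
 */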
12019 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12020 {
12021         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12022         int i;
12023         u32 j;
12024
12025         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12026                 for (j = 0; j < len; j += 4) {
12027                         u32 val;
12028
12029                         tg3_write_mem(tp, offset + j, test_pattern[i]);
12030                         tg3_read_mem(tp, offset + j, &val);
12031                         if (val != test_pattern[i])
12032                                 return -EIO;
12033                 }
12034         }
12035         return 0;
12036 }
12037
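/* Memory self-test: pick the offset/length table matching this ASIC's
 * internal memory map and pattern-test every region in it.
 */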
12038 static int tg3_test_memory(struct tg3 *tp)
12039 {
12040         static struct mem_entry {
12041                 u32 offset;
12042                 u32 len;
12043         } mem_tbl_570x[] = {
12044                 { 0x00000000, 0x00b50},
12045                 { 0x00002000, 0x1c000},
12046                 { 0xffffffff, 0x00000}
12047         }, mem_tbl_5705[] = {
12048                 { 0x00000100, 0x0000c},
12049                 { 0x00000200, 0x00008},
12050                 { 0x00004000, 0x00800},
12051                 { 0x00006000, 0x01000},
12052                 { 0x00008000, 0x02000},
12053                 { 0x00010000, 0x0e000},
12054                 { 0xffffffff, 0x00000}
12055         }, mem_tbl_5755[] = {
12056                 { 0x00000200, 0x00008},
12057                 { 0x00004000, 0x00800},
12058                 { 0x00006000, 0x00800},
12059                 { 0x00008000, 0x02000},
12060                 { 0x00010000, 0x0c000},
12061                 { 0xffffffff, 0x00000}
12062         }, mem_tbl_5906[] = {
12063                 { 0x00000200, 0x00008},
12064                 { 0x00004000, 0x00400},
12065                 { 0x00006000, 0x00400},
12066                 { 0x00008000, 0x01000},
12067                 { 0x00010000, 0x01000},
12068                 { 0xffffffff, 0x00000}
12069         }, mem_tbl_5717[] = {
12070                 { 0x00000200, 0x00008},
12071                 { 0x00010000, 0x0a000},
12072                 { 0x00020000, 0x13c00},
12073                 { 0xffffffff, 0x00000}
12074         }, mem_tbl_57765[] = {
12075                 { 0x00000200, 0x00008},
12076                 { 0x00004000, 0x00800},
12077                 { 0x00006000, 0x09800},
12078                 { 0x00010000, 0x0a000},
12079                 { 0xffffffff, 0x00000}
12080         };
12081         struct mem_entry *mem_tbl;
12082         int err = 0;
12083         int i;
12084
12085         if (tg3_flag(tp, 5717_PLUS))
12086                 mem_tbl = mem_tbl_5717;
12087         else if (tg3_flag(tp, 57765_CLASS))
12088                 mem_tbl = mem_tbl_57765;
12089         else if (tg3_flag(tp, 5755_PLUS))
12090                 mem_tbl = mem_tbl_5755;
12091         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12092                 mem_tbl = mem_tbl_5906;
12093         else if (tg3_flag(tp, 5705_PLUS))
12094                 mem_tbl = mem_tbl_5705;
12095         else
12096                 mem_tbl = mem_tbl_570x;
12097
12098         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12099                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12100                 if (err)
12101                         break;
12102         }
12103
12104         return err;
12105 }
12106
12107 #define TG3_TSO_MSS             500
12108
12109 #define TG3_TSO_IP_HDR_LEN      20
12110 #define TG3_TSO_TCP_HDR_LEN     20
12111 #define TG3_TSO_TCP_OPT_LEN     12
12112
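/* Canned Ethertype + IPv4 + TCP header (with a 12-byte timestamp
 * option) prepended to the payload for the TSO loopback test.
 */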
12113 static const u8 tg3_tso_header[] = {
12114 0x08, 0x00,
12115 0x45, 0x00, 0x00, 0x00,
12116 0x00, 0x00, 0x40, 0x00,
12117 0x40, 0x06, 0x00, 0x00,
12118 0x0a, 0x00, 0x00, 0x01,
12119 0x0a, 0x00, 0x00, 0x02,
12120 0x0d, 0x00, 0xe0, 0x00,
12121 0x00, 0x00, 0x01, 0x00,
12122 0x00, 0x00, 0x02, 0x00,
12123 0x80, 0x10, 0x10, 0x00,
12124 0x14, 0x09, 0x00, 0x00,
12125 0x01, 0x01, 0x08, 0x0a,
12126 0x11, 0x11, 0x11, 0x11,
12127 0x11, 0x11, 0x11, 0x11,
12128 };
12129
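/* Transmit one frame (or one TSO burst) while the MAC or PHY is looped
 * back, then verify that the expected number of packets arrives on the
 * receive ring with the right length, checksum and payload bytes.
 */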
12130 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12131 {
12132         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12133         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12134         u32 budget;
12135         struct sk_buff *skb;
12136         u8 *tx_data, *rx_data;
12137         dma_addr_t map;
12138         int num_pkts, tx_len, rx_len, i, err;
12139         struct tg3_rx_buffer_desc *desc;
12140         struct tg3_napi *tnapi, *rnapi;
12141         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12142
12143         tnapi = &tp->napi[0];
12144         rnapi = &tp->napi[0];
12145         if (tp->irq_cnt > 1) {
12146                 if (tg3_flag(tp, ENABLE_RSS))
12147                         rnapi = &tp->napi[1];
12148                 if (tg3_flag(tp, ENABLE_TSS))
12149                         tnapi = &tp->napi[1];
12150         }
12151         coal_now = tnapi->coal_now | rnapi->coal_now;
12152
12153         err = -EIO;
12154
12155         tx_len = pktsz;
12156         skb = netdev_alloc_skb(tp->dev, tx_len);
12157         if (!skb)
12158                 return -ENOMEM;
12159
12160         tx_data = skb_put(skb, tx_len);
12161         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
12162         memset(tx_data + ETH_ALEN, 0x0, 8);
12163
12164         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12165
12166         if (tso_loopback) {
12167                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12168
12169                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12170                               TG3_TSO_TCP_OPT_LEN;
12171
12172                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12173                        sizeof(tg3_tso_header));
12174                 mss = TG3_TSO_MSS;
12175
12176                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12177                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12178
12179                 /* Set the total length field in the IP header */
12180                 iph->tot_len = htons((u16)(mss + hdr_len));
12181
12182                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12183                               TXD_FLAG_CPU_POST_DMA);
12184
12185                 if (tg3_flag(tp, HW_TSO_1) ||
12186                     tg3_flag(tp, HW_TSO_2) ||
12187                     tg3_flag(tp, HW_TSO_3)) {
12188                         struct tcphdr *th;
12189                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12190                         th = (struct tcphdr *)&tx_data[val];
12191                         th->check = 0;
12192                 } else
12193                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
12194
12195                 if (tg3_flag(tp, HW_TSO_3)) {
12196                         mss |= (hdr_len & 0xc) << 12;
12197                         if (hdr_len & 0x10)
12198                                 base_flags |= 0x00000010;
12199                         base_flags |= (hdr_len & 0x3e0) << 5;
12200                 } else if (tg3_flag(tp, HW_TSO_2))
12201                         mss |= hdr_len << 9;
12202                 else if (tg3_flag(tp, HW_TSO_1) ||
12203                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12204                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12205                 } else {
12206                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12207                 }
12208
12209                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12210         } else {
12211                 num_pkts = 1;
12212                 data_off = ETH_HLEN;
12213
12214                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12215                     tx_len > VLAN_ETH_FRAME_LEN)
12216                         base_flags |= TXD_FLAG_JMB_PKT;
12217         }
12218
12219         for (i = data_off; i < tx_len; i++)
12220                 tx_data[i] = (u8) (i & 0xff);
12221
12222         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12223         if (pci_dma_mapping_error(tp->pdev, map)) {
12224                 dev_kfree_skb(skb);
12225                 return -EIO;
12226         }
12227
12228         val = tnapi->tx_prod;
12229         tnapi->tx_buffers[val].skb = skb;
12230         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12231
12232         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12233                rnapi->coal_now);
12234
12235         udelay(10);
12236
12237         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12238
12239         budget = tg3_tx_avail(tnapi);
12240         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12241                             base_flags | TXD_FLAG_END, mss, 0)) {
12242                 tnapi->tx_buffers[val].skb = NULL;
12243                 dev_kfree_skb(skb);
12244                 return -EIO;
12245         }
12246
12247         tnapi->tx_prod++;
12248
12249         /* Sync BD data before updating mailbox */
12250         wmb();
12251
12252         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12253         tr32_mailbox(tnapi->prodmbox);
12254
12255         udelay(10);
12256
12257         /* Poll for up to 350 usec to allow enough time on some 10/100 Mbps devices.  */
12258         for (i = 0; i < 35; i++) {
12259                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12260                        coal_now);
12261
12262                 udelay(10);
12263
12264                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12265                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12266                 if ((tx_idx == tnapi->tx_prod) &&
12267                     (rx_idx == (rx_start_idx + num_pkts)))
12268                         break;
12269         }
12270
12271         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12272         dev_kfree_skb(skb);
12273
12274         if (tx_idx != tnapi->tx_prod)
12275                 goto out;
12276
12277         if (rx_idx != rx_start_idx + num_pkts)
12278                 goto out;
12279
12280         val = data_off;
12281         while (rx_idx != rx_start_idx) {
12282                 desc = &rnapi->rx_rcb[rx_start_idx++];
12283                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12284                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12285
12286                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12287                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12288                         goto out;
12289
12290                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12291                          - ETH_FCS_LEN;
12292
12293                 if (!tso_loopback) {
12294                         if (rx_len != tx_len)
12295                                 goto out;
12296
12297                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12298                                 if (opaque_key != RXD_OPAQUE_RING_STD)
12299                                         goto out;
12300                         } else {
12301                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12302                                         goto out;
12303                         }
12304                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12305                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12306                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
12307                         goto out;
12308                 }
12309
12310                 if (opaque_key == RXD_OPAQUE_RING_STD) {
12311                         rx_data = tpr->rx_std_buffers[desc_idx].data;
12312                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12313                                              mapping);
12314                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12315                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12316                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12317                                              mapping);
12318                 } else
12319                         goto out;
12320
12321                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12322                                             PCI_DMA_FROMDEVICE);
12323
12324                 rx_data += TG3_RX_OFFSET(tp);
12325                 for (i = data_off; i < rx_len; i++, val++) {
12326                         if (*(rx_data + i) != (u8) (val & 0xff))
12327                                 goto out;
12328                 }
12329         }
12330
12331         err = 0;
12332
12333         /* tg3_free_rings will unmap and free the rx_data */
12334 out:
12335         return err;
12336 }
12337
12338 #define TG3_STD_LOOPBACK_FAILED         1
12339 #define TG3_JMB_LOOPBACK_FAILED         2
12340 #define TG3_TSO_LOOPBACK_FAILED         4
12341 #define TG3_LOOPBACK_FAILED \
12342         (TG3_STD_LOOPBACK_FAILED | \
12343          TG3_JMB_LOOPBACK_FAILED | \
12344          TG3_TSO_LOOPBACK_FAILED)
12345
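/* Run the loopback tests: data[0] collects MAC-loopback failures,
 * data[1] internal PHY loopback and data[2] external loopback, each as
 * a bitmask of the TG3_*_LOOPBACK_FAILED flags above.
 */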
12346 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12347 {
12348         int err = -EIO;
12349         u32 eee_cap;
12350         u32 jmb_pkt_sz = 9000;
12351
12352         if (tp->dma_limit)
12353                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12354
12355         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12356         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12357
12358         if (!netif_running(tp->dev)) {
12359                 data[0] = TG3_LOOPBACK_FAILED;
12360                 data[1] = TG3_LOOPBACK_FAILED;
12361                 if (do_extlpbk)
12362                         data[2] = TG3_LOOPBACK_FAILED;
12363                 goto done;
12364         }
12365
12366         err = tg3_reset_hw(tp, 1);
12367         if (err) {
12368                 data[0] = TG3_LOOPBACK_FAILED;
12369                 data[1] = TG3_LOOPBACK_FAILED;
12370                 if (do_extlpbk)
12371                         data[2] = TG3_LOOPBACK_FAILED;
12372                 goto done;
12373         }
12374
12375         if (tg3_flag(tp, ENABLE_RSS)) {
12376                 int i;
12377
12378                 /* Reroute all rx packets to the 1st queue */
12379                 for (i = MAC_RSS_INDIR_TBL_0;
12380                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12381                         tw32(i, 0x0);
12382         }
12383
12384         /* HW erratum - MAC loopback fails in some cases on the 5780.
12385          * Normal traffic and PHY loopback are not affected by this
12386          * erratum.  Also, the MAC loopback test is deprecated for
12387          * all newer ASIC revisions.
12388          */
12389         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12390             !tg3_flag(tp, CPMU_PRESENT)) {
12391                 tg3_mac_loopback(tp, true);
12392
12393                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12394                         data[0] |= TG3_STD_LOOPBACK_FAILED;
12395
12396                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12397                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12398                         data[0] |= TG3_JMB_LOOPBACK_FAILED;
12399
12400                 tg3_mac_loopback(tp, false);
12401         }
12402
12403         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12404             !tg3_flag(tp, USE_PHYLIB)) {
12405                 int i;
12406
12407                 tg3_phy_lpbk_set(tp, 0, false);
12408
12409                 /* Wait for link */
12410                 for (i = 0; i < 100; i++) {
12411                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12412                                 break;
12413                         mdelay(1);
12414                 }
12415
12416                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12417                         data[1] |= TG3_STD_LOOPBACK_FAILED;
12418                 if (tg3_flag(tp, TSO_CAPABLE) &&
12419                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12420                         data[1] |= TG3_TSO_LOOPBACK_FAILED;
12421                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12422                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12423                         data[1] |= TG3_JMB_LOOPBACK_FAILED;
12424
12425                 if (do_extlpbk) {
12426                         tg3_phy_lpbk_set(tp, 0, true);
12427
12428                         /* All link indications report up, but the hardware
12429                          * isn't really ready for about 20 msec.  Double it
12430                          * to be sure.
12431                          */
12432                         mdelay(40);
12433
12434                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12435                                 data[2] |= TG3_STD_LOOPBACK_FAILED;
12436                         if (tg3_flag(tp, TSO_CAPABLE) &&
12437                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12438                                 data[2] |= TG3_TSO_LOOPBACK_FAILED;
12439                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12440                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12441                                 data[2] |= TG3_JMB_LOOPBACK_FAILED;
12442                 }
12443
12444                 /* Re-enable gphy autopowerdown. */
12445                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12446                         tg3_phy_toggle_apd(tp, true);
12447         }
12448
12449         err = (data[0] | data[1] | data[2]) ? -EIO : 0;
12450
12451 done:
12452         tp->phy_flags |= eee_cap;
12453
12454         return err;
12455 }
12456
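/* ethtool self-test entry point (e.g. ethtool -t).  Offline testing
 * halts the chip, runs the register, memory, loopback and interrupt
 * tests, then restarts the hardware if the interface was up.
 */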
12457 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12458                           u64 *data)
12459 {
12460         struct tg3 *tp = netdev_priv(dev);
12461         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12462
12463         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12464             tg3_power_up(tp)) {
12465                 etest->flags |= ETH_TEST_FL_FAILED;
12466                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12467                 return;
12468         }
12469
12470         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12471
12472         if (tg3_test_nvram(tp) != 0) {
12473                 etest->flags |= ETH_TEST_FL_FAILED;
12474                 data[0] = 1;
12475         }
12476         if (!doextlpbk && tg3_test_link(tp)) {
12477                 etest->flags |= ETH_TEST_FL_FAILED;
12478                 data[1] = 1;
12479         }
12480         if (etest->flags & ETH_TEST_FL_OFFLINE) {
12481                 int err, err2 = 0, irq_sync = 0;
12482
12483                 if (netif_running(dev)) {
12484                         tg3_phy_stop(tp);
12485                         tg3_netif_stop(tp);
12486                         irq_sync = 1;
12487                 }
12488
12489                 tg3_full_lock(tp, irq_sync);
12490
12491                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12492                 err = tg3_nvram_lock(tp);
12493                 tg3_halt_cpu(tp, RX_CPU_BASE);
12494                 if (!tg3_flag(tp, 5705_PLUS))
12495                         tg3_halt_cpu(tp, TX_CPU_BASE);
12496                 if (!err)
12497                         tg3_nvram_unlock(tp);
12498
12499                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12500                         tg3_phy_reset(tp);
12501
12502                 if (tg3_test_registers(tp) != 0) {
12503                         etest->flags |= ETH_TEST_FL_FAILED;
12504                         data[2] = 1;
12505                 }
12506
12507                 if (tg3_test_memory(tp) != 0) {
12508                         etest->flags |= ETH_TEST_FL_FAILED;
12509                         data[3] = 1;
12510                 }
12511
12512                 if (doextlpbk)
12513                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12514
12515                 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12516                         etest->flags |= ETH_TEST_FL_FAILED;
12517
12518                 tg3_full_unlock(tp);
12519
12520                 if (tg3_test_interrupt(tp) != 0) {
12521                         etest->flags |= ETH_TEST_FL_FAILED;
12522                         data[7] = 1;
12523                 }
12524
12525                 tg3_full_lock(tp, 0);
12526
12527                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12528                 if (netif_running(dev)) {
12529                         tg3_flag_set(tp, INIT_COMPLETE);
12530                         err2 = tg3_restart_hw(tp, 1);
12531                         if (!err2)
12532                                 tg3_netif_start(tp);
12533                 }
12534
12535                 tg3_full_unlock(tp);
12536
12537                 if (irq_sync && !err2)
12538                         tg3_phy_start(tp);
12539         }
12540         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12541                 tg3_power_down(tp);
12542
12543 }
12544
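/* MII ioctl handler.  With phylib the request is forwarded to the PHY
 * layer; otherwise SIOCGMIIREG/SIOCSMIIREG go through tg3_readphy() and
 * tg3_writephy() under tp->lock.
 */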
12545 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12546 {
12547         struct mii_ioctl_data *data = if_mii(ifr);
12548         struct tg3 *tp = netdev_priv(dev);
12549         int err;
12550
12551         if (tg3_flag(tp, USE_PHYLIB)) {
12552                 struct phy_device *phydev;
12553                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12554                         return -EAGAIN;
12555                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12556                 return phy_mii_ioctl(phydev, ifr, cmd);
12557         }
12558
12559         switch (cmd) {
12560         case SIOCGMIIPHY:
12561                 data->phy_id = tp->phy_addr;
12562
12563                 /* fallthru */
12564         case SIOCGMIIREG: {
12565                 u32 mii_regval;
12566
12567                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12568                         break;                  /* We have no PHY */
12569
12570                 if (!netif_running(dev))
12571                         return -EAGAIN;
12572
12573                 spin_lock_bh(&tp->lock);
12574                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12575                 spin_unlock_bh(&tp->lock);
12576
12577                 data->val_out = mii_regval;
12578
12579                 return err;
12580         }
12581
12582         case SIOCSMIIREG:
12583                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12584                         break;                  /* We have no PHY */
12585
12586                 if (!netif_running(dev))
12587                         return -EAGAIN;
12588
12589                 spin_lock_bh(&tp->lock);
12590                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12591                 spin_unlock_bh(&tp->lock);
12592
12593                 return err;
12594
12595         default:
12596                 /* do nothing */
12597                 break;
12598         }
12599         return -EOPNOTSUPP;
12600 }
12601
12602 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12603 {
12604         struct tg3 *tp = netdev_priv(dev);
12605
12606         memcpy(ec, &tp->coal, sizeof(*ec));
12607         return 0;
12608 }
12609
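/* ethtool set_coalesce handler (e.g. ethtool -C).  Bounds-check the
 * request (5705+ parts lack the irq-event and stats-tick knobs, so
 * those must be zero there), reject settings that would generate no rx
 * or tx interrupts at all, then apply the new values to live hardware.
 */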
12610 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12611 {
12612         struct tg3 *tp = netdev_priv(dev);
12613         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12614         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12615
12616         if (!tg3_flag(tp, 5705_PLUS)) {
12617                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12618                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12619                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12620                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12621         }
12622
12623         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12624             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12625             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12626             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12627             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12628             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12629             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12630             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12631             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12632             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12633                 return -EINVAL;
12634
12635         /* No rx interrupts will be generated if both are zero */
12636         if ((ec->rx_coalesce_usecs == 0) &&
12637             (ec->rx_max_coalesced_frames == 0))
12638                 return -EINVAL;
12639
12640         /* No tx interrupts will be generated if both are zero */
12641         if ((ec->tx_coalesce_usecs == 0) &&
12642             (ec->tx_max_coalesced_frames == 0))
12643                 return -EINVAL;
12644
12645         /* Only copy relevant parameters, ignore all others. */
12646         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12647         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12648         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12649         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12650         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12651         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12652         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12653         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12654         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12655
12656         if (netif_running(dev)) {
12657                 tg3_full_lock(tp, 0);
12658                 __tg3_set_coalesce(tp, &tp->coal);
12659                 tg3_full_unlock(tp);
12660         }
12661         return 0;
12662 }
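
/* Userspace reaches tg3_set_coalesce() through the standard ethtool
 * coalescing interface, e.g. "ethtool -C eth0 rx-usecs 50 rx-frames 20"
 * (illustrative values); anything outside the bounds checked above is
 * rejected with -EINVAL.
 */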

static const struct ethtool_ops tg3_ethtool_ops = {
        .get_settings           = tg3_get_settings,
        .set_settings           = tg3_set_settings,
        .get_drvinfo            = tg3_get_drvinfo,
        .get_regs_len           = tg3_get_regs_len,
        .get_regs               = tg3_get_regs,
        .get_wol                = tg3_get_wol,
        .set_wol                = tg3_set_wol,
        .get_msglevel           = tg3_get_msglevel,
        .set_msglevel           = tg3_set_msglevel,
        .nway_reset             = tg3_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = tg3_get_eeprom_len,
        .get_eeprom             = tg3_get_eeprom,
        .set_eeprom             = tg3_set_eeprom,
        .get_ringparam          = tg3_get_ringparam,
        .set_ringparam          = tg3_set_ringparam,
        .get_pauseparam         = tg3_get_pauseparam,
        .set_pauseparam         = tg3_set_pauseparam,
        .self_test              = tg3_self_test,
        .get_strings            = tg3_get_strings,
        .set_phys_id            = tg3_set_phys_id,
        .get_ethtool_stats      = tg3_get_ethtool_stats,
        .get_coalesce           = tg3_get_coalesce,
        .set_coalesce           = tg3_set_coalesce,
        .get_sset_count         = tg3_get_sset_count,
        .get_rxnfc              = tg3_get_rxnfc,
        .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
        .get_rxfh_indir         = tg3_get_rxfh_indir,
        .set_rxfh_indir         = tg3_set_rxfh_indir,
        .get_channels           = tg3_get_channels,
        .set_channels           = tg3_set_channels,
        .get_ts_info            = ethtool_op_get_ts_info,
};

static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
                                                struct rtnl_link_stats64 *stats)
{
        struct tg3 *tp = netdev_priv(dev);

        spin_lock_bh(&tp->lock);
        if (!tp->hw_stats) {
                spin_unlock_bh(&tp->lock);
                return &tp->net_stats_prev;
        }

        tg3_get_nstats(tp, stats);
        spin_unlock_bh(&tp->lock);

        return stats;
}

static void tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        if (!netif_running(dev))
                return;

        tg3_full_lock(tp, 0);
        __tg3_set_rx_mode(dev);
        tg3_full_unlock(tp);
}

static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
                               int new_mtu)
{
        dev->mtu = new_mtu;

        if (new_mtu > ETH_DATA_LEN) {
                if (tg3_flag(tp, 5780_CLASS)) {
                        netdev_update_features(dev);
                        tg3_flag_clear(tp, TSO_CAPABLE);
                } else {
                        tg3_flag_set(tp, JUMBO_RING_ENABLE);
                }
        } else {
                if (tg3_flag(tp, 5780_CLASS)) {
                        tg3_flag_set(tp, TSO_CAPABLE);
                        netdev_update_features(dev);
                }
                tg3_flag_clear(tp, JUMBO_RING_ENABLE);
        }
}
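
/* Ordering note on tg3_set_mtu() (an inference from the flag usage, not
 * from chip documentation): 5780-class devices cannot do TSO together
 * with jumbo frames, so when growing past ETH_DATA_LEN the features are
 * recomputed before TSO_CAPABLE is cleared, and when shrinking back the
 * flag is set again before netdev_update_features() so TSO can be
 * re-offered.
 */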

static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
        struct tg3 *tp = netdev_priv(dev);
        int err, reset_phy = 0;

        if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is brought up.
                 */
                tg3_set_mtu(dev, tp, new_mtu);
                return 0;
        }

        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

        tg3_set_mtu(dev, tp, new_mtu);

        /* Reset PHY, otherwise the read DMA engine will be in a mode that
         * breaks all requests to 256 bytes.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
                reset_phy = 1;

        err = tg3_restart_hw(tp, reset_phy);

        if (!err)
                tg3_netif_start(tp);

        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);

        return err;
}
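
/* Illustrative usage: "ip link set dev eth0 mtu 9000" lands here via
 * ndo_change_mtu.  With the interface up, traffic is stopped, the chip
 * is halted and restarted around the MTU change, and the PHY is
 * restarted on success.
 */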

static const struct net_device_ops tg3_netdev_ops = {
        .ndo_open               = tg3_open,
        .ndo_stop               = tg3_close,
        .ndo_start_xmit         = tg3_start_xmit,
        .ndo_get_stats64        = tg3_get_stats64,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = tg3_set_rx_mode,
        .ndo_set_mac_address    = tg3_set_mac_addr,
        .ndo_do_ioctl           = tg3_ioctl,
        .ndo_tx_timeout         = tg3_tx_timeout,
        .ndo_change_mtu         = tg3_change_mtu,
        .ndo_fix_features       = tg3_fix_features,
        .ndo_set_features       = tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = tg3_poll_controller,
#endif
};

static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
        u32 cursize, val, magic;

        tp->nvram_size = EEPROM_CHIP_SIZE;

        if (tg3_nvram_read(tp, 0, &magic) != 0)
                return;

        if ((magic != TG3_EEPROM_MAGIC) &&
            ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
            ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
                return;

        /*
         * Size the chip by reading offsets at increasing powers of two.
         * When we encounter our validation signature, we know the addressing
         * has wrapped around, and thus have our chip size.
         */
        cursize = 0x10;

        while (cursize < tp->nvram_size) {
                if (tg3_nvram_read(tp, cursize, &val) != 0)
                        return;

                if (val == magic)
                        break;

                cursize <<= 1;
        }

        tp->nvram_size = cursize;
}
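
/* Worked example of the sizing loop above (illustrative only): on a
 * 512-byte part the reads at 0x10, 0x20, ... 0x100 return ordinary
 * data, while the read at 0x200 wraps around to offset 0 and returns
 * the magic signature again, so nvram_size ends up as 0x200.
 */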

static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
        u32 val;

        if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
                return;

        /* Selfboot format */
        if (val != TG3_EEPROM_MAGIC) {
                tg3_get_eeprom_size(tp);
                return;
        }

        if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
                if (val != 0) {
                        /* This is confusing.  We want to operate on the
                         * 16-bit value at offset 0xf2.  The tg3_nvram_read()
                         * call will read from NVRAM and byteswap the data
                         * according to the byteswapping settings for all
                         * other register accesses.  This ensures the data we
                         * want will always reside in the lower 16-bits.
                         * However, the data in NVRAM is in LE format, which
                         * means the data from the NVRAM read will always be
                         * opposite the endianness of the CPU.  The 16-bit
                         * byteswap then brings the data to CPU endianness.
                         */
                        tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
                        return;
                }
        }
        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
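
/* Illustrative walk-through of the swab16() above: if the size field at
 * 0xf2 holds 128 (0x0080, a size in KB), it arrives in the low half of
 * val byte-swapped as 0x8000; swab16() restores 0x0080 and the final
 * size is 128 * 1024 bytes.
 */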

static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);
        if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
                tg3_flag_set(tp, FLASH);
        } else {
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
            tg3_flag(tp, 5780_CLASS)) {
                switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
                case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
                        break;
                case FLASH_VENDOR_ATMEL_EEPROM:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_ST:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_SAIFUN:
                        tp->nvram_jedecnum = JEDEC_SAIFUN;
                        tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
                        break;
                case FLASH_VENDOR_SST_SMALL:
                case FLASH_VENDOR_SST_LARGE:
                        tp->nvram_jedecnum = JEDEC_SST;
                        tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
                        break;
                }
        } else {
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                tg3_flag_set(tp, NVRAM_BUFFERED);
        }
}

static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
        switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
        case FLASH_5752PAGE_SIZE_256:
                tp->nvram_pagesize = 256;
                break;
        case FLASH_5752PAGE_SIZE_512:
                tp->nvram_pagesize = 512;
                break;
        case FLASH_5752PAGE_SIZE_1K:
                tp->nvram_pagesize = 1024;
                break;
        case FLASH_5752PAGE_SIZE_2K:
                tp->nvram_pagesize = 2048;
                break;
        case FLASH_5752PAGE_SIZE_4K:
                tp->nvram_pagesize = 4096;
                break;
        case FLASH_5752PAGE_SIZE_264:
                tp->nvram_pagesize = 264;
                break;
        case FLASH_5752PAGE_SIZE_528:
                tp->nvram_pagesize = 528;
                break;
        }
}

static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27))
                tg3_flag_set(tp, PROTECTED_NVRAM);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
        case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                break;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                break;
        }

        if (tg3_flag(tp, FLASH)) {
                tg3_nvram_get_pagesize(tp, nvcfg1);
        } else {
                /* For eeprom, set pagesize to maximum eeprom size */
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }
}

static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tg3_flag_set(tp, PROTECTED_NVRAM);
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
        case FLASH_5755VENDOR_ATMEL_FLASH_1:
        case FLASH_5755VENDOR_ATMEL_FLASH_2:
        case FLASH_5755VENDOR_ATMEL_FLASH_3:
        case FLASH_5755VENDOR_ATMEL_FLASH_5:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 264;
                if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
                    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
                        tp->nvram_size = (protect ? 0x3e200 :
                                          TG3_NVRAM_SIZE_512KB);
                else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
                        tp->nvram_size = (protect ? 0x1f200 :
                                          TG3_NVRAM_SIZE_256KB);
                else
                        tp->nvram_size = (protect ? 0x1f200 :
                                          TG3_NVRAM_SIZE_128KB);
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
                        tp->nvram_size = (protect ?
                                          TG3_NVRAM_SIZE_64KB :
                                          TG3_NVRAM_SIZE_128KB);
                else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
                        tp->nvram_size = (protect ?
                                          TG3_NVRAM_SIZE_64KB :
                                          TG3_NVRAM_SIZE_256KB);
                else
                        tp->nvram_size = (protect ?
                                          TG3_NVRAM_SIZE_128KB :
                                          TG3_NVRAM_SIZE_512KB);
                break;
        }
}

static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
        case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                break;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
        case FLASH_5755VENDOR_ATMEL_FLASH_1:
        case FLASH_5755VENDOR_ATMEL_FLASH_2:
        case FLASH_5755VENDOR_ATMEL_FLASH_3:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 264;
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                break;
        }
}

static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tg3_flag_set(tp, PROTECTED_NVRAM);
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
        case FLASH_5761VENDOR_ATMEL_ADB021D:
        case FLASH_5761VENDOR_ATMEL_ADB041D:
        case FLASH_5761VENDOR_ATMEL_ADB081D:
        case FLASH_5761VENDOR_ATMEL_ADB161D:
        case FLASH_5761VENDOR_ATMEL_MDB021D:
        case FLASH_5761VENDOR_ATMEL_MDB041D:
        case FLASH_5761VENDOR_ATMEL_MDB081D:
        case FLASH_5761VENDOR_ATMEL_MDB161D:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
                tp->nvram_pagesize = 256;
                break;
        case FLASH_5761VENDOR_ST_A_M45PE20:
        case FLASH_5761VENDOR_ST_A_M45PE40:
        case FLASH_5761VENDOR_ST_A_M45PE80:
        case FLASH_5761VENDOR_ST_A_M45PE16:
        case FLASH_5761VENDOR_ST_M_M45PE20:
        case FLASH_5761VENDOR_ST_M_M45PE40:
        case FLASH_5761VENDOR_ST_M_M45PE80:
        case FLASH_5761VENDOR_ST_M_M45PE16:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                break;
        }

        if (protect) {
                tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
        } else {
                switch (nvcfg1) {
                case FLASH_5761VENDOR_ATMEL_ADB161D:
                case FLASH_5761VENDOR_ATMEL_MDB161D:
                case FLASH_5761VENDOR_ST_A_M45PE16:
                case FLASH_5761VENDOR_ST_M_M45PE16:
                        tp->nvram_size = TG3_NVRAM_SIZE_2MB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB081D:
                case FLASH_5761VENDOR_ATMEL_MDB081D:
                case FLASH_5761VENDOR_ST_A_M45PE80:
                case FLASH_5761VENDOR_ST_M_M45PE80:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB041D:
                case FLASH_5761VENDOR_ATMEL_MDB041D:
                case FLASH_5761VENDOR_ST_A_M45PE40:
                case FLASH_5761VENDOR_ST_M_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB021D:
                case FLASH_5761VENDOR_ATMEL_MDB021D:
                case FLASH_5761VENDOR_ST_A_M45PE20:
                case FLASH_5761VENDOR_ST_M_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                }
        }
}

static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
{
        tp->nvram_jedecnum = JEDEC_ATMEL;
        tg3_flag_set(tp, NVRAM_BUFFERED);
        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}

static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                return;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
        case FLASH_57780VENDOR_ATMEL_AT45DB011D:
        case FLASH_57780VENDOR_ATMEL_AT45DB011B:
        case FLASH_57780VENDOR_ATMEL_AT45DB021D:
        case FLASH_57780VENDOR_ATMEL_AT45DB021B:
        case FLASH_57780VENDOR_ATMEL_AT45DB041D:
        case FLASH_57780VENDOR_ATMEL_AT45DB041B:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                case FLASH_57780VENDOR_ATMEL_AT45DB011D:
                case FLASH_57780VENDOR_ATMEL_AT45DB011B:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                case FLASH_57780VENDOR_ATMEL_AT45DB021D:
                case FLASH_57780VENDOR_ATMEL_AT45DB021B:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_57780VENDOR_ATMEL_AT45DB041D:
                case FLASH_57780VENDOR_ATMEL_AT45DB041B:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                }
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ST_M45PE10:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                case FLASH_5752VENDOR_ST_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
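
/* Page sizes of 264 or 528 bytes correspond to Atmel DataFlash parts,
 * whose non-power-of-two pages need the address-translation path; for
 * every other page size NO_NVRAM_ADDR_TRANS is set above and addressing
 * is linear.  (Inferred from the flag usage, not from a datasheet.)
 */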


static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5717VENDOR_ATMEL_EEPROM:
        case FLASH_5717VENDOR_MICRO_EEPROM:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                return;
        case FLASH_5717VENDOR_ATMEL_MDB011D:
        case FLASH_5717VENDOR_ATMEL_ADB011B:
        case FLASH_5717VENDOR_ATMEL_ADB011D:
        case FLASH_5717VENDOR_ATMEL_MDB021D:
        case FLASH_5717VENDOR_ATMEL_ADB021B:
        case FLASH_5717VENDOR_ATMEL_ADB021D:
        case FLASH_5717VENDOR_ATMEL_45USPT:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5717VENDOR_ATMEL_MDB021D:
                        /* Detect size with tg3_get_nvram_size() */
                        break;
                case FLASH_5717VENDOR_ATMEL_ADB021B:
                case FLASH_5717VENDOR_ATMEL_ADB021D:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        case FLASH_5717VENDOR_ST_M_M25PE10:
        case FLASH_5717VENDOR_ST_A_M25PE10:
        case FLASH_5717VENDOR_ST_M_M45PE10:
        case FLASH_5717VENDOR_ST_A_M45PE10:
        case FLASH_5717VENDOR_ST_M_M25PE20:
        case FLASH_5717VENDOR_ST_A_M25PE20:
        case FLASH_5717VENDOR_ST_M_M45PE20:
        case FLASH_5717VENDOR_ST_A_M45PE20:
        case FLASH_5717VENDOR_ST_25USPT:
        case FLASH_5717VENDOR_ST_45USPT:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5717VENDOR_ST_M_M25PE20:
                case FLASH_5717VENDOR_ST_M_M45PE20:
                        /* Detect size with tg3_get_nvram_size() */
                        break;
                case FLASH_5717VENDOR_ST_A_M25PE20:
                case FLASH_5717VENDOR_ST_A_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, nvmpinstrp;

        nvcfg1 = tr32(NVRAM_CFG1);
        nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

        switch (nvmpinstrp) {
        case FLASH_5720_EEPROM_HD:
        case FLASH_5720_EEPROM_LD:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                if (nvmpinstrp == FLASH_5720_EEPROM_HD)
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                else
                        tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
                return;
        case FLASH_5720VENDOR_M_ATMEL_DB011D:
        case FLASH_5720VENDOR_A_ATMEL_DB011B:
        case FLASH_5720VENDOR_A_ATMEL_DB011D:
        case FLASH_5720VENDOR_M_ATMEL_DB021D:
        case FLASH_5720VENDOR_A_ATMEL_DB021B:
        case FLASH_5720VENDOR_A_ATMEL_DB021D:
        case FLASH_5720VENDOR_M_ATMEL_DB041D:
        case FLASH_5720VENDOR_A_ATMEL_DB041B:
        case FLASH_5720VENDOR_A_ATMEL_DB041D:
        case FLASH_5720VENDOR_M_ATMEL_DB081D:
        case FLASH_5720VENDOR_A_ATMEL_DB081D:
        case FLASH_5720VENDOR_ATMEL_45USPT:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvmpinstrp) {
                case FLASH_5720VENDOR_M_ATMEL_DB021D:
                case FLASH_5720VENDOR_A_ATMEL_DB021B:
                case FLASH_5720VENDOR_A_ATMEL_DB021D:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5720VENDOR_M_ATMEL_DB041D:
                case FLASH_5720VENDOR_A_ATMEL_DB041B:
                case FLASH_5720VENDOR_A_ATMEL_DB041D:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5720VENDOR_M_ATMEL_DB081D:
                case FLASH_5720VENDOR_A_ATMEL_DB081D:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        case FLASH_5720VENDOR_M_ST_M25PE10:
        case FLASH_5720VENDOR_M_ST_M45PE10:
        case FLASH_5720VENDOR_A_ST_M25PE10:
        case FLASH_5720VENDOR_A_ST_M45PE10:
        case FLASH_5720VENDOR_M_ST_M25PE20:
        case FLASH_5720VENDOR_M_ST_M45PE20:
        case FLASH_5720VENDOR_A_ST_M25PE20:
        case FLASH_5720VENDOR_A_ST_M45PE20:
        case FLASH_5720VENDOR_M_ST_M25PE40:
        case FLASH_5720VENDOR_M_ST_M45PE40:
        case FLASH_5720VENDOR_A_ST_M25PE40:
        case FLASH_5720VENDOR_A_ST_M45PE40:
        case FLASH_5720VENDOR_M_ST_M25PE80:
        case FLASH_5720VENDOR_M_ST_M45PE80:
        case FLASH_5720VENDOR_A_ST_M25PE80:
        case FLASH_5720VENDOR_A_ST_M45PE80:
        case FLASH_5720VENDOR_ST_25USPT:
        case FLASH_5720VENDOR_ST_45USPT:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvmpinstrp) {
                case FLASH_5720VENDOR_M_ST_M25PE20:
                case FLASH_5720VENDOR_M_ST_M45PE20:
                case FLASH_5720VENDOR_A_ST_M25PE20:
                case FLASH_5720VENDOR_A_ST_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5720VENDOR_M_ST_M25PE40:
                case FLASH_5720VENDOR_M_ST_M45PE40:
                case FLASH_5720VENDOR_A_ST_M25PE40:
                case FLASH_5720VENDOR_A_ST_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5720VENDOR_M_ST_M25PE80:
                case FLASH_5720VENDOR_M_ST_M45PE80:
                case FLASH_5720VENDOR_A_ST_M25PE80:
                case FLASH_5720VENDOR_A_ST_M45PE80:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
        tw32_f(GRC_EEPROM_ADDR,
             (EEPROM_ADDR_FSM_RESET |
              (EEPROM_DEFAULT_CLOCK_PERIOD <<
               EEPROM_ADDR_CLKPERD_SHIFT)));

        msleep(1);

        /* Enable seeprom accesses. */
        tw32_f(GRC_LOCAL_CTRL,
             tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
        udelay(100);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
                tg3_flag_set(tp, NVRAM);

                if (tg3_nvram_lock(tp)) {
                        netdev_warn(tp->dev,
                                    "Cannot get nvram lock, %s failed\n",
                                    __func__);
                        return;
                }
                tg3_enable_nvram_access(tp);

                tp->nvram_size = 0;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
                        tg3_get_5752_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
                        tg3_get_5755_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                        tg3_get_5787_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        tg3_get_5761_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                        tg3_get_5906_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
                         tg3_flag(tp, 57765_CLASS))
                        tg3_get_57780_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
                        tg3_get_5717_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
                        tg3_get_5720_nvram_info(tp);
                else
                        tg3_get_nvram_info(tp);

                if (tp->nvram_size == 0)
                        tg3_get_nvram_size(tp);

                tg3_disable_nvram_access(tp);
                tg3_nvram_unlock(tp);

        } else {
                tg3_flag_clear(tp, NVRAM);
                tg3_flag_clear(tp, NVRAM_BUFFERED);

                tg3_get_eeprom_size(tp);
        }
}

struct subsys_tbl_ent {
        u16 subsys_vendor, subsys_devid;
        u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
        /* Broadcom boards. */
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

        /* 3com boards. */
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

        /* DELL boards. */
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

        /* Compaq boards. */
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

        /* IBM boards. */
        { TG3PCI_SUBVENDOR_ID_IBM,
          TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
                if ((subsys_id_to_phy_id[i].subsys_vendor ==
                     tp->pdev->subsystem_vendor) &&
                    (subsys_id_to_phy_id[i].subsys_devid ==
                     tp->pdev->subsystem_device))
                        return &subsys_id_to_phy_id[i];
        }
        return NULL;
}
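
/* The table above is a last resort: tg3_phy_probe() consults it only
 * when the PHY ID cannot be read from the chip and no valid ID was
 * found in the EEPROM area.
 */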

static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
        u32 val;

        tp->phy_id = TG3_PHY_ID_INVALID;
        tp->led_ctrl = LED_CTRL_MODE_PHY_1;

        /* Assume an onboard device and WOL capable by default.  */
        tg3_flag_set(tp, EEPROM_WRITE_PROT);
        tg3_flag_set(tp, WOL_CAP);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
                        tg3_flag_clear(tp, EEPROM_WRITE_PROT);
                        tg3_flag_set(tp, IS_NIC);
                }
                val = tr32(VCPU_CFGSHDW);
                if (val & VCPU_CFGSHDW_ASPM_DBNC)
                        tg3_flag_set(tp, ASPM_WORKAROUND);
                if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
                    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
                        tg3_flag_set(tp, WOL_ENABLE);
                        device_set_wakeup_enable(&tp->pdev->dev, true);
                }
                goto done;
        }

        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg, led_cfg;
                u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
                int eeprom_phy_serdes = 0;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                tp->nic_sram_data_cfg = nic_cfg;

                tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
                ver >>= NIC_SRAM_DATA_VER_SHIFT;
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
                    (ver > 0) && (ver < 0x100))
                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

                if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
                    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
                        eeprom_phy_serdes = 1;

                tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
                if (nic_phy_id != 0) {
                        u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
                        u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

                        eeprom_phy_id  = (id1 >> 16) << 10;
                        eeprom_phy_id |= (id2 & 0xfc00) << 16;
                        eeprom_phy_id |= (id2 & 0x03ff) <<  0;
                } else
                        eeprom_phy_id = 0;

                tp->phy_id = eeprom_phy_id;
                if (eeprom_phy_serdes) {
                        if (!tg3_flag(tp, 5705_PLUS))
                                tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
                        else
                                tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
                }

                if (tg3_flag(tp, 5750_PLUS))
                        led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
                                    SHASTA_EXT_LED_MODE_MASK);
                else
                        led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

                switch (led_cfg) {
                default:
                case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
                        tp->led_ctrl = LED_CTRL_MODE_PHY_1;
                        break;

                case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
                        tp->led_ctrl = LED_CTRL_MODE_PHY_2;
                        break;

                case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
                        tp->led_ctrl = LED_CTRL_MODE_MAC;

                        /* Default to PHY_1_MODE if 0 (MAC_MODE) is
                         * read on some older 5700/5701 bootcode.
                         */
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5700 ||
                            GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5701)
                                tp->led_ctrl = LED_CTRL_MODE_PHY_1;

                        break;

                case SHASTA_EXT_LED_SHARED:
                        tp->led_ctrl = LED_CTRL_MODE_SHARED;
                        if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
                            tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
                                tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
                                                 LED_CTRL_MODE_PHY_2);
                        break;

                case SHASTA_EXT_LED_MAC:
                        tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
                        break;

                case SHASTA_EXT_LED_COMBO:
                        tp->led_ctrl = LED_CTRL_MODE_COMBO;
                        if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
                                tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
                                                 LED_CTRL_MODE_PHY_2);
                        break;

                }

                if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
                    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
                        tp->led_ctrl = LED_CTRL_MODE_PHY_2;

                if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
                        tp->led_ctrl = LED_CTRL_MODE_PHY_1;

                if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
                        tg3_flag_set(tp, EEPROM_WRITE_PROT);
                        if ((tp->pdev->subsystem_vendor ==
                             PCI_VENDOR_ID_ARIMA) &&
                            (tp->pdev->subsystem_device == 0x205a ||
                             tp->pdev->subsystem_device == 0x2063))
                                tg3_flag_clear(tp, EEPROM_WRITE_PROT);
                } else {
                        tg3_flag_clear(tp, EEPROM_WRITE_PROT);
                        tg3_flag_set(tp, IS_NIC);
                }

                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tg3_flag_set(tp, ENABLE_ASF);
                        if (tg3_flag(tp, 5750_PLUS))
                                tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
                }

                if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
                    tg3_flag(tp, 5750_PLUS))
                        tg3_flag_set(tp, ENABLE_APE);

                if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
                    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
                        tg3_flag_clear(tp, WOL_CAP);

                if (tg3_flag(tp, WOL_CAP) &&
                    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
                        tg3_flag_set(tp, WOL_ENABLE);
                        device_set_wakeup_enable(&tp->pdev->dev, true);
                }

                if (cfg2 & (1 << 17))
                        tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

                /* serdes signal pre-emphasis in register 0x590 set by
                 * bootcode if bit 18 is set
                 */
                if (cfg2 & (1 << 18))
                        tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

                if ((tg3_flag(tp, 57765_PLUS) ||
                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
                    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
                        tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

                if (tg3_flag(tp, PCI_EXPRESS) &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
                    !tg3_flag(tp, 57765_PLUS)) {
                        u32 cfg3;

                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
                        if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
                                tg3_flag_set(tp, ASPM_WORKAROUND);
                }

                if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
                        tg3_flag_set(tp, RGMII_INBAND_DISABLE);
                if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
                        tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
                if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
                        tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
        }
done:
        if (tg3_flag(tp, WOL_CAP))
                device_set_wakeup_enable(&tp->pdev->dev,
                                         tg3_flag(tp, WOL_ENABLE));
        else
                device_set_wakeup_capable(&tp->pdev->dev, false);
}

static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
        int i;
        u32 val;

        tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
        tw32(OTP_CTRL, cmd);

        /* Wait for up to 1 ms for command to execute. */
        for (i = 0; i < 100; i++) {
                val = tr32(OTP_STATUS);
                if (val & OTP_STATUS_CMD_DONE)
                        break;
                udelay(10);
        }

        return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}

/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
        u32 bhalf_otp, thalf_otp;

        tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

        if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
                return 0;

        tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

        if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
                return 0;

        thalf_otp = tr32(OTP_READ_DATA);

        tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

        if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
                return 0;

        bhalf_otp = tr32(OTP_READ_DATA);

        return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
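
/* Shape of the merge above (illustrative): with thalf_otp = 0xAAAAaaaa
 * and bhalf_otp = 0xbbbbBBBB, the function returns 0xaaaabbbb -- the
 * low half of the first OTP word followed by the high half of the
 * second.
 */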

static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
{
        u32 adv = ADVERTISED_Autoneg;

        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
                adv |= ADVERTISED_1000baseT_Half |
                       ADVERTISED_1000baseT_Full;

        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
                adv |= ADVERTISED_100baseT_Half |
                       ADVERTISED_100baseT_Full |
                       ADVERTISED_10baseT_Half |
                       ADVERTISED_10baseT_Full |
                       ADVERTISED_TP;
        else
                adv |= ADVERTISED_FIBRE;

        tp->link_config.advertising = adv;
        tp->link_config.speed = SPEED_UNKNOWN;
        tp->link_config.duplex = DUPLEX_UNKNOWN;
        tp->link_config.autoneg = AUTONEG_ENABLE;
        tp->link_config.active_speed = SPEED_UNKNOWN;
        tp->link_config.active_duplex = DUPLEX_UNKNOWN;

        tp->old_link = -1;
}
13861
13862 static int __devinit tg3_phy_probe(struct tg3 *tp)
13863 {
13864         u32 hw_phy_id_1, hw_phy_id_2;
13865         u32 hw_phy_id, hw_phy_id_masked;
13866         int err;
13867
13868         /* flow control autonegotiation is default behavior */
13869         tg3_flag_set(tp, PAUSE_AUTONEG);
13870         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13871
13872         if (tg3_flag(tp, ENABLE_APE)) {
13873                 switch (tp->pci_fn) {
13874                 case 0:
13875                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
13876                         break;
13877                 case 1:
13878                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
13879                         break;
13880                 case 2:
13881                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
13882                         break;
13883                 case 3:
13884                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
13885                         break;
13886                 }
13887         }
13888
13889         if (tg3_flag(tp, USE_PHYLIB))
13890                 return tg3_phy_init(tp);
13891
13892         /* Reading the PHY ID register can conflict with ASF
13893          * firmware access to the PHY hardware.
13894          */
13895         err = 0;
13896         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13897                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13898         } else {
13899                 /* Now read the physical PHY_ID from the chip and verify
13900                  * that it is sane.  If it doesn't look good, we fall back
13901                  * to either the hard-coded table based PHY_ID and failing
13902                  * that the value found in the eeprom area.
13903                  */
13904                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13905                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13906
13907                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13908                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13909                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13910
13911                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13912         }
13913
13914         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13915                 tp->phy_id = hw_phy_id;
13916                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13917                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13918                 else
13919                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13920         } else {
13921                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13922                         /* Do nothing; the PHY ID was already set up in
13923                          * tg3_get_eeprom_hw_cfg().
13924                          */
13925                 } else {
13926                         struct subsys_tbl_ent *p;
13927
13928                         /* No eeprom signature?  Try the hardcoded
13929                          * subsys device table.
13930                          */
13931                         p = tg3_lookup_by_subsys(tp);
13932                         if (!p)
13933                                 return -ENODEV;
13934
13935                         tp->phy_id = p->phy_id;
13936                         if (!tp->phy_id ||
13937                             tp->phy_id == TG3_PHY_ID_BCM8002)
13938                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13939                 }
13940         }
13941
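        /* Advertise EEE on the copper devices that support it: the
         * 5719 and 5720 families, 5718 devices that are not 5717 A0
         * silicon, and 57765 devices past the A0 revision.
         */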
13942         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13943             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13944              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13945              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13946               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13947              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13948               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13949                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13950
13951         tg3_phy_init_link_config(tp);
13952
13953         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13954             !tg3_flag(tp, ENABLE_APE) &&
13955             !tg3_flag(tp, ENABLE_ASF)) {
13956                 u32 bmsr, dummy;
13957
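                /* MII BMSR latches link-down events, so it is read
                 * twice: the first read clears any stale latched
                 * state, the second reflects the current link status.
                 */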
13958                 tg3_readphy(tp, MII_BMSR, &bmsr);
13959                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13960                     (bmsr & BMSR_LSTATUS))
13961                         goto skip_phy_reset;
13962
13963                 err = tg3_phy_reset(tp);
13964                 if (err)
13965                         return err;
13966
13967                 tg3_phy_set_wirespeed(tp);
13968
13969                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13970                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13971                                             tp->link_config.flowctrl);
13972
13973                         tg3_writephy(tp, MII_BMCR,
13974                                      BMCR_ANENABLE | BMCR_ANRESTART);
13975                 }
13976         }
13977
13978 skip_phy_reset:
13979         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13980                 err = tg3_init_5401phy_dsp(tp);
13981                 if (err)
13982                         return err;
13983
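                /* The duplicated call appears deliberate: the 5401
                 * DSP tweak is applied a second time once the first
                 * pass has succeeded.
                 */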
13984                 err = tg3_init_5401phy_dsp(tp);
13985         }
13986
13987         return err;
13988 }
13989
13990 static void __devinit tg3_read_vpd(struct tg3 *tp)
13991 {
13992         u8 *vpd_data;
13993         unsigned int block_end, rosize, len;
13994         u32 vpdlen;
13995         int j, i = 0;
13996
13997         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13998         if (!vpd_data)
13999                 goto out_no_vpd;
14000
14001         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14002         if (i < 0)
14003                 goto out_not_found;
14004
14005         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14006         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14007         i += PCI_VPD_LRDT_TAG_SIZE;
14008
14009         if (block_end > vpdlen)
14010                 goto out_not_found;
14011
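        /* "1028" is Dell's PCI vendor ID rendered as ASCII; on such
         * boards the VENDOR0 keyword carries a firmware version
         * string, which is copied into tp->fw_ver below.
         */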
14012         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14013                                       PCI_VPD_RO_KEYWORD_MFR_ID);
14014         if (j > 0) {
14015                 len = pci_vpd_info_field_size(&vpd_data[j]);
14016
14017                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14018                 if (j + len > block_end || len != 4 ||
14019                     memcmp(&vpd_data[j], "1028", 4))
14020                         goto partno;
14021
14022                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14023                                               PCI_VPD_RO_KEYWORD_VENDOR0);
14024                 if (j < 0)
14025                         goto partno;
14026
14027                 len = pci_vpd_info_field_size(&vpd_data[j]);
14028
14029                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14030                 if (j + len > block_end)
14031                         goto partno;
14032
14033                 memcpy(tp->fw_ver, &vpd_data[j], len);
14034                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
14035         }
14036
14037 partno:
14038         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14039                                       PCI_VPD_RO_KEYWORD_PARTNO);
14040         if (i < 0)
14041                 goto out_not_found;
14042
14043         len = pci_vpd_info_field_size(&vpd_data[i]);
14044
14045         i += PCI_VPD_INFO_FLD_HDR_SIZE;
14046         if (len > TG3_BPN_SIZE ||
14047             (len + i) > vpdlen)
14048                 goto out_not_found;
14049
14050         memcpy(tp->board_part_number, &vpd_data[i], len);
14051
14052 out_not_found:
14053         kfree(vpd_data);
14054         if (tp->board_part_number[0])
14055                 return;
14056
14057 out_no_vpd:
14058         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14059                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
14060                         strcpy(tp->board_part_number, "BCM5717");
14061                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14062                         strcpy(tp->board_part_number, "BCM5718");
14063                 else
14064                         goto nomatch;
14065         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
14066                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14067                         strcpy(tp->board_part_number, "BCM57780");
14068                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14069                         strcpy(tp->board_part_number, "BCM57760");
14070                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14071                         strcpy(tp->board_part_number, "BCM57790");
14072                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14073                         strcpy(tp->board_part_number, "BCM57788");
14074                 else
14075                         goto nomatch;
14076         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14077                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14078                         strcpy(tp->board_part_number, "BCM57761");
14079                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14080                         strcpy(tp->board_part_number, "BCM57765");
14081                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14082                         strcpy(tp->board_part_number, "BCM57781");
14083                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14084                         strcpy(tp->board_part_number, "BCM57785");
14085                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14086                         strcpy(tp->board_part_number, "BCM57791");
14087                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14088                         strcpy(tp->board_part_number, "BCM57795");
14089                 else
14090                         goto nomatch;
14091         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
14092                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14093                         strcpy(tp->board_part_number, "BCM57762");
14094                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14095                         strcpy(tp->board_part_number, "BCM57766");
14096                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14097                         strcpy(tp->board_part_number, "BCM57782");
14098                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14099                         strcpy(tp->board_part_number, "BCM57786");
14100                 else
14101                         goto nomatch;
14102         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14103                 strcpy(tp->board_part_number, "BCM95906");
14104         } else {
14105 nomatch:
14106                 strcpy(tp->board_part_number, "none");
14107         }
14108 }
14109
14110 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14111 {
14112         u32 val;
14113
14114         if (tg3_nvram_read(tp, offset, &val) ||
14115             (val & 0xfc000000) != 0x0c000000 ||
14116             tg3_nvram_read(tp, offset + 4, &val) ||
14117             val != 0)
14118                 return 0;
14119
14120         return 1;
14121 }
14122
14123 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
14124 {
14125         u32 val, offset, start, ver_offset;
14126         int i, dst_off;
14127         bool newver = false;
14128
14129         if (tg3_nvram_read(tp, 0xc, &offset) ||
14130             tg3_nvram_read(tp, 0x4, &start))
14131                 return;
14132
14133         offset = tg3_nvram_logical_addr(tp, offset);
14134
14135         if (tg3_nvram_read(tp, offset, &val))
14136                 return;
14137
14138         if ((val & 0xfc000000) == 0x0c000000) {
14139                 if (tg3_nvram_read(tp, offset + 4, &val))
14140                         return;
14141
14142                 if (val == 0)
14143                         newver = true;
14144         }
14145
14146         dst_off = strlen(tp->fw_ver);
14147
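        /* Two bootcode version formats exist: newer images store a
         * pointer to a 16-byte version string (read below), while
         * older ones encode major/minor numbers directly in the
         * TG3_NVM_PTREV_BCVER word.
         */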
14148         if (newver) {
14149                 if (TG3_VER_SIZE - dst_off < 16 ||
14150                     tg3_nvram_read(tp, offset + 8, &ver_offset))
14151                         return;
14152
14153                 offset = offset + ver_offset - start;
14154                 for (i = 0; i < 16; i += 4) {
14155                         __be32 v;
14156                         if (tg3_nvram_read_be32(tp, offset + i, &v))
14157                                 return;
14158
14159                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14160                 }
14161         } else {
14162                 u32 major, minor;
14163
14164                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14165                         return;
14166
14167                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14168                         TG3_NVM_BCVER_MAJSFT;
14169                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14170                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14171                          "v%d.%02d", major, minor);
14172         }
14173 }
14174
14175 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
14176 {
14177         u32 val, major, minor;
14178
14179         /* Use native endian representation */
14180         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14181                 return;
14182
14183         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14184                 TG3_NVM_HWSB_CFG1_MAJSFT;
14185         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14186                 TG3_NVM_HWSB_CFG1_MINSFT;
14187
14188         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
14189 }
14190
14191 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
14192 {
14193         u32 offset, major, minor, build;
14194
14195         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14196
14197         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14198                 return;
14199
14200         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14201         case TG3_EEPROM_SB_REVISION_0:
14202                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14203                 break;
14204         case TG3_EEPROM_SB_REVISION_2:
14205                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14206                 break;
14207         case TG3_EEPROM_SB_REVISION_3:
14208                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14209                 break;
14210         case TG3_EEPROM_SB_REVISION_4:
14211                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14212                 break;
14213         case TG3_EEPROM_SB_REVISION_5:
14214                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14215                 break;
14216         case TG3_EEPROM_SB_REVISION_6:
14217                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14218                 break;
14219         default:
14220                 return;
14221         }
14222
14223         if (tg3_nvram_read(tp, offset, &val))
14224                 return;
14225
14226         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14227                 TG3_EEPROM_SB_EDH_BLD_SHFT;
14228         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14229                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14230         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
14231
14232         if (minor > 99 || build > 26)
14233                 return;
14234
14235         offset = strlen(tp->fw_ver);
14236         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14237                  " v%d.%02d", major, minor);
14238
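        /* Builds 1..26 are rendered as a trailing letter, 'a' for
         * build 1 through 'z' for build 26.
         */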
14239         if (build > 0) {
14240                 offset = strlen(tp->fw_ver);
14241                 if (offset < TG3_VER_SIZE - 1)
14242                         tp->fw_ver[offset] = 'a' + build - 1;
14243         }
14244 }
14245
14246 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
14247 {
14248         u32 val, offset, start;
14249         int i, vlen;
14250
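        /* Walk the NVRAM directory looking for the ASF initialization
         * entry; it locates the management firmware image whose
         * version string is extracted below.
         */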
14251         for (offset = TG3_NVM_DIR_START;
14252              offset < TG3_NVM_DIR_END;
14253              offset += TG3_NVM_DIRENT_SIZE) {
14254                 if (tg3_nvram_read(tp, offset, &val))
14255                         return;
14256
14257                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14258                         break;
14259         }
14260
14261         if (offset == TG3_NVM_DIR_END)
14262                 return;
14263
14264         if (!tg3_flag(tp, 5705_PLUS))
14265                 start = 0x08000000;
14266         else if (tg3_nvram_read(tp, offset - 4, &start))
14267                 return;
14268
14269         if (tg3_nvram_read(tp, offset + 4, &offset) ||
14270             !tg3_fw_img_is_valid(tp, offset) ||
14271             tg3_nvram_read(tp, offset + 8, &val))
14272                 return;
14273
14274         offset += val - start;
14275
14276         vlen = strlen(tp->fw_ver);
14277
14278         tp->fw_ver[vlen++] = ',';
14279         tp->fw_ver[vlen++] = ' ';
14280
14281         for (i = 0; i < 4; i++) {
14282                 __be32 v;
14283                 if (tg3_nvram_read_be32(tp, offset, &v))
14284                         return;
14285
14286                 offset += sizeof(v);
14287
14288                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14289                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14290                         break;
14291                 }
14292
14293                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14294                 vlen += sizeof(v);
14295         }
14296 }
14297
14298 static void __devinit tg3_probe_ncsi(struct tg3 *tp)
14299 {
14300         u32 apedata;
14301
14302         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14303         if (apedata != APE_SEG_SIG_MAGIC)
14304                 return;
14305
14306         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14307         if (!(apedata & APE_FW_STATUS_READY))
14308                 return;
14309
14310         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14311                 tg3_flag_set(tp, APE_HAS_NCSI);
14312 }
14313
14314 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
14315 {
14316         int vlen;
14317         u32 apedata;
14318         char *fwtype;
14319
14320         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14321
14322         if (tg3_flag(tp, APE_HAS_NCSI))
14323                 fwtype = "NCSI";
14324         else
14325                 fwtype = "DASH";
14326
14327         vlen = strlen(tp->fw_ver);
14328
14329         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14330                  fwtype,
14331                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14332                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14333                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14334                  (apedata & APE_FW_VERSION_BLDMSK));
14335 }
14336
14337 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
14338 {
14339         u32 val;
14340         bool vpd_vers = false;
14341
14342         if (tp->fw_ver[0] != 0)
14343                 vpd_vers = true;
14344
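        /* "sb" denotes self-boot firmware (cf. the TG3_EEPROM_SB_*
         * format definitions); NVRAM-less devices can only be
         * self-boot, so there is no stored version to read.
         */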
14345         if (tg3_flag(tp, NO_NVRAM)) {
14346                 strcat(tp->fw_ver, "sb");
14347                 return;
14348         }
14349
14350         if (tg3_nvram_read(tp, 0, &val))
14351                 return;
14352
14353         if (val == TG3_EEPROM_MAGIC)
14354                 tg3_read_bc_ver(tp);
14355         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14356                 tg3_read_sb_ver(tp, val);
14357         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14358                 tg3_read_hwsb_ver(tp);
14359
14360         if (tg3_flag(tp, ENABLE_ASF)) {
14361                 if (tg3_flag(tp, ENABLE_APE)) {
14362                         tg3_probe_ncsi(tp);
14363                         if (!vpd_vers)
14364                                 tg3_read_dash_ver(tp);
14365                 } else if (!vpd_vers) {
14366                         tg3_read_mgmtfw_ver(tp);
14367                 }
14368         }
14369
14370         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
14371 }
14372
14373 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14374 {
14375         if (tg3_flag(tp, LRG_PROD_RING_CAP))
14376                 return TG3_RX_RET_MAX_SIZE_5717;
14377         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14378                 return TG3_RX_RET_MAX_SIZE_5700;
14379         else
14380                 return TG3_RX_RET_MAX_SIZE_5705;
14381 }
14382
14383 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14384         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14385         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14386         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14387         { },
14388 };
14389
14390 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14391 {
14392         struct pci_dev *peer;
14393         unsigned int func, devnr = tp->pdev->devfn & ~7;
14394
14395         for (func = 0; func < 8; func++) {
14396                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14397                 if (peer && peer != tp->pdev)
14398                         break;
14399                 pci_dev_put(peer);
14400         }
14401         /* 5704 can be configured in single-port mode; set peer to
14402          * tp->pdev in that case.
14403          */
14404         if (!peer) {
14405                 peer = tp->pdev;
14406                 return peer;
14407         }
14408
14409         /*
14410          * We don't need to keep the refcount elevated; there's no way
14411          * to remove one half of this device without removing the other.
14412          */
14413         pci_dev_put(peer);
14414
14415         return peer;
14416 }
14417
14418 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14419 {
14420         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
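        /* GET_CHIP_REV() and GET_ASIC_REV() strip off progressively
         * more low-order revision bits from this id; see tg3.h.
         */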
14421         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
14422                 u32 reg;
14423
14424                 /* All devices that use the alternate
14425                  * ASIC REV location have a CPMU.
14426                  */
14427                 tg3_flag_set(tp, CPMU_PRESENT);
14428
14429                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14430                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
14431                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
14432                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
14433                         reg = TG3PCI_GEN2_PRODID_ASICREV;
14434                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
14435                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
14436                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14437                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14438                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14439                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14440                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14441                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14442                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14443                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14444                         reg = TG3PCI_GEN15_PRODID_ASICREV;
14445                 else
14446                         reg = TG3PCI_PRODID_ASICREV;
14447
14448                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14449         }
14450
14451         /* Wrong chip ID in 5752 A0. This code can be removed later
14452          * as A0 is not in production.
14453          */
14454         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14455                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14456
14457         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14458             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14459             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14460                 tg3_flag_set(tp, 5717_PLUS);
14461
14462         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14463             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14464                 tg3_flag_set(tp, 57765_CLASS);
14465
14466         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14467                 tg3_flag_set(tp, 57765_PLUS);
14468
14469         /* Intentionally exclude ASIC_REV_5906 */
14470         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14471             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14472             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14473             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14474             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14475             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14476             tg3_flag(tp, 57765_PLUS))
14477                 tg3_flag_set(tp, 5755_PLUS);
14478
14479         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14480             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14481                 tg3_flag_set(tp, 5780_CLASS);
14482
14483         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14484             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14485             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14486             tg3_flag(tp, 5755_PLUS) ||
14487             tg3_flag(tp, 5780_CLASS))
14488                 tg3_flag_set(tp, 5750_PLUS);
14489
14490         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14491             tg3_flag(tp, 5750_PLUS))
14492                 tg3_flag_set(tp, 5705_PLUS);
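        /* Net effect of the above: 5717_PLUS and 57765_CLASS imply
         * 57765_PLUS, which implies 5755_PLUS, which implies
         * 5750_PLUS, which in turn implies 5705_PLUS.
         */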
14493 }
14494
14495 static int __devinit tg3_get_invariants(struct tg3 *tp)
14496 {
14497         u32 misc_ctrl_reg;
14498         u32 pci_state_reg, grc_misc_cfg;
14499         u32 val;
14500         u16 pci_cmd;
14501         int err;
14502
14503         /* Force memory write invalidate off.  If we leave it on,
14504          * then on 5700_BX chips we have to enable a workaround.
14505          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14506          * to match the cacheline size.  The Broadcom driver has this
14507          * workaround but turns MWI off all the time and so never uses
14508          * it.  This seems to suggest that the workaround is insufficient.
14509          */
14510         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14511         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14512         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14513
14514         /* Important! -- Make sure register accesses are byteswapped
14515          * correctly.  Also, for those chips that require it, make
14516          * sure that indirect register accesses are enabled before
14517          * the first operation.
14518          */
14519         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14520                               &misc_ctrl_reg);
14521         tp->misc_host_ctrl |= (misc_ctrl_reg &
14522                                MISC_HOST_CTRL_CHIPREV);
14523         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14524                                tp->misc_host_ctrl);
14525
14526         tg3_detect_asic_rev(tp, misc_ctrl_reg);
14527
14528         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14529          * we need to disable memory and use config. cycles
14530          * only to access all registers. The 5702/03 chips
14531          * can mistakenly decode the special cycles from the
14532          * ICH chipsets as memory write cycles, causing corruption
14533          * of register and memory space. Only certain ICH bridges
14534          * will drive special cycles with non-zero data during the
14535          * address phase which can fall within the 5703's address
14536          * range. This is not an ICH bug as the PCI spec allows
14537          * non-zero address during special cycles. However, only
14538          * these ICH bridges are known to drive non-zero addresses
14539          * during special cycles.
14540          *
14541          * Since special cycles do not cross PCI bridges, we only
14542          * enable this workaround if the 5703 is on the secondary
14543          * bus of these ICH bridges.
14544          */
14545         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14546             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14547                 static struct tg3_dev_id {
14548                         u32     vendor;
14549                         u32     device;
14550                         u32     rev;
14551                 } ich_chipsets[] = {
14552                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14553                           PCI_ANY_ID },
14554                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14555                           PCI_ANY_ID },
14556                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14557                           0xa },
14558                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14559                           PCI_ANY_ID },
14560                         { },
14561                 };
14562                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14563                 struct pci_dev *bridge = NULL;
14564
14565                 while (pci_id->vendor != 0) {
14566                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
14567                                                 bridge);
14568                         if (!bridge) {
14569                                 pci_id++;
14570                                 continue;
14571                         }
14572                         if (pci_id->rev != PCI_ANY_ID) {
14573                                 if (bridge->revision > pci_id->rev)
14574                                         continue;
14575                         }
14576                         if (bridge->subordinate &&
14577                             (bridge->subordinate->number ==
14578                              tp->pdev->bus->number)) {
14579                                 tg3_flag_set(tp, ICH_WORKAROUND);
14580                                 pci_dev_put(bridge);
14581                                 break;
14582                         }
14583                 }
14584         }
14585
14586         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14587                 static struct tg3_dev_id {
14588                         u32     vendor;
14589                         u32     device;
14590                 } bridge_chipsets[] = {
14591                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14592                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14593                         { },
14594                 };
14595                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14596                 struct pci_dev *bridge = NULL;
14597
14598                 while (pci_id->vendor != 0) {
14599                         bridge = pci_get_device(pci_id->vendor,
14600                                                 pci_id->device,
14601                                                 bridge);
14602                         if (!bridge) {
14603                                 pci_id++;
14604                                 continue;
14605                         }
14606                         if (bridge->subordinate &&
14607                             (bridge->subordinate->number <=
14608                              tp->pdev->bus->number) &&
14609                             (bridge->subordinate->busn_res.end >=
14610                              tp->pdev->bus->number)) {
14611                                 tg3_flag_set(tp, 5701_DMA_BUG);
14612                                 pci_dev_put(bridge);
14613                                 break;
14614                         }
14615                 }
14616         }
14617
14618         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14619          * DMA addresses > 40-bit. This bridge may have other additional
14620          * 57xx devices behind it in some 4-port NIC designs for example.
14621          * Any tg3 device found behind the bridge will also need the 40-bit
14622          * DMA workaround.
14623          */
14624         if (tg3_flag(tp, 5780_CLASS)) {
14625                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14626                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14627         } else {
14628                 struct pci_dev *bridge = NULL;
14629
14630                 do {
14631                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14632                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
14633                                                 bridge);
14634                         if (bridge && bridge->subordinate &&
14635                             (bridge->subordinate->number <=
14636                              tp->pdev->bus->number) &&
14637                             (bridge->subordinate->busn_res.end >=
14638                              tp->pdev->bus->number)) {
14639                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14640                                 pci_dev_put(bridge);
14641                                 break;
14642                         }
14643                 } while (bridge);
14644         }
14645
14646         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14647             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14648                 tp->pdev_peer = tg3_find_peer(tp);
14649
14650         /* Determine TSO capabilities */
14651         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14652                 ; /* Do nothing. HW bug. */
14653         else if (tg3_flag(tp, 57765_PLUS))
14654                 tg3_flag_set(tp, HW_TSO_3);
14655         else if (tg3_flag(tp, 5755_PLUS) ||
14656                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14657                 tg3_flag_set(tp, HW_TSO_2);
14658         else if (tg3_flag(tp, 5750_PLUS)) {
14659                 tg3_flag_set(tp, HW_TSO_1);
14660                 tg3_flag_set(tp, TSO_BUG);
14661                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14662                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14663                         tg3_flag_clear(tp, TSO_BUG);
14664         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14665                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14666                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14667                 tg3_flag_set(tp, TSO_BUG);
14668                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14669                         tp->fw_needed = FIRMWARE_TG3TSO5;
14670                 else
14671                         tp->fw_needed = FIRMWARE_TG3TSO;
14672         }
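        /* Summary of the chain above: 57765_PLUS parts use the
         * third-generation hardware TSO engine, 5755_PLUS and 5906
         * the second, 5750_PLUS the first (with an errata
         * workaround), and older chips fall back to firmware-based
         * TSO loaded from the FIRMWARE_TG3TSO images.
         */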
14673
14674         /* Selectively allow TSO based on operating conditions */
14675         if (tg3_flag(tp, HW_TSO_1) ||
14676             tg3_flag(tp, HW_TSO_2) ||
14677             tg3_flag(tp, HW_TSO_3) ||
14678             tp->fw_needed) {
14679                 /* For firmware TSO, assume ASF is disabled.
14680                  * We'll disable TSO later if we discover ASF
14681                  * is enabled in tg3_get_eeprom_hw_cfg().
14682                  */
14683                 tg3_flag_set(tp, TSO_CAPABLE);
14684         } else {
14685                 tg3_flag_clear(tp, TSO_CAPABLE);
14686                 tg3_flag_clear(tp, TSO_BUG);
14687                 tp->fw_needed = NULL;
14688         }
14689
14690         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14691                 tp->fw_needed = FIRMWARE_TG3;
14692
14693         tp->irq_max = 1;
14694
14695         if (tg3_flag(tp, 5750_PLUS)) {
14696                 tg3_flag_set(tp, SUPPORT_MSI);
14697                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14698                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14699                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14700                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14701                      tp->pdev_peer == tp->pdev))
14702                         tg3_flag_clear(tp, SUPPORT_MSI);
14703
14704                 if (tg3_flag(tp, 5755_PLUS) ||
14705                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14706                         tg3_flag_set(tp, 1SHOT_MSI);
14707                 }
14708
14709                 if (tg3_flag(tp, 57765_PLUS)) {
14710                         tg3_flag_set(tp, SUPPORT_MSIX);
14711                         tp->irq_max = TG3_IRQ_MAX_VECS;
14712                 }
14713         }
14714
14715         tp->txq_max = 1;
14716         tp->rxq_max = 1;
14717         if (tp->irq_max > 1) {
14718                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
14719                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
14720
14721                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14722                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14723                         tp->txq_max = tp->irq_max - 1;
14724         }
14725
14726         if (tg3_flag(tp, 5755_PLUS) ||
14727             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14728                 tg3_flag_set(tp, SHORT_DMA_BUG);
14729
14730         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14731                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14732
14733         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14734             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14735             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14736                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14737
14738         if (tg3_flag(tp, 57765_PLUS) &&
14739             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14740                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14741
14742         if (!tg3_flag(tp, 5705_PLUS) ||
14743             tg3_flag(tp, 5780_CLASS) ||
14744             tg3_flag(tp, USE_JUMBO_BDFLAG))
14745                 tg3_flag_set(tp, JUMBO_CAPABLE);
14746
14747         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14748                               &pci_state_reg);
14749
14750         if (pci_is_pcie(tp->pdev)) {
14751                 u16 lnkctl;
14752
14753                 tg3_flag_set(tp, PCI_EXPRESS);
14754
14755                 pci_read_config_word(tp->pdev,
14756                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14757                                      &lnkctl);
14758                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14759                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14760                             ASIC_REV_5906) {
14761                                 tg3_flag_clear(tp, HW_TSO_2);
14762                                 tg3_flag_clear(tp, TSO_CAPABLE);
14763                         }
14764                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14765                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14766                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14767                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14768                                 tg3_flag_set(tp, CLKREQ_BUG);
14769                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14770                         tg3_flag_set(tp, L1PLLPD_EN);
14771                 }
14772         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14773                 /* BCM5785 devices are effectively PCIe devices, and should
14774                  * follow PCIe codepaths, but do not have a PCIe capabilities
14775                  * section.
14776                  */
14777                 tg3_flag_set(tp, PCI_EXPRESS);
14778         } else if (!tg3_flag(tp, 5705_PLUS) ||
14779                    tg3_flag(tp, 5780_CLASS)) {
14780                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14781                 if (!tp->pcix_cap) {
14782                         dev_err(&tp->pdev->dev,
14783                                 "Cannot find PCI-X capability, aborting\n");
14784                         return -EIO;
14785                 }
14786
14787                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14788                         tg3_flag_set(tp, PCIX_MODE);
14789         }
14790
14791         /* If we have an AMD 762 or VIA K8T800 chipset, reordering
14792          * of mailbox register writes by the host controller can
14793          * cause serious problems.  We read back from every mailbox
14794          * register write to force the writes to be posted to the
14795          * chip in order.
14796          */
14797         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14798             !tg3_flag(tp, PCI_EXPRESS))
14799                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
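        /* Mailbox writes are then forced to post by reading the
         * mailbox back after each write; see the tg3_write32_tx_mbox()
         * and tg3_write_flush_reg32() handlers installed further down.
         */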
14800
14801         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14802                              &tp->pci_cacheline_sz);
14803         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14804                              &tp->pci_lat_timer);
14805         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14806             tp->pci_lat_timer < 64) {
14807                 tp->pci_lat_timer = 64;
14808                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14809                                       tp->pci_lat_timer);
14810         }
14811
14812         /* Important! -- It is critical that the PCI-X hw workaround
14813          * situation is decided before the first MMIO register access.
14814          */
14815         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14816                 /* 5700 BX chips need to have their TX producer index
14817                  * mailboxes written twice to workaround a bug.
14818                  * mailboxes written twice to work around a bug.
14819                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14820
14821                 /* If we are in PCI-X mode, enable register write workaround.
14822                  *
14823                  * The workaround is to use indirect register accesses
14824                  * for all chip writes not to mailbox registers.
14825                  */
14826                 if (tg3_flag(tp, PCIX_MODE)) {
14827                         u32 pm_reg;
14828
14829                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14830
14831                         /* The chip can have its power management PCI config
14832                          * space registers clobbered due to this bug.
14833                          * So explicitly force the chip into D0 here.
14834                          */
14835                         pci_read_config_dword(tp->pdev,
14836                                               tp->pm_cap + PCI_PM_CTRL,
14837                                               &pm_reg);
14838                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14839                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14840                         pci_write_config_dword(tp->pdev,
14841                                                tp->pm_cap + PCI_PM_CTRL,
14842                                                pm_reg);
14843
14844                         /* Also, force SERR#/PERR# in PCI command. */
14845                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14846                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14847                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14848                 }
14849         }
14850
14851         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14852                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14853         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14854                 tg3_flag_set(tp, PCI_32BIT);
14855
14856         /* Chip-specific fixup from Broadcom driver */
14857         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14858             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14859                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14860                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14861         }
14862
14863         /* Default fast path register access methods */
14864         tp->read32 = tg3_read32;
14865         tp->write32 = tg3_write32;
14866         tp->read32_mbox = tg3_read32;
14867         tp->write32_mbox = tg3_write32;
14868         tp->write32_tx_mbox = tg3_write32;
14869         tp->write32_rx_mbox = tg3_write32;
14870
14871         /* Various workaround register access methods */
14872         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14873                 tp->write32 = tg3_write_indirect_reg32;
14874         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14875                  (tg3_flag(tp, PCI_EXPRESS) &&
14876                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14877                 /*
14878                  * Back-to-back register writes can cause problems on these
14879                  * chips; the workaround is to read back all register writes
14880                  * except those to mailbox regs.
14881                  *
14882                  * See tg3_write_indirect_reg32().
14883                  */
14884                 tp->write32 = tg3_write_flush_reg32;
14885         }
14886
14887         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14888                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14889                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14890                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14891         }
14892
14893         if (tg3_flag(tp, ICH_WORKAROUND)) {
14894                 tp->read32 = tg3_read_indirect_reg32;
14895                 tp->write32 = tg3_write_indirect_reg32;
14896                 tp->read32_mbox = tg3_read_indirect_mbox;
14897                 tp->write32_mbox = tg3_write_indirect_mbox;
14898                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14899                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14900
14901                 iounmap(tp->regs);
14902                 tp->regs = NULL;
14903
14904                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14905                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14906                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14907         }
14908         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14909                 tp->read32_mbox = tg3_read32_mbox_5906;
14910                 tp->write32_mbox = tg3_write32_mbox_5906;
14911                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14912                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14913         }
14914
14915         if (tp->write32 == tg3_write_indirect_reg32 ||
14916             (tg3_flag(tp, PCIX_MODE) &&
14917              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14918               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14919                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14920
14921         /* The memory arbiter has to be enabled in order for SRAM accesses
14922          * to succeed.  Normally on powerup the tg3 chip firmware will make
14923          * sure it is enabled, but other entities such as system netboot
14924          * code might disable it.
14925          */
14926         val = tr32(MEMARB_MODE);
14927         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14928
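        /* Determine which PCI function this port is.  The devfn value
         * is the default; on PCI-X 5704/5780-class parts the PCI-X
         * status register is authoritative, and on 5717/5719/5720 the
         * function number is read from the CPMU status word in SRAM.
         */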
14929         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14930         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14931             tg3_flag(tp, 5780_CLASS)) {
14932                 if (tg3_flag(tp, PCIX_MODE)) {
14933                         pci_read_config_dword(tp->pdev,
14934                                               tp->pcix_cap + PCI_X_STATUS,
14935                                               &val);
14936                         tp->pci_fn = val & 0x7;
14937                 }
14938         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14939                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14940                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14941                     NIC_SRAM_CPMUSTAT_SIG) {
14942                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14943                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14944                 }
14945         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14946                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14947                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14948                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14949                     NIC_SRAM_CPMUSTAT_SIG) {
14950                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14951                                      TG3_CPMU_STATUS_FSHFT_5719;
14952                 }
14953         }
14954
14955         /* Get eeprom hw config before calling tg3_set_power_state().
14956          * In particular, the TG3_FLAG_IS_NIC flag must be
14957          * determined before calling tg3_set_power_state() so that
14958          * we know whether or not to switch out of Vaux power.
14959          * When the flag is set, it means that GPIO1 is used for eeprom
14960          * write protect and also implies that it is a LOM where GPIOs
14961          * are not used to switch power.
14962          */
14963         tg3_get_eeprom_hw_cfg(tp);
14964
14965         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14966                 tg3_flag_clear(tp, TSO_CAPABLE);
14967                 tg3_flag_clear(tp, TSO_BUG);
14968                 tp->fw_needed = NULL;
14969         }
14970
14971         if (tg3_flag(tp, ENABLE_APE)) {
14972                 /* Allow reads and writes to the
14973                  * APE register and memory space.
14974                  */
14975                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14976                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14977                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14978                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14979                                        pci_state_reg);
14980
14981                 tg3_ape_lock_init(tp);
14982         }
14983
14984         /* Set up tp->grc_local_ctrl before calling
14985          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14986          * will bring 5700's external PHY out of reset.
14987          * It is also used as eeprom write protect on LOMs.
14988          */
14989         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14990         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14991             tg3_flag(tp, EEPROM_WRITE_PROT))
14992                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14993                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14994         /* Unused GPIO3 must be driven as output on 5752 because there
14995          * are no pull-up resistors on unused GPIO pins.
14996          */
14997         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14998                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14999
15000         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15001             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
15002             tg3_flag(tp, 57765_CLASS))
15003                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15004
15005         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15006             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15007                 /* Turn off the debug UART. */
15008                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15009                 if (tg3_flag(tp, IS_NIC))
15010                         /* Keep VMain power. */
15011                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15012                                               GRC_LCLCTRL_GPIO_OUTPUT0;
15013         }
15014
15015         /* Switch out of Vaux if it is a NIC */
15016         tg3_pwrsrc_switch_to_vmain(tp);
15017
15018         /* Derive initial jumbo mode from MTU assigned in
15019          * ether_setup() via the alloc_etherdev() call
15020          */
15021         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15022                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15023
15024         /* Determine WakeOnLan speed to use. */
15025         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15026             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
15027             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
15028             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
15029                 tg3_flag_clear(tp, WOL_SPEED_100MB);
15030         } else {
15031                 tg3_flag_set(tp, WOL_SPEED_100MB);
15032         }
15033
15034         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15035                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15036
15037         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
15038         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15039             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15040              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
15041              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
15042             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15043             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15044                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15045
15046         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
15047             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
15048                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15049         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
15050                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15051
15052         if (tg3_flag(tp, 5705_PLUS) &&
15053             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15054             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
15055             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
15056             !tg3_flag(tp, 57765_PLUS)) {
15057                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15058                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
15059                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15060                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
15061                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15062                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15063                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15064                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15065                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15066                 } else
15067                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15068         }
15069
15070         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15071             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
15072                 tp->phy_otp = tg3_read_otp_phycfg(tp);
15073                 if (tp->phy_otp == 0)
15074                         tp->phy_otp = TG3_OTP_DEFAULT;
15075         }
15076
15077         if (tg3_flag(tp, CPMU_PRESENT))
15078                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15079         else
15080                 tp->mi_mode = MAC_MI_MODE_BASE;
15081
15082         tp->coalesce_mode = 0;
15083         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
15084             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
15085                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15086
15087         /* Set these bits to enable the statistics workaround. */
15088         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15089             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
15090             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
15091                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15092                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15093         }
15094
15095         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15096             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15097                 tg3_flag_set(tp, USE_PHYLIB);
15098
15099         err = tg3_mdio_init(tp);
15100         if (err)
15101                 return err;
15102
15103         /* Initialize data/descriptor byte/word swapping. */
15104         val = tr32(GRC_MODE);
15105         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15106                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15107                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
15108                         GRC_MODE_B2HRX_ENABLE |
15109                         GRC_MODE_HTX2B_ENABLE |
15110                         GRC_MODE_HOST_STACKUP);
15111         else
15112                 val &= GRC_MODE_HOST_STACKUP;
15113
15114         tw32(GRC_MODE, val | tp->grc_mode);
15115
15116         tg3_switch_clocks(tp);
15117
15118         /* Clear this out for sanity. */
15119         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15120
15121         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15122                               &pci_state_reg);
15123         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15124             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15125                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
15126
15127                 if (chiprevid == CHIPREV_ID_5701_A0 ||
15128                     chiprevid == CHIPREV_ID_5701_B0 ||
15129                     chiprevid == CHIPREV_ID_5701_B2 ||
15130                     chiprevid == CHIPREV_ID_5701_B5) {
15131                         void __iomem *sram_base;
15132
15133                         /* Write some dummy words into the SRAM status block
15134                          * area and see if they read back correctly.  If the
15135                          * value read back is bad, force-enable the PCIX workaround.
15136                          */
15137                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15138
15139                         writel(0x00000000, sram_base);
15140                         writel(0x00000000, sram_base + 4);
15141                         writel(0xffffffff, sram_base + 4);
15142                         if (readl(sram_base) != 0x00000000)
15143                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15144                 }
15145         }
15146
15147         udelay(50);
15148         tg3_nvram_init(tp);
15149
15150         grc_misc_cfg = tr32(GRC_MISC_CFG);
15151         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15152
15153         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15154             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15155              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15156                 tg3_flag_set(tp, IS_5788);
15157
15158         if (!tg3_flag(tp, IS_5788) &&
15159             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
15160                 tg3_flag_set(tp, TAGGED_STATUS);
15161         if (tg3_flag(tp, TAGGED_STATUS)) {
15162                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15163                                       HOSTCC_MODE_CLRTICK_TXBD);
15164
15165                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15166                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15167                                        tp->misc_host_ctrl);
15168         }
15169
15170         /* Preserve the APE MAC_MODE bits */
15171         if (tg3_flag(tp, ENABLE_APE))
15172                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15173         else
15174                 tp->mac_mode = 0;
15175
15176         /* these are limited to 10/100 only */
15177         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15178              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15179             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15180              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
15181              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
15182               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
15183               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
15184             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
15185              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
15186               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
15187               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
15188             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
15189             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15190             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15191             (tp->phy_flags & TG3_PHYFLG_IS_FET))
15192                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15193
15194         err = tg3_phy_probe(tp);
15195         if (err) {
15196                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15197                 /* ... but do not return immediately ... */
15198                 tg3_mdio_fini(tp);
15199         }
15200
15201         tg3_read_vpd(tp);
15202         tg3_read_fw_ver(tp);
15203
15204         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15205                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15206         } else {
15207                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15208                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15209                 else
15210                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15211         }
15212
15213         /* 5700 {AX,BX} chips have a broken status block link
15214          * change bit implementation, so we must use the
15215          * status register in those cases.
15216          */
15217         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15218                 tg3_flag_set(tp, USE_LINKCHG_REG);
15219         else
15220                 tg3_flag_clear(tp, USE_LINKCHG_REG);
15221
15222         /* The led_ctrl is set during tg3_phy_probe; here we might
15223          * have to force the link status polling mechanism based
15224          * upon subsystem IDs.
15225          */
15226         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15227             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15228             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15229                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15230                 tg3_flag_set(tp, USE_LINKCHG_REG);
15231         }
15232
15233         /* For all SERDES we poll the MAC status register. */
15234         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15235                 tg3_flag_set(tp, POLL_SERDES);
15236         else
15237                 tg3_flag_clear(tp, POLL_SERDES);
15238
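        /* NET_IP_ALIGN (typically 2) keeps the IP header 4-byte aligned.
         * The 5701 in PCI-X mode reportedly cannot DMA to such offset
         * buffers, hence the unpadded rx_offset below; and without
         * efficient unaligned access, the copy threshold is maxed so
         * every rx packet is copied into an aligned buffer.
         */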
15239         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15240         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15241         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15242             tg3_flag(tp, PCIX_MODE)) {
15243                 tp->rx_offset = NET_SKB_PAD;
15244 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15245                 tp->rx_copy_thresh = ~(u16)0;
15246 #endif
15247         }
15248
15249         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15250         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15251         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15252
15253         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15254
15255         /* Increment the rx prod index on the rx std ring by at most
15256          * 8 for these chips to work around hw errata.
15257          */
15258         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15259             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15260             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15261                 tp->rx_std_max_post = 8;
15262
15263         if (tg3_flag(tp, ASPM_WORKAROUND))
15264                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15265                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
15266
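        /* Note: err may still hold a tg3_phy_probe() failure from above;
         * the function deliberately finishes its setup before propagating
         * the error to the caller.
         */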
15267         return err;
15268 }
15269
15270 #ifdef CONFIG_SPARC
15271 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
15272 {
15273         struct net_device *dev = tp->dev;
15274         struct pci_dev *pdev = tp->pdev;
15275         struct device_node *dp = pci_device_to_OF_node(pdev);
15276         const unsigned char *addr;
15277         int len;
15278
15279         addr = of_get_property(dp, "local-mac-address", &len);
15280         if (addr && len == 6) {
15281                 memcpy(dev->dev_addr, addr, 6);
15282                 memcpy(dev->perm_addr, dev->dev_addr, 6);
15283                 return 0;
15284         }
15285         return -ENODEV;
15286 }
15287
15288 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
15289 {
15290         struct net_device *dev = tp->dev;
15291
15292         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15293         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
15294         return 0;
15295 }
15296 #endif
15297
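/* Lookup order used below: SPARC OF "local-mac-address" property, the
 * SRAM mailbox written by bootcode, NVRAM at mac_offset, the
 * MAC_ADDR_0_{HIGH,LOW} registers, and finally (SPARC only) the IDPROM.
 */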
15298 static int __devinit tg3_get_device_address(struct tg3 *tp)
15299 {
15300         struct net_device *dev = tp->dev;
15301         u32 hi, lo, mac_offset;
15302         int addr_ok = 0;
15303
15304 #ifdef CONFIG_SPARC
15305         if (!tg3_get_macaddr_sparc(tp))
15306                 return 0;
15307 #endif
15308
15309         mac_offset = 0x7c;
15310         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15311             tg3_flag(tp, 5780_CLASS)) {
15312                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15313                         mac_offset = 0xcc;
15314                 if (tg3_nvram_lock(tp))
15315                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15316                 else
15317                         tg3_nvram_unlock(tp);
15318         } else if (tg3_flag(tp, 5717_PLUS)) {
15319                 if (tp->pci_fn & 1)
15320                         mac_offset = 0xcc;
15321                 if (tp->pci_fn > 1)
15322                         mac_offset += 0x18c;
15323         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15324                 mac_offset = 0x10;
15325
15326         /* First try to get it from MAC address mailbox. */
15327         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
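        /* 0x484b is ASCII "HK"; bootcode appears to use it as the
         * "valid MAC address follows" signature.
         */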
15328         if ((hi >> 16) == 0x484b) {
15329                 dev->dev_addr[0] = (hi >>  8) & 0xff;
15330                 dev->dev_addr[1] = (hi >>  0) & 0xff;
15331
15332                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15333                 dev->dev_addr[2] = (lo >> 24) & 0xff;
15334                 dev->dev_addr[3] = (lo >> 16) & 0xff;
15335                 dev->dev_addr[4] = (lo >>  8) & 0xff;
15336                 dev->dev_addr[5] = (lo >>  0) & 0xff;
15337
15338                 /* Some old bootcode may report a 0 MAC address in SRAM */
15339                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15340         }
15341         if (!addr_ok) {
15342                 /* Next, try NVRAM. */
15343                 if (!tg3_flag(tp, NO_NVRAM) &&
15344                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15345                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
15346                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15347                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15348                 }
15349                 /* Finally just fetch it out of the MAC control regs. */
15350                 else {
15351                         hi = tr32(MAC_ADDR_0_HIGH);
15352                         lo = tr32(MAC_ADDR_0_LOW);
15353
15354                         dev->dev_addr[5] = lo & 0xff;
15355                         dev->dev_addr[4] = (lo >> 8) & 0xff;
15356                         dev->dev_addr[3] = (lo >> 16) & 0xff;
15357                         dev->dev_addr[2] = (lo >> 24) & 0xff;
15358                         dev->dev_addr[1] = hi & 0xff;
15359                         dev->dev_addr[0] = (hi >> 8) & 0xff;
15360                 }
15361         }
15362
15363         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
15364 #ifdef CONFIG_SPARC
15365                 if (!tg3_get_default_macaddr_sparc(tp))
15366                         return 0;
15367 #endif
15368                 return -EINVAL;
15369         }
15370         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
15371         return 0;
15372 }
15373
15374 #define BOUNDARY_SINGLE_CACHELINE       1
15375 #define BOUNDARY_MULTI_CACHELINE        2
15376
15377 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15378 {
15379         int cacheline_size;
15380         u8 byte;
15381         int goal;
15382
15383         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
15384         if (byte == 0)
15385                 cacheline_size = 1024;
15386         else
15387                 cacheline_size = (int) byte * 4;
15388
15389         /* On 5703 and later chips, the boundary bits have no
15390          * effect.
15391          */
15392         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15393             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15394             !tg3_flag(tp, PCI_EXPRESS))
15395                 goto out;
15396
15397 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
15398         goal = BOUNDARY_MULTI_CACHELINE;
15399 #else
15400 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
15401         goal = BOUNDARY_SINGLE_CACHELINE;
15402 #else
15403         goal = 0;
15404 #endif
15405 #endif
15406
15407         if (tg3_flag(tp, 57765_PLUS)) {
15408                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
15409                 goto out;
15410         }
15411
15412         if (!goal)
15413                 goto out;
15414
15415         /* PCI controllers on most RISC systems tend to disconnect
15416          * when a device tries to burst across a cache-line boundary.
15417          * Therefore, letting tg3 do so just wastes PCI bandwidth.
15418          *
15419          * Unfortunately, for PCI-E there are only limited
15420          * write-side controls for this, and thus for reads
15421          * we will still get the disconnects.  We'll also waste
15422          * these PCI cycles for both read and write for chips
15423          * other than 5700 and 5701 which do not implement the
15424          * boundary bits.
15425          */
15426         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
15427                 switch (cacheline_size) {
15428                 case 16:
15429                 case 32:
15430                 case 64:
15431                 case 128:
15432                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15433                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
15434                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
15435                         } else {
15436                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15437                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15438                         }
15439                         break;
15440
15441                 case 256:
15442                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
15443                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
15444                         break;
15445
15446                 default:
15447                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15448                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15449                         break;
15450                 }
15451         } else if (tg3_flag(tp, PCI_EXPRESS)) {
15452                 switch (cacheline_size) {
15453                 case 16:
15454                 case 32:
15455                 case 64:
15456                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15457                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15458                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15459                                 break;
15460                         }
15461                         /* fallthrough */
15462                 case 128:
15463                 default:
15464                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15465                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15466                         break;
15467                 }
15468         } else {
15469                 switch (cacheline_size) {
15470                 case 16:
15471                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15472                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
15473                                         DMA_RWCTRL_WRITE_BNDRY_16);
15474                                 break;
15475                         }
15476                         /* fallthrough */
15477                 case 32:
15478                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15479                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15480                                         DMA_RWCTRL_WRITE_BNDRY_32);
15481                                 break;
15482                         }
15483                         /* fallthrough */
15484                 case 64:
15485                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15486                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15487                                         DMA_RWCTRL_WRITE_BNDRY_64);
15488                                 break;
15489                         }
15490                         /* fallthrough */
15491                 case 128:
15492                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15493                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15494                                         DMA_RWCTRL_WRITE_BNDRY_128);
15495                                 break;
15496                         }
15497                         /* fallthrough */
15498                 case 256:
15499                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
15500                                 DMA_RWCTRL_WRITE_BNDRY_256);
15501                         break;
15502                 case 512:
15503                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
15504                                 DMA_RWCTRL_WRITE_BNDRY_512);
15505                         break;
15506                 case 1024:
15507                 default:
15508                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15509                                 DMA_RWCTRL_WRITE_BNDRY_1024);
15510                         break;
15511                 }
15512         }
15513
15514 out:
15515         return val;
15516 }
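/* Worked example for tg3_calc_dma_bndry() above (illustrative): on a
 * PCI-X bus with a 64-byte cache line and goal ==
 * BOUNDARY_SINGLE_CACHELINE, the first switch ORs in
 * DMA_RWCTRL_READ_BNDRY_128_PCIX | DMA_RWCTRL_WRITE_BNDRY_128_PCIX,
 * i.e. bursts are cut at 128 bytes.
 */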
15517
15518 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15519 {
15520         struct tg3_internal_buffer_desc test_desc;
15521         u32 sram_dma_descs;
15522         int i, ret;
15523
15524         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15525
15526         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15527         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15528         tw32(RDMAC_STATUS, 0);
15529         tw32(WDMAC_STATUS, 0);
15530
15531         tw32(BUFMGR_MODE, 0);
15532         tw32(FTQ_RESET, 0);
15533
15534         test_desc.addr_hi = ((u64) buf_dma) >> 32;
15535         test_desc.addr_lo = buf_dma & 0xffffffff;
15536         test_desc.nic_mbuf = 0x00002100;
15537         test_desc.len = size;
15538
15539         /*
15540          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
15541          * the *second* time the tg3 driver was getting loaded after an
15542          * initial scan.
15543          *
15544          * Broadcom tells me:
15545          *   ...the DMA engine is connected to the GRC block and a DMA
15546          *   reset may affect the GRC block in some unpredictable way...
15547          *   The behavior of resets to individual blocks has not been tested.
15548          *
15549          * Broadcom noted the GRC reset will also reset all sub-components.
15550          */
15551         if (to_device) {
15552                 test_desc.cqid_sqid = (13 << 8) | 2;
15553
15554                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15555                 udelay(40);
15556         } else {
15557                 test_desc.cqid_sqid = (16 << 8) | 7;
15558
15559                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15560                 udelay(40);
15561         }
15562         test_desc.flags = 0x00000005;
15563
15564         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15565                 u32 val;
15566
15567                 val = *(((u32 *)&test_desc) + i);
15568                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15569                                        sram_dma_descs + (i * sizeof(u32)));
15570                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15571         }
15572         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15573
15574         if (to_device)
15575                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15576         else
15577                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15578
15579         ret = -ENODEV;
15580         for (i = 0; i < 40; i++) {
15581                 u32 val;
15582
15583                 if (to_device)
15584                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15585                 else
15586                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15587                 if ((val & 0xffff) == sram_dma_descs) {
15588                         ret = 0;
15589                         break;
15590                 }
15591
15592                 udelay(100);
15593         }
15594
15595         return ret;
15596 }
15597
15598 #define TEST_BUFFER_SIZE        0x2000
15599
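#if 0
/* Minimal usage sketch (hypothetical helper, not part of the driver):
 * tg3_do_test_dma() above DMAs between the host buffer and NIC SRAM at
 * 0x2100; a round trip is one write pass followed by one read pass, as
 * tg3_test_dma() below does in its retry loop.
 */
static int tg3_dma_roundtrip(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma)
{
        int err;

        /* to_device == 1: host buffer -> NIC SRAM */
        err = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
        if (err)
                return err;
        /* to_device == 0: NIC SRAM -> host buffer */
        return tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
}
#endif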
15600 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15601         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15602         { },
15603 };
15604
15605 static int __devinit tg3_test_dma(struct tg3 *tp)
15606 {
15607         dma_addr_t buf_dma;
15608         u32 *buf, saved_dma_rwctrl;
15609         int ret = 0;
15610
15611         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15612                                  &buf_dma, GFP_KERNEL);
15613         if (!buf) {
15614                 ret = -ENOMEM;
15615                 goto out_nofree;
15616         }
15617
15618         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15619                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15620
15621         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15622
15623         if (tg3_flag(tp, 57765_PLUS))
15624                 goto out;
15625
15626         if (tg3_flag(tp, PCI_EXPRESS)) {
15627                 /* DMA read watermark not used on PCIE */
15628                 tp->dma_rwctrl |= 0x00180000;
15629         } else if (!tg3_flag(tp, PCIX_MODE)) {
15630                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15631                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15632                         tp->dma_rwctrl |= 0x003f0000;
15633                 else
15634                         tp->dma_rwctrl |= 0x003f000f;
15635         } else {
15636                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15637                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15638                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15639                         u32 read_water = 0x7;
15640
15641                         /* If the 5704 is behind the EPB bridge, we can
15642                          * do the less restrictive ONE_DMA workaround for
15643                          * better performance.
15644                          */
15645                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15646                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15647                                 tp->dma_rwctrl |= 0x8000;
15648                         else if (ccval == 0x6 || ccval == 0x7)
15649                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15650
15651                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15652                                 read_water = 4;
15653                         /* Set bit 23 to enable PCIX hw bug fix */
15654                         tp->dma_rwctrl |=
15655                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15656                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15657                                 (1 << 23);
15658                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15659                         /* 5780 always in PCIX mode */
15660                         tp->dma_rwctrl |= 0x00144000;
15661                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15662                         /* 5714 always in PCIX mode */
15663                         tp->dma_rwctrl |= 0x00148000;
15664                 } else {
15665                         tp->dma_rwctrl |= 0x001b000f;
15666                 }
15667         }
15668
15669         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15670             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15671                 tp->dma_rwctrl &= 0xfffffff0;
15672
15673         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15674             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15675                 /* Remove this if it causes problems for some boards. */
15676                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15677
15678                 /* On 5700/5701 chips, we need to set this bit.
15679                  * Otherwise the chip will issue cacheline transactions
15680                  * to streamable DMA memory without all the byte
15681                  * enables turned on.  This is an error on several
15682                  * RISC PCI controllers, in particular sparc64.
15683                  *
15684                  * On 5703/5704 chips, this bit has been reassigned
15685                  * a different meaning.  In particular, it is used
15686                  * on those chips to enable a PCI-X workaround.
15687                  */
15688                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15689         }
15690
15691         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15692
15693 #if 0
15694         /* Unneeded, already done by tg3_get_invariants.  */
15695         tg3_switch_clocks(tp);
15696 #endif
15697
15698         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15699             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15700                 goto out;
15701
15702         /* It is best to perform the DMA test with maximum write burst size
15703          * to expose the 5700/5701 write DMA bug.
15704          */
15705         saved_dma_rwctrl = tp->dma_rwctrl;
15706         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15707         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15708
15709         while (1) {
15710                 u32 *p = buf, i;
15711
15712                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15713                         p[i] = i;
15714
15715                 /* Send the buffer to the chip. */
15716                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15717                 if (ret) {
15718                         dev_err(&tp->pdev->dev,
15719                                 "%s: Buffer write failed. err = %d\n",
15720                                 __func__, ret);
15721                         break;
15722                 }
15723
15724 #if 0
15725                 /* validate data reached card RAM correctly. */
15726                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15727                         u32 val;
15728                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
15729                         if (le32_to_cpu(val) != p[i]) {
15730                                 dev_err(&tp->pdev->dev,
15731                                         "%s: Buffer corrupted on device! "
15732                                         "(%d != %d)\n", __func__, val, i);
15733                                 /* ret = -ENODEV here? */
15734                         }
15735                         p[i] = 0;
15736                 }
15737 #endif
15738                 /* Now read it back. */
15739                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15740                 if (ret) {
15741                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15742                                 "err = %d\n", __func__, ret);
15743                         break;
15744                 }
15745
15746                 /* Verify it. */
15747                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15748                         if (p[i] == i)
15749                                 continue;
15750
15751                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15752                             DMA_RWCTRL_WRITE_BNDRY_16) {
15753                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15754                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15755                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15756                                 break;
15757                         } else {
15758                                 dev_err(&tp->pdev->dev,
15759                                         "%s: Buffer corrupted on read back! "
15760                                         "(%d != %d)\n", __func__, p[i], i);
15761                                 ret = -ENODEV;
15762                                 goto out;
15763                         }
15764                 }
15765
15766                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15767                         /* Success. */
15768                         ret = 0;
15769                         break;
15770                 }
15771         }
15772         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15773             DMA_RWCTRL_WRITE_BNDRY_16) {
15774                 /* DMA test passed without adjusting DMA boundary,
15775                  * now look for chipsets that are known to expose the
15776                  * DMA bug without failing the test.
15777                  */
15778                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15779                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15780                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15781                 } else {
15782                         /* Safe to use the calculated DMA boundary. */
15783                         tp->dma_rwctrl = saved_dma_rwctrl;
15784                 }
15785
15786                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15787         }
15788
15789 out:
15790         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15791 out_nofree:
15792         return ret;
15793 }
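/* Strategy recap for tg3_test_dma() above: run the round trip with the
 * widest write bursts to provoke the 5700/5701 write-DMA bug; on a
 * corrupted read back, clamp the write boundary to 16 bytes and retry;
 * and even when the test passes, clamp anyway on chipsets known to
 * expose the bug without failing it.
 */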
15794
15795 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15796 {
15797         if (tg3_flag(tp, 57765_PLUS)) {
15798                 tp->bufmgr_config.mbuf_read_dma_low_water =
15799                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15800                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15801                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15802                 tp->bufmgr_config.mbuf_high_water =
15803                         DEFAULT_MB_HIGH_WATER_57765;
15804
15805                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15806                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15807                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15808                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15809                 tp->bufmgr_config.mbuf_high_water_jumbo =
15810                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15811         } else if (tg3_flag(tp, 5705_PLUS)) {
15812                 tp->bufmgr_config.mbuf_read_dma_low_water =
15813                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15814                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15815                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15816                 tp->bufmgr_config.mbuf_high_water =
15817                         DEFAULT_MB_HIGH_WATER_5705;
15818                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15819                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15820                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15821                         tp->bufmgr_config.mbuf_high_water =
15822                                 DEFAULT_MB_HIGH_WATER_5906;
15823                 }
15824
15825                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15826                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15827                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15828                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15829                 tp->bufmgr_config.mbuf_high_water_jumbo =
15830                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15831         } else {
15832                 tp->bufmgr_config.mbuf_read_dma_low_water =
15833                         DEFAULT_MB_RDMA_LOW_WATER;
15834                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15835                         DEFAULT_MB_MACRX_LOW_WATER;
15836                 tp->bufmgr_config.mbuf_high_water =
15837                         DEFAULT_MB_HIGH_WATER;
15838
15839                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15840                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15841                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15842                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15843                 tp->bufmgr_config.mbuf_high_water_jumbo =
15844                         DEFAULT_MB_HIGH_WATER_JUMBO;
15845         }
15846
15847         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15848         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15849 }
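/* These watermarks are later programmed into the BUFMGR_MB_* registers
 * when the hardware is brought up (see tg3_reset_hw() earlier in this
 * file).
 */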
15850
15851 static char * __devinit tg3_phy_string(struct tg3 *tp)
15852 {
15853         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15854         case TG3_PHY_ID_BCM5400:        return "5400";
15855         case TG3_PHY_ID_BCM5401:        return "5401";
15856         case TG3_PHY_ID_BCM5411:        return "5411";
15857         case TG3_PHY_ID_BCM5701:        return "5701";
15858         case TG3_PHY_ID_BCM5703:        return "5703";
15859         case TG3_PHY_ID_BCM5704:        return "5704";
15860         case TG3_PHY_ID_BCM5705:        return "5705";
15861         case TG3_PHY_ID_BCM5750:        return "5750";
15862         case TG3_PHY_ID_BCM5752:        return "5752";
15863         case TG3_PHY_ID_BCM5714:        return "5714";
15864         case TG3_PHY_ID_BCM5780:        return "5780";
15865         case TG3_PHY_ID_BCM5755:        return "5755";
15866         case TG3_PHY_ID_BCM5787:        return "5787";
15867         case TG3_PHY_ID_BCM5784:        return "5784";
15868         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15869         case TG3_PHY_ID_BCM5906:        return "5906";
15870         case TG3_PHY_ID_BCM5761:        return "5761";
15871         case TG3_PHY_ID_BCM5718C:       return "5718C";
15872         case TG3_PHY_ID_BCM5718S:       return "5718S";
15873         case TG3_PHY_ID_BCM57765:       return "57765";
15874         case TG3_PHY_ID_BCM5719C:       return "5719C";
15875         case TG3_PHY_ID_BCM5720C:       return "5720C";
15876         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15877         case 0:                 return "serdes";
15878         default:                return "unknown";
15879         }
15880 }
15881
15882 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15883 {
15884         if (tg3_flag(tp, PCI_EXPRESS)) {
15885                 strcpy(str, "PCI Express");
15886                 return str;
15887         } else if (tg3_flag(tp, PCIX_MODE)) {
15888                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15889
15890                 strcpy(str, "PCIX:");
15891
15892                 if ((clock_ctrl == 7) ||
15893                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15894                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15895                         strcat(str, "133MHz");
15896                 else if (clock_ctrl == 0)
15897                         strcat(str, "33MHz");
15898                 else if (clock_ctrl == 2)
15899                         strcat(str, "50MHz");
15900                 else if (clock_ctrl == 4)
15901                         strcat(str, "66MHz");
15902                 else if (clock_ctrl == 6)
15903                         strcat(str, "100MHz");
15904         } else {
15905                 strcpy(str, "PCI:");
15906                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15907                         strcat(str, "66MHz");
15908                 else
15909                         strcat(str, "33MHz");
15910         }
15911         if (tg3_flag(tp, PCI_32BIT))
15912                 strcat(str, ":32-bit");
15913         else
15914                 strcat(str, ":64-bit");
15915         return str;
15916 }
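/* Example results (illustrative): "PCI Express" (returned before the
 * width suffix is added), "PCIX:133MHz:64-bit", "PCI:66MHz:32-bit".
 */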
15917
15918 static void __devinit tg3_init_coal(struct tg3 *tp)
15919 {
15920         struct ethtool_coalesce *ec = &tp->coal;
15921
15922         memset(ec, 0, sizeof(*ec));
15923         ec->cmd = ETHTOOL_GCOALESCE;
15924         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15925         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15926         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15927         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15928         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15929         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15930         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15931         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15932         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15933
15934         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15935                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15936                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15937                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15938                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15939                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15940         }
15941
15942         if (tg3_flag(tp, 5705_PLUS)) {
15943                 ec->rx_coalesce_usecs_irq = 0;
15944                 ec->tx_coalesce_usecs_irq = 0;
15945                 ec->stats_block_coalesce_usecs = 0;
15946         }
15947 }
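/* These defaults are what userspace reads back via ETHTOOL_GCOALESCE,
 * e.g. "ethtool -c ethX" (illustrative device name); the driver's
 * get_coalesce ethtool op essentially copies tp->coal out.
 */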
15948
15949 static int __devinit tg3_init_one(struct pci_dev *pdev,
15950                                   const struct pci_device_id *ent)
15951 {
15952         struct net_device *dev;
15953         struct tg3 *tp;
15954         int i, err, pm_cap;
15955         u32 sndmbx, rcvmbx, intmbx;
15956         char str[40];
15957         u64 dma_mask, persist_dma_mask;
15958         netdev_features_t features = 0;
15959
15960         printk_once(KERN_INFO "%s\n", version);
15961
15962         err = pci_enable_device(pdev);
15963         if (err) {
15964                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15965                 return err;
15966         }
15967
15968         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15969         if (err) {
15970                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15971                 goto err_out_disable_pdev;
15972         }
15973
15974         pci_set_master(pdev);
15975
15976         /* Find power-management capability. */
15977         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15978         if (pm_cap == 0) {
15979                 dev_err(&pdev->dev,
15980                         "Cannot find Power Management capability, aborting\n");
15981                 err = -EIO;
15982                 goto err_out_free_res;
15983         }
15984
15985         err = pci_set_power_state(pdev, PCI_D0);
15986         if (err) {
15987                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15988                 goto err_out_free_res;
15989         }
15990
15991         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15992         if (!dev) {
15993                 err = -ENOMEM;
15994                 goto err_out_power_down;
15995         }
15996
15997         SET_NETDEV_DEV(dev, &pdev->dev);
15998
15999         tp = netdev_priv(dev);
16000         tp->pdev = pdev;
16001         tp->dev = dev;
16002         tp->pm_cap = pm_cap;
16003         tp->rx_mode = TG3_DEF_RX_MODE;
16004         tp->tx_mode = TG3_DEF_TX_MODE;
16005
16006         if (tg3_debug > 0)
16007                 tp->msg_enable = tg3_debug;
16008         else
16009                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16010
16011         /* The word/byte swap controls here govern register access byte
16012          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
16013          * setting below.
16014          */
16015         tp->misc_host_ctrl =
16016                 MISC_HOST_CTRL_MASK_PCI_INT |
16017                 MISC_HOST_CTRL_WORD_SWAP |
16018                 MISC_HOST_CTRL_INDIR_ACCESS |
16019                 MISC_HOST_CTRL_PCISTATE_RW;
16020
16021         /* The NONFRM (non-frame) byte/word swap controls take effect
16022          * on descriptor entries, i.e. anything that isn't packet data.
16023          *
16024          * The StrongARM chips on the board (one for tx, one for rx)
16025          * are running in big-endian mode.
16026          */
16027         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16028                         GRC_MODE_WSWAP_NONFRM_DATA);
16029 #ifdef __BIG_ENDIAN
16030         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16031 #endif
16032         spin_lock_init(&tp->lock);
16033         spin_lock_init(&tp->indirect_lock);
16034         INIT_WORK(&tp->reset_task, tg3_reset_task);
16035
16036         tp->regs = pci_ioremap_bar(pdev, BAR_0);
16037         if (!tp->regs) {
16038                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16039                 err = -ENOMEM;
16040                 goto err_out_free_dev;
16041         }
16042
16043         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16044             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16045             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16046             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16047             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16048             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16049             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16050             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
16051                 tg3_flag_set(tp, ENABLE_APE);
16052                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16053                 if (!tp->aperegs) {
16054                         dev_err(&pdev->dev,
16055                                 "Cannot map APE registers, aborting\n");
16056                         err = -ENOMEM;
16057                         goto err_out_iounmap;
16058                 }
16059         }
16060
16061         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16062         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16063
16064         dev->ethtool_ops = &tg3_ethtool_ops;
16065         dev->watchdog_timeo = TG3_TX_TIMEOUT;
16066         dev->netdev_ops = &tg3_netdev_ops;
16067         dev->irq = pdev->irq;
16068
16069         err = tg3_get_invariants(tp);
16070         if (err) {
16071                 dev_err(&pdev->dev,
16072                         "Problem fetching invariants of chip, aborting\n");
16073                 goto err_out_apeunmap;
16074         }
16075
16076         /* The EPB bridge inside 5714, 5715, and 5780 and any
16077          * device behind the EPB cannot support DMA addresses > 40-bit.
16078          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16079          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16080          * do DMA address check in tg3_start_xmit().
16081          */
16082         if (tg3_flag(tp, IS_5788))
16083                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16084         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16085                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16086 #ifdef CONFIG_HIGHMEM
16087                 dma_mask = DMA_BIT_MASK(64);
16088 #endif
16089         } else
16090                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16091
16092         /* Configure DMA attributes. */
16093         if (dma_mask > DMA_BIT_MASK(32)) {
16094                 err = pci_set_dma_mask(pdev, dma_mask);
16095                 if (!err) {
16096                         features |= NETIF_F_HIGHDMA;
16097                         err = pci_set_consistent_dma_mask(pdev,
16098                                                           persist_dma_mask);
16099                         if (err < 0) {
16100                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16101                                         "DMA for consistent allocations\n");
16102                                 goto err_out_apeunmap;
16103                         }
16104                 }
16105         }
16106         if (err || dma_mask == DMA_BIT_MASK(32)) {
16107                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16108                 if (err) {
16109                         dev_err(&pdev->dev,
16110                                 "No usable DMA configuration, aborting\n");
16111                         goto err_out_apeunmap;
16112                 }
16113         }
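#if 0
        /* Sketch (not built): the same 64-then-32-bit fallback with the
         * consolidated DMA API of later kernels.  Note this collapses the
         * separate streaming/coherent masks that tg3 keeps distinct for
         * the 40-bit persist_dma_mask case above.
         */
        if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
            dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
                dev_err(&pdev->dev, "No usable DMA configuration\n");
                goto err_out_apeunmap;
        }
#endif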
16114
16115         tg3_init_bufmgr_config(tp);
16116
16117         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16118
16119         /* 5700 B0 chips do not support checksumming correctly due
16120          * to hardware bugs.
16121          */
16122         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
16123                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16124
16125                 if (tg3_flag(tp, 5755_PLUS))
16126                         features |= NETIF_F_IPV6_CSUM;
16127         }
16128
16129         /* TSO is on by default on chips that support hardware TSO.
16130          * Firmware TSO on older chips gives lower performance, so it
16131          * is off by default, but can be enabled using ethtool.
16132          */
16133         if ((tg3_flag(tp, HW_TSO_1) ||
16134              tg3_flag(tp, HW_TSO_2) ||
16135              tg3_flag(tp, HW_TSO_3)) &&
16136             (features & NETIF_F_IP_CSUM))
16137                 features |= NETIF_F_TSO;
16138         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16139                 if (features & NETIF_F_IPV6_CSUM)
16140                         features |= NETIF_F_TSO6;
16141                 if (tg3_flag(tp, HW_TSO_3) ||
16142                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
16143                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
16144                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
16145                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
16146                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
16147                         features |= NETIF_F_TSO_ECN;
16148         }
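        /* As noted above, firmware TSO on older chips stays off by
         * default but can be toggled from userspace, e.g.
         * "ethtool -K ethX tso on" (illustrative device name).
         */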
16149
16150         dev->features |= features;
16151         dev->vlan_features |= features;
16152
16153         /*
16154          * Add loopback capability only for a subset of devices that support
16155          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
16156          * loopback for the remaining devices.
16157          */
16158         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
16159             !tg3_flag(tp, CPMU_PRESENT))
16160                 /* Add the loopback capability */
16161                 features |= NETIF_F_LOOPBACK;
16162
16163         dev->hw_features |= features;
16164
16165         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
16166             !tg3_flag(tp, TSO_CAPABLE) &&
16167             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16168                 tg3_flag_set(tp, MAX_RXPEND_64);
16169                 tp->rx_pending = 63;
16170         }
16171
16172         err = tg3_get_device_address(tp);
16173         if (err) {
16174                 dev_err(&pdev->dev,
16175                         "Could not obtain valid ethernet address, aborting\n");
16176                 goto err_out_apeunmap;
16177         }
16178
16179         /*
16180          * Reset the chip in case the UNDI or EFI driver did not shut
16181          * down DMA; otherwise the DMA self test will enable WDMAC and
16182          * we'll see (spurious) pending DMA on the PCI bus at that point.
16183          */
16184         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16185             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16186                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16187                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16188         }
16189
16190         err = tg3_test_dma(tp);
16191         if (err) {
16192                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
16193                 goto err_out_apeunmap;
16194         }
16195
16196         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
16197         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
16198         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
16199         for (i = 0; i < tp->irq_max; i++) {
16200                 struct tg3_napi *tnapi = &tp->napi[i];
16201
16202                 tnapi->tp = tp;
16203                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
16204
16205                 tnapi->int_mbox = intmbx;
16206                 if (i <= 4)
16207                         intmbx += 0x8;
16208                 else
16209                         intmbx += 0x4;
16210
16211                 tnapi->consmbox = rcvmbx;
16212                 tnapi->prodmbox = sndmbx;
16213
16214                 if (i)
16215                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
16216                 else
16217                         tnapi->coal_now = HOSTCC_MODE_NOW;
16218
16219                 if (!tg3_flag(tp, SUPPORT_MSIX))
16220                         break;
16221
16222                 /*
16223                  * If we support MSIX, we'll be using RSS.  If we're using
16224                  * RSS, the first vector only handles link interrupts and the
16225                  * remaining vectors handle rx and tx interrupts.  Reuse the
16226          * mailbox values for the next iteration.  The values we set up
16227                  * above are still useful for the single vectored mode.
16228                  */
16229                 if (!i)
16230                         continue;
16231
16232                 rcvmbx += 0x8;
16233
16234                 if (sndmbx & 0x4)
16235                         sndmbx -= 0x4;
16236                 else
16237                         sndmbx += 0xc;
16238         }
16239
16240         tg3_init_coal(tp);
16241
16242         pci_set_drvdata(pdev, dev);
16243
16244         if (tg3_flag(tp, 5717_PLUS)) {
16245                 /* Resume a low-power mode */
16246                 tg3_frob_aux_power(tp, false);
16247         }
16248
16249         tg3_timer_init(tp);
16250
16251         err = register_netdev(dev);
16252         if (err) {
16253                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
16254                 goto err_out_apeunmap;
16255         }
16256
16257         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16258                     tp->board_part_number,
16259                     tp->pci_chip_rev_id,
16260                     tg3_bus_string(tp, str),
16261                     dev->dev_addr);
16262
16263         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
16264                 struct phy_device *phydev;
16265                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
16266                 netdev_info(dev,
16267                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16268                             phydev->drv->name, dev_name(&phydev->dev));
16269         } else {
16270                 char *ethtype;
16271
16272                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16273                         ethtype = "10/100Base-TX";
16274                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16275                         ethtype = "1000Base-SX";
16276                 else
16277                         ethtype = "10/100/1000Base-T";
16278
16279                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16280                             "(WireSpeed[%d], EEE[%d])\n",
16281                             tg3_phy_string(tp), ethtype,
16282                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16283                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
16284         }
16285
16286         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16287                     (dev->features & NETIF_F_RXCSUM) != 0,
16288                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
16289                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
16290                     tg3_flag(tp, ENABLE_ASF) != 0,
16291                     tg3_flag(tp, TSO_CAPABLE) != 0);
16292         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16293                     tp->dma_rwctrl,
16294                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16295                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
16296
16297         pci_save_state(pdev);
16298
16299         return 0;
16300
16301 err_out_apeunmap:
16302         if (tp->aperegs) {
16303                 iounmap(tp->aperegs);
16304                 tp->aperegs = NULL;
16305         }
16306
16307 err_out_iounmap:
16308         if (tp->regs) {
16309                 iounmap(tp->regs);
16310                 tp->regs = NULL;
16311         }
16312
16313 err_out_free_dev:
16314         free_netdev(dev);
16315
16316 err_out_power_down:
16317         pci_set_power_state(pdev, PCI_D3hot);
16318
16319 err_out_free_res:
16320         pci_release_regions(pdev);
16321
16322 err_out_disable_pdev:
16323         pci_disable_device(pdev);
16324         pci_set_drvdata(pdev, NULL);
16325         return err;
16326 }
16327
16328 static void __devexit tg3_remove_one(struct pci_dev *pdev)
16329 {
16330         struct net_device *dev = pci_get_drvdata(pdev);
16331
16332         if (dev) {
16333                 struct tg3 *tp = netdev_priv(dev);
16334
16335                 release_firmware(tp->fw);
16336
16337                 tg3_reset_task_cancel(tp);
16338
16339                 if (tg3_flag(tp, USE_PHYLIB)) {
16340                         tg3_phy_fini(tp);
16341                         tg3_mdio_fini(tp);
16342                 }
16343
16344                 unregister_netdev(dev);
16345                 if (tp->aperegs) {
16346                         iounmap(tp->aperegs);
16347                         tp->aperegs = NULL;
16348                 }
16349                 if (tp->regs) {
16350                         iounmap(tp->regs);
16351                         tp->regs = NULL;
16352                 }
16353                 free_netdev(dev);
16354                 pci_release_regions(pdev);
16355                 pci_disable_device(pdev);
16356                 pci_set_drvdata(pdev, NULL);
16357         }
16358 }
16359
16360 #ifdef CONFIG_PM_SLEEP
16361 static int tg3_suspend(struct device *device)
16362 {
16363         struct pci_dev *pdev = to_pci_dev(device);
16364         struct net_device *dev = pci_get_drvdata(pdev);
16365         struct tg3 *tp = netdev_priv(dev);
16366         int err;
16367
16368         if (!netif_running(dev))
16369                 return 0;
16370
16371         tg3_reset_task_cancel(tp);
16372         tg3_phy_stop(tp);
16373         tg3_netif_stop(tp);
16374
16375         tg3_timer_stop(tp);
16376
16377         tg3_full_lock(tp, 1);
16378         tg3_disable_ints(tp);
16379         tg3_full_unlock(tp);
16380
16381         netif_device_detach(dev);
16382
16383         tg3_full_lock(tp, 0);
16384         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16385         tg3_flag_clear(tp, INIT_COMPLETE);
16386         tg3_full_unlock(tp);
16387
16388         err = tg3_power_down_prepare(tp);
16389         if (err) {
16390                 int err2;
16391
16392                 tg3_full_lock(tp, 0);
16393
16394                 tg3_flag_set(tp, INIT_COMPLETE);
16395                 err2 = tg3_restart_hw(tp, 1);
16396                 if (err2)
16397                         goto out;
16398
16399                 tg3_timer_start(tp);
16400
16401                 netif_device_attach(dev);
16402                 tg3_netif_start(tp);
16403
16404 out:
16405                 tg3_full_unlock(tp);
16406
16407                 if (!err2)
16408                         tg3_phy_start(tp);
16409         }
16410
16411         return err;
16412 }
16413
16414 static int tg3_resume(struct device *device)
16415 {
16416         struct pci_dev *pdev = to_pci_dev(device);
16417         struct net_device *dev = pci_get_drvdata(pdev);
16418         struct tg3 *tp = netdev_priv(dev);
16419         int err;
16420
16421         if (!netif_running(dev))
16422                 return 0;
16423
16424         netif_device_attach(dev);
16425
16426         tg3_full_lock(tp, 0);
16427
16428         tg3_flag_set(tp, INIT_COMPLETE);
16429         err = tg3_restart_hw(tp, 1);
16430         if (err)
16431                 goto out;
16432
16433         tg3_timer_start(tp);
16434
16435         tg3_netif_start(tp);
16436
16437 out:
16438         tg3_full_unlock(tp);
16439
16440         if (!err)
16441                 tg3_phy_start(tp);
16442
16443         return err;
16444 }
16445
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

        netdev_info(netdev, "PCI I/O error detected\n");

        rtnl_lock();

        if (!netif_running(netdev))
                goto done;

        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        /* Want to make sure that the reset task doesn't run */
        tg3_reset_task_cancel(tp);

        netif_device_detach(netdev);

        /* Clean up software state, even if MMIO is blocked */
        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        tg3_full_unlock(tp);

done:
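        /*
         * On permanent failure there is nothing left to recover, so
         * report DISCONNECT.  Otherwise disable the device and let
         * the slot_reset callback attempt recovery after the reset.
         */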
        if (state == pci_channel_io_perm_failure)
                err = PCI_ERS_RESULT_DISCONNECT;
        else
                pci_disable_device(pdev);

        rtnl_unlock();

        return err;
}

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
        int err;

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
                goto done;
        }

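        /*
         * Restore the config space from the most recent
         * pci_save_state(), then save it again so that a subsequent
         * recovery cycle starts from a valid saved state.
         */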
        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);

        if (!netif_running(netdev)) {
                rc = PCI_ERS_RESULT_RECOVERED;
                goto done;
        }

        err = tg3_power_up(tp);
        if (err)
                goto done;

        rc = PCI_ERS_RESULT_RECOVERED;

done:
        rtnl_unlock();

        return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        int err;

        rtnl_lock();

        if (!netif_running(netdev))
                goto done;

        tg3_full_lock(tp, 0);
        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp, 1);
        tg3_full_unlock(tp);
        if (err) {
                netdev_err(netdev, "Cannot restart hardware after reset.\n");
                goto done;
        }

        netif_device_attach(netdev);

        tg3_timer_start(tp);

        tg3_netif_start(tp);

        tg3_phy_start(tp);

done:
        rtnl_unlock();
}

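/*
 * PCI error recovery callbacks, invoked in order during recovery:
 * error_detected() to quiesce the device, slot_reset() after the
 * slot has been reset, and resume() once traffic may flow again.
 */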
static struct pci_error_handlers tg3_err_handler = {
        .error_detected = tg3_io_error_detected,
        .slot_reset     = tg3_io_slot_reset,
        .resume         = tg3_io_resume
};

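/*
 * The driver plugs into the PCI core via this structure: the core
 * matches devices against tg3_pci_tbl and calls tg3_init_one() for
 * each match, tg3_remove_one() on removal, and the PM and error
 * handlers declared above as needed.
 */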
static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = __devexit_p(tg3_remove_one),
        .err_handler    = &tg3_err_handler,
        .driver.pm      = TG3_PM_OPS,
};

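/*
 * Module entry and exit points simply register and unregister the
 * PCI driver; all per-device setup happens in tg3_init_one().
 */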
static int __init tg3_init(void)
{
        return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
        pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);