/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */

#include <linux/config.h>

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC64
#include <asm/idprom.h>
#include <asm/oplib.h>
#include <asm/pbm.h>
#endif

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#ifdef NETIF_F_TSO
#define TG3_TSO_SUPPORT 1
#else
#define TG3_TSO_SUPPORT 0
#endif

#include "tg3.h"

#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.56"
#define DRV_MODULE_RELDATE      "Apr 1, 2006"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define TX_BUFFS_AVAIL(TP)                                              \
        ((TP)->tx_pending -                                             \
         (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
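
/* Editorial illustration, not part of the original driver: because the
 * ring sizes above are compile-time powers of two, an index wrap such as
 *
 *     next = (idx + 1) % TG3_TX_RING_SIZE;
 *
 * reduces to the mask form used by NEXT_TX() and TX_BUFFS_AVAIL():
 *
 *     next = (idx + 1) & (TG3_TX_RING_SIZE - 1);
 *
 * which is the reason these constants are kept visible to GCC rather
 * than stored in struct tg3.
 */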

#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST            6

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};

static struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return (readl(tp->regs + off));
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
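
/* Usage sketch (editorial illustration, not original code): the indirect
 * accessors above tunnel MMIO through PCI configuration space by pointing
 * TG3PCI_REG_BASE_ADDR at the register offset and then moving the data
 * through TG3PCI_REG_DATA, all under indirect_lock.  When these routines
 * are installed as tp->write32/tp->read32, a caller would simply do, for
 * some example register offset REG_OFF:
 *
 *     tg3_write_indirect_reg32(tp, REG_OFF, val);
 *     val = tg3_read_indirect_reg32(tp, REG_OFF);
 */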

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}

#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)               tp->read32(tp, reg)
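
/* Editorial note on the accessor macros above (illustrative, not from the
 * original source): tw32() posts a write through tp->write32; tw32_f()
 * normally reads the register back to flush the posted write (the
 * PCIX/ICH workaround paths take the non-posted write instead); and
 * tw32_wait_f() adds a usec delay for registers that are unsafe to read
 * back immediately, e.g.
 *
 *     tw32(MAC_MODE, mac_mode);                  plain (possibly posted) write
 *     tw32_f(MAC_MODE, mac_mode);                write plus read-back flush
 *     tw32_wait_f(GRC_LOCAL_CTRL, ctrl, 100);    write plus 100 usec settle
 */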

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

        /* Always leave this as zero. */
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
{
        /* If no workaround is needed, write to mem space directly */
        if (tp->write32 != tg3_write_indirect_reg32)
                tw32(NIC_SRAM_WIN_BASE + off, val);
        else
                tg3_write_mem(tp, off, val);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

        /* Always leave this as zero. */
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
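
/* Usage sketch (editorial, not in the original file): NIC SRAM is reached
 * through a window in PCI configuration space; tg3_write_mem() and
 * tg3_read_mem() point TG3PCI_MEM_WIN_BASE_ADDR at the SRAM offset, move
 * the word through TG3PCI_MEM_WIN_DATA, and then park the window back at
 * zero.  A caller polling the firmware mailbox, for instance, does:
 *
 *     u32 val;
 *     tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
 *
 * exactly as tg3_set_power_state() does later in this file.
 */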

static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               (tp->last_tag << 24));
        tg3_cond_int(tp);
}

static inline unsigned int tg3_has_work(struct tg3 *tp)
{
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
               TG3_FLAG_POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tp->tx_cons ||
            sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        netif_poll_disable(tp->dev);
        netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        netif_poll_enable(tp->dev);
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
                return;

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
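
/* Editorial illustration (not original code): tg3_readphy()/tg3_writephy()
 * compose an MI communication frame from the PHY address, the register
 * number and, for writes, the data, then poll MAC_MI_COM until the BUSY
 * bit clears.  A typical read-modify-write of a PHY register therefore
 * follows the pattern used throughout this driver:
 *
 *     u32 reg;
 *     if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg))
 *             tg3_writephy(tp, MII_TG3_EXT_CTRL, reg | SOME_BIT);
 *
 * where SOME_BIT stands in for whichever bit the caller wants to set.
 */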
745
746 static void tg3_phy_set_wirespeed(struct tg3 *tp)
747 {
748         u32 val;
749
750         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
751                 return;
752
753         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
754             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
755                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
756                              (val | (1 << 15) | (1 << 4)));
757 }
758
759 static int tg3_bmcr_reset(struct tg3 *tp)
760 {
761         u32 phy_control;
762         int limit, err;
763
764         /* OK, reset it, and poll the BMCR_RESET bit until it
765          * clears or we time out.
766          */
767         phy_control = BMCR_RESET;
768         err = tg3_writephy(tp, MII_BMCR, phy_control);
769         if (err != 0)
770                 return -EBUSY;
771
772         limit = 5000;
773         while (limit--) {
774                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
775                 if (err != 0)
776                         return -EBUSY;
777
778                 if ((phy_control & BMCR_RESET) == 0) {
779                         udelay(40);
780                         break;
781                 }
782                 udelay(10);
783         }
784         if (limit <= 0)
785                 return -EBUSY;
786
787         return 0;
788 }
789
790 static int tg3_wait_macro_done(struct tg3 *tp)
791 {
792         int limit = 100;
793
794         while (limit--) {
795                 u32 tmp32;
796
797                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
798                         if ((tmp32 & 0x1000) == 0)
799                                 break;
800                 }
801         }
802         if (limit <= 0)
803                 return -EBUSY;
804
805         return 0;
806 }
807
808 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
809 {
810         static const u32 test_pat[4][6] = {
811         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
812         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
813         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
814         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
815         };
816         int chan;
817
818         for (chan = 0; chan < 4; chan++) {
819                 int i;
820
821                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
822                              (chan * 0x2000) | 0x0200);
823                 tg3_writephy(tp, 0x16, 0x0002);
824
825                 for (i = 0; i < 6; i++)
826                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
827                                      test_pat[chan][i]);
828
829                 tg3_writephy(tp, 0x16, 0x0202);
830                 if (tg3_wait_macro_done(tp)) {
831                         *resetp = 1;
832                         return -EBUSY;
833                 }
834
835                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
836                              (chan * 0x2000) | 0x0200);
837                 tg3_writephy(tp, 0x16, 0x0082);
838                 if (tg3_wait_macro_done(tp)) {
839                         *resetp = 1;
840                         return -EBUSY;
841                 }
842
843                 tg3_writephy(tp, 0x16, 0x0802);
844                 if (tg3_wait_macro_done(tp)) {
845                         *resetp = 1;
846                         return -EBUSY;
847                 }
848
849                 for (i = 0; i < 6; i += 2) {
850                         u32 low, high;
851
852                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
853                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
854                             tg3_wait_macro_done(tp)) {
855                                 *resetp = 1;
856                                 return -EBUSY;
857                         }
858                         low &= 0x7fff;
859                         high &= 0x000f;
860                         if (low != test_pat[chan][i] ||
861                             high != test_pat[chan][i+1]) {
862                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
863                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
864                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
865
866                                 return -EBUSY;
867                         }
868                 }
869         }
870
871         return 0;
872 }
873
874 static int tg3_phy_reset_chanpat(struct tg3 *tp)
875 {
876         int chan;
877
878         for (chan = 0; chan < 4; chan++) {
879                 int i;
880
881                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
882                              (chan * 0x2000) | 0x0200);
883                 tg3_writephy(tp, 0x16, 0x0002);
884                 for (i = 0; i < 6; i++)
885                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
886                 tg3_writephy(tp, 0x16, 0x0202);
887                 if (tg3_wait_macro_done(tp))
888                         return -EBUSY;
889         }
890
891         return 0;
892 }
893
894 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
895 {
896         u32 reg32, phy9_orig;
897         int retries, do_phy_reset, err;
898
899         retries = 10;
900         do_phy_reset = 1;
901         do {
902                 if (do_phy_reset) {
903                         err = tg3_bmcr_reset(tp);
904                         if (err)
905                                 return err;
906                         do_phy_reset = 0;
907                 }
908
909                 /* Disable transmitter and interrupt.  */
910                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
911                         continue;
912
913                 reg32 |= 0x3000;
914                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
915
916                 /* Set full-duplex, 1000 mbps.  */
917                 tg3_writephy(tp, MII_BMCR,
918                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
919
920                 /* Set to master mode.  */
921                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
922                         continue;
923
924                 tg3_writephy(tp, MII_TG3_CTRL,
925                              (MII_TG3_CTRL_AS_MASTER |
926                               MII_TG3_CTRL_ENABLE_AS_MASTER));
927
928                 /* Enable SM_DSP_CLOCK and 6dB.  */
929                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
930
931                 /* Block the PHY control access.  */
932                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
933                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
934
935                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
936                 if (!err)
937                         break;
938         } while (--retries);
939
940         err = tg3_phy_reset_chanpat(tp);
941         if (err)
942                 return err;
943
944         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
945         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
946
947         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
948         tg3_writephy(tp, 0x16, 0x0000);
949
950         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
951             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
952                 /* Set Extended packet length bit for jumbo frames */
953                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
954         }
955         else {
956                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
957         }
958
959         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
960
961         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
962                 reg32 &= ~0x3000;
963                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
964         } else if (!err)
965                 err = -EBUSY;
966
967         return err;
968 }
969
970 /* This will reset the tigon3 PHY if there is no valid
971  * link unless the FORCE argument is non-zero.
972  */
973 static int tg3_phy_reset(struct tg3 *tp)
974 {
975         u32 phy_status;
976         int err;
977
978         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
979         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
980         if (err != 0)
981                 return -EBUSY;
982
983         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
984             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
985             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
986                 err = tg3_phy_reset_5703_4_5(tp);
987                 if (err)
988                         return err;
989                 goto out;
990         }
991
992         err = tg3_bmcr_reset(tp);
993         if (err)
994                 return err;
995
996 out:
997         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
998                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
999                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1000                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1001                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1002                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1003                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1004         }
1005         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1006                 tg3_writephy(tp, 0x1c, 0x8d68);
1007                 tg3_writephy(tp, 0x1c, 0x8d68);
1008         }
1009         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1010                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1011                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1012                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1013                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1014                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1015                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1016                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1017                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1018         }
1019         /* Set Extended packet length bit (bit 14) on all chips that */
1020         /* support jumbo frames */
1021         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1022                 /* Cannot do read-modify-write on 5401 */
1023                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1024         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1025                 u32 phy_reg;
1026
1027                 /* Set bit 14 with read-modify-write to preserve other bits */
1028                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1029                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1030                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1031         }
1032
1033         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1034          * jumbo frames transmission.
1035          */
1036         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1037                 u32 phy_reg;
1038
1039                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1040                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1041                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1042         }
1043
1044         tg3_phy_set_wirespeed(tp);
1045         return 0;
1046 }
1047
1048 static void tg3_frob_aux_power(struct tg3 *tp)
1049 {
1050         struct tg3 *tp_peer = tp;
1051
1052         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1053                 return;
1054
1055         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1056             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1057                 struct net_device *dev_peer;
1058
1059                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1060                 /* remove_one() may have been run on the peer. */
1061                 if (!dev_peer)
1062                         tp_peer = tp;
1063                 else
1064                         tp_peer = netdev_priv(dev_peer);
1065         }
1066
1067         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1068             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1069             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1070             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1071                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1072                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1073                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1074                                     (GRC_LCLCTRL_GPIO_OE0 |
1075                                      GRC_LCLCTRL_GPIO_OE1 |
1076                                      GRC_LCLCTRL_GPIO_OE2 |
1077                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1078                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1079                                     100);
1080                 } else {
1081                         u32 no_gpio2;
1082                         u32 grc_local_ctrl = 0;
1083
1084                         if (tp_peer != tp &&
1085                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1086                                 return;
1087
1088                         /* Workaround to prevent overdrawing Amps. */
1089                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1090                             ASIC_REV_5714) {
1091                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1092                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1093                                             grc_local_ctrl, 100);
1094                         }
1095
1096                         /* On 5753 and variants, GPIO2 cannot be used. */
1097                         no_gpio2 = tp->nic_sram_data_cfg &
1098                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1099
1100                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1101                                          GRC_LCLCTRL_GPIO_OE1 |
1102                                          GRC_LCLCTRL_GPIO_OE2 |
1103                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1104                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1105                         if (no_gpio2) {
1106                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1107                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1108                         }
1109                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1110                                                     grc_local_ctrl, 100);
1111
1112                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1113
1114                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1115                                                     grc_local_ctrl, 100);
1116
1117                         if (!no_gpio2) {
1118                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1119                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1120                                             grc_local_ctrl, 100);
1121                         }
1122                 }
1123         } else {
1124                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1125                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1126                         if (tp_peer != tp &&
1127                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1128                                 return;
1129
1130                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1131                                     (GRC_LCLCTRL_GPIO_OE1 |
1132                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1133
1134                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1135                                     GRC_LCLCTRL_GPIO_OE1, 100);
1136
1137                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1138                                     (GRC_LCLCTRL_GPIO_OE1 |
1139                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1140                 }
1141         }
1142 }
1143
1144 static int tg3_setup_phy(struct tg3 *, int);
1145
1146 #define RESET_KIND_SHUTDOWN     0
1147 #define RESET_KIND_INIT         1
1148 #define RESET_KIND_SUSPEND      2
1149
1150 static void tg3_write_sig_post_reset(struct tg3 *, int);
1151 static int tg3_halt_cpu(struct tg3 *, u32);
1152 static int tg3_nvram_lock(struct tg3 *);
1153 static void tg3_nvram_unlock(struct tg3 *);
1154
1155 static void tg3_power_down_phy(struct tg3 *tp)
1156 {
1157         /* The PHY should not be powered down on some chips because
1158          * of bugs.
1159          */
1160         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1161             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1162             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1163              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1164                 return;
1165         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1166 }
1167
1168 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1169 {
1170         u32 misc_host_ctrl;
1171         u16 power_control, power_caps;
1172         int pm = tp->pm_cap;
1173
1174         /* Make sure register accesses (indirect or otherwise)
1175          * will function correctly.
1176          */
1177         pci_write_config_dword(tp->pdev,
1178                                TG3PCI_MISC_HOST_CTRL,
1179                                tp->misc_host_ctrl);
1180
1181         pci_read_config_word(tp->pdev,
1182                              pm + PCI_PM_CTRL,
1183                              &power_control);
1184         power_control |= PCI_PM_CTRL_PME_STATUS;
1185         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1186         switch (state) {
1187         case PCI_D0:
1188                 power_control |= 0;
1189                 pci_write_config_word(tp->pdev,
1190                                       pm + PCI_PM_CTRL,
1191                                       power_control);
1192                 udelay(100);    /* Delay after power state change */
1193
1194                 /* Switch out of Vaux if it is not a LOM */
1195                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1196                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1197
1198                 return 0;
1199
1200         case PCI_D1:
1201                 power_control |= 1;
1202                 break;
1203
1204         case PCI_D2:
1205                 power_control |= 2;
1206                 break;
1207
1208         case PCI_D3hot:
1209                 power_control |= 3;
1210                 break;
1211
1212         default:
1213                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1214                        "requested.\n",
1215                        tp->dev->name, state);
1216                 return -EINVAL;
1217         };
1218
1219         power_control |= PCI_PM_CTRL_PME_ENABLE;
1220
1221         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1222         tw32(TG3PCI_MISC_HOST_CTRL,
1223              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1224
1225         if (tp->link_config.phy_is_low_power == 0) {
1226                 tp->link_config.phy_is_low_power = 1;
1227                 tp->link_config.orig_speed = tp->link_config.speed;
1228                 tp->link_config.orig_duplex = tp->link_config.duplex;
1229                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1230         }
1231
1232         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1233                 tp->link_config.speed = SPEED_10;
1234                 tp->link_config.duplex = DUPLEX_HALF;
1235                 tp->link_config.autoneg = AUTONEG_ENABLE;
1236                 tg3_setup_phy(tp, 0);
1237         }
1238
1239         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1240                 int i;
1241                 u32 val;
1242
1243                 for (i = 0; i < 200; i++) {
1244                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1245                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1246                                 break;
1247                         msleep(1);
1248                 }
1249         }
1250         tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1251                                              WOL_DRV_STATE_SHUTDOWN |
1252                                              WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1253
1254         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1255
1256         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1257                 u32 mac_mode;
1258
1259                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1260                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1261                         udelay(40);
1262
1263                         mac_mode = MAC_MODE_PORT_MODE_MII;
1264
1265                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1266                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1267                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1268                 } else {
1269                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1270                 }
1271
1272                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1273                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1274
1275                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1276                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1277                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1278
1279                 tw32_f(MAC_MODE, mac_mode);
1280                 udelay(100);
1281
1282                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1283                 udelay(10);
1284         }
1285
1286         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1287             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1288              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1289                 u32 base_val;
1290
1291                 base_val = tp->pci_clock_ctrl;
1292                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1293                              CLOCK_CTRL_TXCLK_DISABLE);
1294
1295                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1296                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1297         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1298                 /* do nothing */
1299         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1300                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1301                 u32 newbits1, newbits2;
1302
1303                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1304                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1305                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1306                                     CLOCK_CTRL_TXCLK_DISABLE |
1307                                     CLOCK_CTRL_ALTCLK);
1308                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1309                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1310                         newbits1 = CLOCK_CTRL_625_CORE;
1311                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1312                 } else {
1313                         newbits1 = CLOCK_CTRL_ALTCLK;
1314                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1315                 }
1316
1317                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1318                             40);
1319
1320                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1321                             40);
1322
1323                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1324                         u32 newbits3;
1325
1326                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1327                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1328                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1329                                             CLOCK_CTRL_TXCLK_DISABLE |
1330                                             CLOCK_CTRL_44MHZ_CORE);
1331                         } else {
1332                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1333                         }
1334
1335                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1336                                     tp->pci_clock_ctrl | newbits3, 40);
1337                 }
1338         }
1339
1340         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1341             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1342                 /* Turn off the PHY */
1343                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1344                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1345                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1346                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1347                         tg3_power_down_phy(tp);
1348                 }
1349         }
1350
1351         tg3_frob_aux_power(tp);
1352
1353         /* Workaround for unstable PLL clock */
1354         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1355             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1356                 u32 val = tr32(0x7d00);
1357
1358                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1359                 tw32(0x7d00, val);
1360                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1361                         int err;
1362
1363                         err = tg3_nvram_lock(tp);
1364                         tg3_halt_cpu(tp, RX_CPU_BASE);
1365                         if (!err)
1366                                 tg3_nvram_unlock(tp);
1367                 }
1368         }
1369
1370         /* Finally, set the new power state. */
1371         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1372         udelay(100);    /* Delay after power state change */
1373
1374         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1375
1376         return 0;
1377 }
1378
1379 static void tg3_link_report(struct tg3 *tp)
1380 {
1381         if (!netif_carrier_ok(tp->dev)) {
1382                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1383         } else {
1384                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1385                        tp->dev->name,
1386                        (tp->link_config.active_speed == SPEED_1000 ?
1387                         1000 :
1388                         (tp->link_config.active_speed == SPEED_100 ?
1389                          100 : 10)),
1390                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1391                         "full" : "half"));
1392
1393                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1394                        "%s for RX.\n",
1395                        tp->dev->name,
1396                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1397                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1398         }
1399 }
1400
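/* Resolve TX/RX flow control from the local and link-partner pause
 * advertisements (roughly the resolution rules of IEEE 802.3 Annex 28B)
 * and program MAC_RX_MODE/MAC_TX_MODE to match.  For MII serdes parts
 * the 1000BaseX pause bits are first mapped onto their 1000BaseT
 * equivalents.
 */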
1401 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1402 {
1403         u32 new_tg3_flags = 0;
1404         u32 old_rx_mode = tp->rx_mode;
1405         u32 old_tx_mode = tp->tx_mode;
1406
1407         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1408
1409                 /* Convert 1000BaseX flow control bits to 1000BaseT
1410                  * bits before resolving flow control.
1411                  */
1412                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1413                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1414                                        ADVERTISE_PAUSE_ASYM);
1415                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1416
1417                         if (local_adv & ADVERTISE_1000XPAUSE)
1418                                 local_adv |= ADVERTISE_PAUSE_CAP;
1419                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1420                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1421                         if (remote_adv & LPA_1000XPAUSE)
1422                                 remote_adv |= LPA_PAUSE_CAP;
1423                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1424                                 remote_adv |= LPA_PAUSE_ASYM;
1425                 }
1426
1427                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1428                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1429                                 if (remote_adv & LPA_PAUSE_CAP)
1430                                         new_tg3_flags |=
1431                                                 (TG3_FLAG_RX_PAUSE |
1432                                                 TG3_FLAG_TX_PAUSE);
1433                                 else if (remote_adv & LPA_PAUSE_ASYM)
1434                                         new_tg3_flags |=
1435                                                 (TG3_FLAG_RX_PAUSE);
1436                         } else {
1437                                 if (remote_adv & LPA_PAUSE_CAP)
1438                                         new_tg3_flags |=
1439                                                 (TG3_FLAG_RX_PAUSE |
1440                                                 TG3_FLAG_TX_PAUSE);
1441                         }
1442                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1443                         if ((remote_adv & LPA_PAUSE_CAP) &&
1444                         (remote_adv & LPA_PAUSE_ASYM))
1445                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1446                 }
1447
1448                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1449                 tp->tg3_flags |= new_tg3_flags;
1450         } else {
1451                 new_tg3_flags = tp->tg3_flags;
1452         }
1453
1454         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1455                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1456         else
1457                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1458
1459         if (old_rx_mode != tp->rx_mode) {
1460                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1461         }
1462
1463         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1464                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1465         else
1466                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1467
1468         if (old_tx_mode != tp->tx_mode) {
1469                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1470         }
1471 }
1472
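/* Decode the speed/duplex field of the Broadcom PHY auxiliary status
 * register into SPEED_xxx and DUPLEX_xxx values; unrecognized encodings
 * map to SPEED_INVALID and DUPLEX_INVALID.
 */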
1473 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1474 {
1475         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1476         case MII_TG3_AUX_STAT_10HALF:
1477                 *speed = SPEED_10;
1478                 *duplex = DUPLEX_HALF;
1479                 break;
1480
1481         case MII_TG3_AUX_STAT_10FULL:
1482                 *speed = SPEED_10;
1483                 *duplex = DUPLEX_FULL;
1484                 break;
1485
1486         case MII_TG3_AUX_STAT_100HALF:
1487                 *speed = SPEED_100;
1488                 *duplex = DUPLEX_HALF;
1489                 break;
1490
1491         case MII_TG3_AUX_STAT_100FULL:
1492                 *speed = SPEED_100;
1493                 *duplex = DUPLEX_FULL;
1494                 break;
1495
1496         case MII_TG3_AUX_STAT_1000HALF:
1497                 *speed = SPEED_1000;
1498                 *duplex = DUPLEX_HALF;
1499                 break;
1500
1501         case MII_TG3_AUX_STAT_1000FULL:
1502                 *speed = SPEED_1000;
1503                 *duplex = DUPLEX_FULL;
1504                 break;
1505
1506         default:
1507                 *speed = SPEED_INVALID;
1508                 *duplex = DUPLEX_INVALID;
1509                 break;
1510         }
1511 }
1512
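/* Program the copper PHY advertisement registers from tp->link_config:
 * a reduced 10 (and optionally 100) Mbps advertisement in low power
 * mode, the full capability set when autonegotiating, or a single
 * forced speed/duplex otherwise.  Finishes by either forcing the mode
 * or restarting autoneg through BMCR.
 */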
1513 static void tg3_phy_copper_begin(struct tg3 *tp)
1514 {
1515         u32 new_adv;
1516         int i;
1517
1518         if (tp->link_config.phy_is_low_power) {
1519                 /* Entering low power mode.  Disable gigabit and
1520                  * 100baseT advertisements.
1521                  */
1522                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1523
1524                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1525                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1526                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1527                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1528
1529                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1530         } else if (tp->link_config.speed == SPEED_INVALID) {
1531                 tp->link_config.advertising =
1532                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1533                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1534                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1535                          ADVERTISED_Autoneg | ADVERTISED_MII);
1536
1537                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1538                         tp->link_config.advertising &=
1539                                 ~(ADVERTISED_1000baseT_Half |
1540                                   ADVERTISED_1000baseT_Full);
1541
1542                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1543                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1544                         new_adv |= ADVERTISE_10HALF;
1545                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1546                         new_adv |= ADVERTISE_10FULL;
1547                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1548                         new_adv |= ADVERTISE_100HALF;
1549                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1550                         new_adv |= ADVERTISE_100FULL;
1551                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1552
1553                 if (tp->link_config.advertising &
1554                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1555                         new_adv = 0;
1556                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1557                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1558                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1559                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1560                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1561                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1562                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1563                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1564                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1565                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1566                 } else {
1567                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1568                 }
1569         } else {
1570                 /* Asking for a specific link mode. */
1571                 if (tp->link_config.speed == SPEED_1000) {
1572                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1573                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1574
1575                         if (tp->link_config.duplex == DUPLEX_FULL)
1576                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1577                         else
1578                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1579                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1580                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1581                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1582                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1583                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1584                 } else {
1585                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1586
1587                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1588                         if (tp->link_config.speed == SPEED_100) {
1589                                 if (tp->link_config.duplex == DUPLEX_FULL)
1590                                         new_adv |= ADVERTISE_100FULL;
1591                                 else
1592                                         new_adv |= ADVERTISE_100HALF;
1593                         } else {
1594                                 if (tp->link_config.duplex == DUPLEX_FULL)
1595                                         new_adv |= ADVERTISE_10FULL;
1596                                 else
1597                                         new_adv |= ADVERTISE_10HALF;
1598                         }
1599                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1600                 }
1601         }
1602
1603         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1604             tp->link_config.speed != SPEED_INVALID) {
1605                 u32 bmcr, orig_bmcr;
1606
1607                 tp->link_config.active_speed = tp->link_config.speed;
1608                 tp->link_config.active_duplex = tp->link_config.duplex;
1609
1610                 bmcr = 0;
1611                 switch (tp->link_config.speed) {
1612                 default:
1613                 case SPEED_10:
1614                         break;
1615
1616                 case SPEED_100:
1617                         bmcr |= BMCR_SPEED100;
1618                         break;
1619
1620                 case SPEED_1000:
1621                         bmcr |= TG3_BMCR_SPEED1000;
1622                         break;
1623                 }
1624
1625                 if (tp->link_config.duplex == DUPLEX_FULL)
1626                         bmcr |= BMCR_FULLDPLX;
1627
1628                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1629                     (bmcr != orig_bmcr)) {
1630                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1631                         for (i = 0; i < 1500; i++) {
1632                                 u32 tmp;
1633
1634                                 udelay(10);
1635                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1636                                     tg3_readphy(tp, MII_BMSR, &tmp))
1637                                         continue;
1638                                 if (!(tmp & BMSR_LSTATUS)) {
1639                                         udelay(40);
1640                                         break;
1641                                 }
1642                         }
1643                         tg3_writephy(tp, MII_BMCR, bmcr);
1644                         udelay(40);
1645                 }
1646         } else {
1647                 tg3_writephy(tp, MII_BMCR,
1648                              BMCR_ANENABLE | BMCR_ANRESTART);
1649         }
1650 }
1651
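/* Load vendor-specific DSP coefficients into the 5401 PHY through the
 * DSP address/read-write port register pair.  The magic values are
 * Broadcom-specific and are not otherwise documented here.
 */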
1652 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1653 {
1654         int err;
1655
1656         /* Turn off tap power management. */
1657         /* Set Extended packet length bit */
1658         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1659
1660         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1661         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1662
1663         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1664         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1665
1666         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1667         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1668
1669         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1670         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1671
1672         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1673         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1674
1675         udelay(40);
1676
1677         return err;
1678 }
1679
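/* Return 1 if the PHY is currently advertising every 10/100 mode (and,
 * unless the device is 10/100-only, every 1000BaseT mode as well);
 * return 0 otherwise or on a PHY read failure.
 */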
1680 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1681 {
1682         u32 adv_reg, all_mask;
1683
1684         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1685                 return 0;
1686
1687         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1688                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1689         if ((adv_reg & all_mask) != all_mask)
1690                 return 0;
1691         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1692                 u32 tg3_ctrl;
1693
1694                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1695                         return 0;
1696
1697                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1698                             MII_TG3_CTRL_ADV_1000_FULL);
1699                 if ((tg3_ctrl & all_mask) != all_mask)
1700                         return 0;
1701         }
1702         return 1;
1703 }
1704
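/* Bring up (or re-check) the link on a copper PHY: apply chip-specific
 * PHY workarounds, read the negotiated speed/duplex back from the aux
 * status register, resolve flow control, and update MAC_MODE and the
 * netdev carrier state to match.
 */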
1705 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1706 {
1707         int current_link_up;
1708         u32 bmsr, dummy;
1709         u16 current_speed;
1710         u8 current_duplex;
1711         int i, err;
1712
1713         tw32(MAC_EVENT, 0);
1714
1715         tw32_f(MAC_STATUS,
1716              (MAC_STATUS_SYNC_CHANGED |
1717               MAC_STATUS_CFG_CHANGED |
1718               MAC_STATUS_MI_COMPLETION |
1719               MAC_STATUS_LNKSTATE_CHANGED));
1720         udelay(40);
1721
1722         tp->mi_mode = MAC_MI_MODE_BASE;
1723         tw32_f(MAC_MI_MODE, tp->mi_mode);
1724         udelay(80);
1725
1726         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1727
1728         /* Some third-party PHYs need to be reset on link going
1729          * down.
1730          */
1731         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1732              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1733              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1734             netif_carrier_ok(tp->dev)) {
1735                 tg3_readphy(tp, MII_BMSR, &bmsr);
1736                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1737                     !(bmsr & BMSR_LSTATUS))
1738                         force_reset = 1;
1739         }
1740         if (force_reset)
1741                 tg3_phy_reset(tp);
1742
1743         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1744                 tg3_readphy(tp, MII_BMSR, &bmsr);
1745                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1746                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1747                         bmsr = 0;
1748
1749                 if (!(bmsr & BMSR_LSTATUS)) {
1750                         err = tg3_init_5401phy_dsp(tp);
1751                         if (err)
1752                                 return err;
1753
1754                         tg3_readphy(tp, MII_BMSR, &bmsr);
1755                         for (i = 0; i < 1000; i++) {
1756                                 udelay(10);
1757                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1758                                     (bmsr & BMSR_LSTATUS)) {
1759                                         udelay(40);
1760                                         break;
1761                                 }
1762                         }
1763
1764                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1765                             !(bmsr & BMSR_LSTATUS) &&
1766                             tp->link_config.active_speed == SPEED_1000) {
1767                                 err = tg3_phy_reset(tp);
1768                                 if (!err)
1769                                         err = tg3_init_5401phy_dsp(tp);
1770                                 if (err)
1771                                         return err;
1772                         }
1773                 }
1774         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1775                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1776                 /* 5701 {A0,B0} CRC bug workaround */
1777                 tg3_writephy(tp, 0x15, 0x0a75);
1778                 tg3_writephy(tp, 0x1c, 0x8c68);
1779                 tg3_writephy(tp, 0x1c, 0x8d68);
1780                 tg3_writephy(tp, 0x1c, 0x8c68);
1781         }
1782
1783         /* Clear pending interrupts... */
1784         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1785         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1786
1787         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1788                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1789         else
1790                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1791
1792         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1793             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1794                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1795                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1796                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1797                 else
1798                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1799         }
1800
1801         current_link_up = 0;
1802         current_speed = SPEED_INVALID;
1803         current_duplex = DUPLEX_INVALID;
1804
1805         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1806                 u32 val;
1807
1808                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1809                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1810                 if (!(val & (1 << 10))) {
1811                         val |= (1 << 10);
1812                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1813                         goto relink;
1814                 }
1815         }
1816
1817         bmsr = 0;
1818         for (i = 0; i < 100; i++) {
1819                 tg3_readphy(tp, MII_BMSR, &bmsr);
1820                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1821                     (bmsr & BMSR_LSTATUS))
1822                         break;
1823                 udelay(40);
1824         }
1825
1826         if (bmsr & BMSR_LSTATUS) {
1827                 u32 aux_stat, bmcr;
1828
1829                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1830                 for (i = 0; i < 2000; i++) {
1831                         udelay(10);
1832                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1833                             aux_stat)
1834                                 break;
1835                 }
1836
1837                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1838                                              &current_speed,
1839                                              &current_duplex);
1840
1841                 bmcr = 0;
1842                 for (i = 0; i < 200; i++) {
1843                         tg3_readphy(tp, MII_BMCR, &bmcr);
1844                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1845                                 continue;
1846                         if (bmcr && bmcr != 0x7fff)
1847                                 break;
1848                         udelay(10);
1849                 }
1850
1851                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1852                         if (bmcr & BMCR_ANENABLE) {
1853                                 current_link_up = 1;
1854
1855                                 /* Force autoneg restart if we are exiting
1856                                  * low power mode.
1857                                  */
1858                                 if (!tg3_copper_is_advertising_all(tp))
1859                                         current_link_up = 0;
1860                         } else {
1861                                 current_link_up = 0;
1862                         }
1863                 } else {
1864                         if (!(bmcr & BMCR_ANENABLE) &&
1865                             tp->link_config.speed == current_speed &&
1866                             tp->link_config.duplex == current_duplex) {
1867                                 current_link_up = 1;
1868                         } else {
1869                                 current_link_up = 0;
1870                         }
1871                 }
1872
1873                 tp->link_config.active_speed = current_speed;
1874                 tp->link_config.active_duplex = current_duplex;
1875         }
1876
1877         if (current_link_up == 1 &&
1878             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1879             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1880                 u32 local_adv, remote_adv;
1881
1882                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1883                         local_adv = 0;
1884                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1885
1886                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1887                         remote_adv = 0;
1888
1889                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1890
1891                 /* If we are not advertising full pause capability,
1892                  * something is wrong.  Bring the link down and reconfigure.
1893                  */
1894                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1895                         current_link_up = 0;
1896                 } else {
1897                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1898                 }
1899         }
1900 relink:
1901         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1902                 u32 tmp;
1903
1904                 tg3_phy_copper_begin(tp);
1905
1906                 tg3_readphy(tp, MII_BMSR, &tmp);
1907                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1908                     (tmp & BMSR_LSTATUS))
1909                         current_link_up = 1;
1910         }
1911
1912         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1913         if (current_link_up == 1) {
1914                 if (tp->link_config.active_speed == SPEED_100 ||
1915                     tp->link_config.active_speed == SPEED_10)
1916                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1917                 else
1918                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1919         } else
1920                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1921
1922         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1923         if (tp->link_config.active_duplex == DUPLEX_HALF)
1924                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1925
1926         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1927         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1928                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1929                     (current_link_up == 1 &&
1930                      tp->link_config.active_speed == SPEED_10))
1931                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1932         } else {
1933                 if (current_link_up == 1)
1934                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1935         }
1936
1937         /* ??? Without this setting Netgear GA302T PHY does not
1938          * ??? send/receive packets...
1939          */
1940         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1941             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1942                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1943                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1944                 udelay(80);
1945         }
1946
1947         tw32_f(MAC_MODE, tp->mac_mode);
1948         udelay(40);
1949
1950         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1951                 /* Polled via timer. */
1952                 tw32_f(MAC_EVENT, 0);
1953         } else {
1954                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1955         }
1956         udelay(40);
1957
1958         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1959             current_link_up == 1 &&
1960             tp->link_config.active_speed == SPEED_1000 &&
1961             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1962              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1963                 udelay(120);
1964                 tw32_f(MAC_STATUS,
1965                      (MAC_STATUS_SYNC_CHANGED |
1966                       MAC_STATUS_CFG_CHANGED));
1967                 udelay(40);
1968                 tg3_write_mem(tp,
1969                               NIC_SRAM_FIRMWARE_MBOX,
1970                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1971         }
1972
1973         if (current_link_up != netif_carrier_ok(tp->dev)) {
1974                 if (current_link_up)
1975                         netif_carrier_on(tp->dev);
1976                 else
1977                         netif_carrier_off(tp->dev);
1978                 tg3_link_report(tp);
1979         }
1980
1981         return 0;
1982 }
1983
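/* State for the software 1000BaseX autonegotiation machine used when
 * the hardware SG_DIG autoneg block is not in use; the MR_xxx flags
 * roughly mirror the management variables of IEEE 802.3 Clause 37.
 */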
1984 struct tg3_fiber_aneginfo {
1985         int state;
1986 #define ANEG_STATE_UNKNOWN              0
1987 #define ANEG_STATE_AN_ENABLE            1
1988 #define ANEG_STATE_RESTART_INIT         2
1989 #define ANEG_STATE_RESTART              3
1990 #define ANEG_STATE_DISABLE_LINK_OK      4
1991 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1992 #define ANEG_STATE_ABILITY_DETECT       6
1993 #define ANEG_STATE_ACK_DETECT_INIT      7
1994 #define ANEG_STATE_ACK_DETECT           8
1995 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1996 #define ANEG_STATE_COMPLETE_ACK         10
1997 #define ANEG_STATE_IDLE_DETECT_INIT     11
1998 #define ANEG_STATE_IDLE_DETECT          12
1999 #define ANEG_STATE_LINK_OK              13
2000 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2001 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2002
2003         u32 flags;
2004 #define MR_AN_ENABLE            0x00000001
2005 #define MR_RESTART_AN           0x00000002
2006 #define MR_AN_COMPLETE          0x00000004
2007 #define MR_PAGE_RX              0x00000008
2008 #define MR_NP_LOADED            0x00000010
2009 #define MR_TOGGLE_TX            0x00000020
2010 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2011 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2012 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2013 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2014 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2015 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2016 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2017 #define MR_TOGGLE_RX            0x00002000
2018 #define MR_NP_RX                0x00004000
2019
2020 #define MR_LINK_OK              0x80000000
2021
2022         unsigned long link_time, cur_time;
2023
2024         u32 ability_match_cfg;
2025         int ability_match_count;
2026
2027         char ability_match, idle_match, ack_match;
2028
2029         u32 txconfig, rxconfig;
2030 #define ANEG_CFG_NP             0x00000080
2031 #define ANEG_CFG_ACK            0x00000040
2032 #define ANEG_CFG_RF2            0x00000020
2033 #define ANEG_CFG_RF1            0x00000010
2034 #define ANEG_CFG_PS2            0x00000001
2035 #define ANEG_CFG_PS1            0x00008000
2036 #define ANEG_CFG_HD             0x00004000
2037 #define ANEG_CFG_FD             0x00002000
2038 #define ANEG_CFG_INVAL          0x00001f06
2039
2040 };
2041 #define ANEG_OK         0
2042 #define ANEG_DONE       1
2043 #define ANEG_TIMER_ENAB 2
2044 #define ANEG_FAILED     -1
2045
2046 #define ANEG_STATE_SETTLE_TIME  10000
2047
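/* Advance the software 1000BaseX autonegotiation state machine by one
 * tick (patterned after the IEEE 802.3 Clause 37 arbitration diagram).
 * Returns ANEG_OK or ANEG_TIMER_ENAB while negotiation is in progress,
 * ANEG_DONE on completion, or ANEG_FAILED.
 */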
2048 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2049                                    struct tg3_fiber_aneginfo *ap)
2050 {
2051         unsigned long delta;
2052         u32 rx_cfg_reg;
2053         int ret;
2054
2055         if (ap->state == ANEG_STATE_UNKNOWN) {
2056                 ap->rxconfig = 0;
2057                 ap->link_time = 0;
2058                 ap->cur_time = 0;
2059                 ap->ability_match_cfg = 0;
2060                 ap->ability_match_count = 0;
2061                 ap->ability_match = 0;
2062                 ap->idle_match = 0;
2063                 ap->ack_match = 0;
2064         }
2065         ap->cur_time++;
2066
2067         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2068                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2069
2070                 if (rx_cfg_reg != ap->ability_match_cfg) {
2071                         ap->ability_match_cfg = rx_cfg_reg;
2072                         ap->ability_match = 0;
2073                         ap->ability_match_count = 0;
2074                 } else {
2075                         if (++ap->ability_match_count > 1) {
2076                                 ap->ability_match = 1;
2077                                 ap->ability_match_cfg = rx_cfg_reg;
2078                         }
2079                 }
2080                 if (rx_cfg_reg & ANEG_CFG_ACK)
2081                         ap->ack_match = 1;
2082                 else
2083                         ap->ack_match = 0;
2084
2085                 ap->idle_match = 0;
2086         } else {
2087                 ap->idle_match = 1;
2088                 ap->ability_match_cfg = 0;
2089                 ap->ability_match_count = 0;
2090                 ap->ability_match = 0;
2091                 ap->ack_match = 0;
2092
2093                 rx_cfg_reg = 0;
2094         }
2095
2096         ap->rxconfig = rx_cfg_reg;
2097         ret = ANEG_OK;
2098
2099         switch(ap->state) {
2100         case ANEG_STATE_UNKNOWN:
2101                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2102                         ap->state = ANEG_STATE_AN_ENABLE;
2103
2104                 /* fallthru */
2105         case ANEG_STATE_AN_ENABLE:
2106                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2107                 if (ap->flags & MR_AN_ENABLE) {
2108                         ap->link_time = 0;
2109                         ap->cur_time = 0;
2110                         ap->ability_match_cfg = 0;
2111                         ap->ability_match_count = 0;
2112                         ap->ability_match = 0;
2113                         ap->idle_match = 0;
2114                         ap->ack_match = 0;
2115
2116                         ap->state = ANEG_STATE_RESTART_INIT;
2117                 } else {
2118                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2119                 }
2120                 break;
2121
2122         case ANEG_STATE_RESTART_INIT:
2123                 ap->link_time = ap->cur_time;
2124                 ap->flags &= ~(MR_NP_LOADED);
2125                 ap->txconfig = 0;
2126                 tw32(MAC_TX_AUTO_NEG, 0);
2127                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2128                 tw32_f(MAC_MODE, tp->mac_mode);
2129                 udelay(40);
2130
2131                 ret = ANEG_TIMER_ENAB;
2132                 ap->state = ANEG_STATE_RESTART;
2133
2134                 /* fallthru */
2135         case ANEG_STATE_RESTART:
2136                 delta = ap->cur_time - ap->link_time;
2137                 if (delta > ANEG_STATE_SETTLE_TIME) {
2138                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2139                 } else {
2140                         ret = ANEG_TIMER_ENAB;
2141                 }
2142                 break;
2143
2144         case ANEG_STATE_DISABLE_LINK_OK:
2145                 ret = ANEG_DONE;
2146                 break;
2147
2148         case ANEG_STATE_ABILITY_DETECT_INIT:
2149                 ap->flags &= ~(MR_TOGGLE_TX);
2150                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2151                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2152                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2153                 tw32_f(MAC_MODE, tp->mac_mode);
2154                 udelay(40);
2155
2156                 ap->state = ANEG_STATE_ABILITY_DETECT;
2157                 break;
2158
2159         case ANEG_STATE_ABILITY_DETECT:
2160                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2161                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2162                 }
2163                 break;
2164
2165         case ANEG_STATE_ACK_DETECT_INIT:
2166                 ap->txconfig |= ANEG_CFG_ACK;
2167                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2168                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2169                 tw32_f(MAC_MODE, tp->mac_mode);
2170                 udelay(40);
2171
2172                 ap->state = ANEG_STATE_ACK_DETECT;
2173
2174                 /* fallthru */
2175         case ANEG_STATE_ACK_DETECT:
2176                 if (ap->ack_match != 0) {
2177                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2178                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2179                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2180                         } else {
2181                                 ap->state = ANEG_STATE_AN_ENABLE;
2182                         }
2183                 } else if (ap->ability_match != 0 &&
2184                            ap->rxconfig == 0) {
2185                         ap->state = ANEG_STATE_AN_ENABLE;
2186                 }
2187                 break;
2188
2189         case ANEG_STATE_COMPLETE_ACK_INIT:
2190                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2191                         ret = ANEG_FAILED;
2192                         break;
2193                 }
2194                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2195                                MR_LP_ADV_HALF_DUPLEX |
2196                                MR_LP_ADV_SYM_PAUSE |
2197                                MR_LP_ADV_ASYM_PAUSE |
2198                                MR_LP_ADV_REMOTE_FAULT1 |
2199                                MR_LP_ADV_REMOTE_FAULT2 |
2200                                MR_LP_ADV_NEXT_PAGE |
2201                                MR_TOGGLE_RX |
2202                                MR_NP_RX);
2203                 if (ap->rxconfig & ANEG_CFG_FD)
2204                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2205                 if (ap->rxconfig & ANEG_CFG_HD)
2206                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2207                 if (ap->rxconfig & ANEG_CFG_PS1)
2208                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2209                 if (ap->rxconfig & ANEG_CFG_PS2)
2210                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2211                 if (ap->rxconfig & ANEG_CFG_RF1)
2212                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2213                 if (ap->rxconfig & ANEG_CFG_RF2)
2214                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2215                 if (ap->rxconfig & ANEG_CFG_NP)
2216                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2217
2218                 ap->link_time = ap->cur_time;
2219
2220                 ap->flags ^= (MR_TOGGLE_TX);
2221                 if (ap->rxconfig & 0x0008)
2222                         ap->flags |= MR_TOGGLE_RX;
2223                 if (ap->rxconfig & ANEG_CFG_NP)
2224                         ap->flags |= MR_NP_RX;
2225                 ap->flags |= MR_PAGE_RX;
2226
2227                 ap->state = ANEG_STATE_COMPLETE_ACK;
2228                 ret = ANEG_TIMER_ENAB;
2229                 break;
2230
2231         case ANEG_STATE_COMPLETE_ACK:
2232                 if (ap->ability_match != 0 &&
2233                     ap->rxconfig == 0) {
2234                         ap->state = ANEG_STATE_AN_ENABLE;
2235                         break;
2236                 }
2237                 delta = ap->cur_time - ap->link_time;
2238                 if (delta > ANEG_STATE_SETTLE_TIME) {
2239                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2240                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2241                         } else {
2242                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2243                                     !(ap->flags & MR_NP_RX)) {
2244                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2245                                 } else {
2246                                         ret = ANEG_FAILED;
2247                                 }
2248                         }
2249                 }
2250                 break;
2251
2252         case ANEG_STATE_IDLE_DETECT_INIT:
2253                 ap->link_time = ap->cur_time;
2254                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2255                 tw32_f(MAC_MODE, tp->mac_mode);
2256                 udelay(40);
2257
2258                 ap->state = ANEG_STATE_IDLE_DETECT;
2259                 ret = ANEG_TIMER_ENAB;
2260                 break;
2261
2262         case ANEG_STATE_IDLE_DETECT:
2263                 if (ap->ability_match != 0 &&
2264                     ap->rxconfig == 0) {
2265                         ap->state = ANEG_STATE_AN_ENABLE;
2266                         break;
2267                 }
2268                 delta = ap->cur_time - ap->link_time;
2269                 if (delta > ANEG_STATE_SETTLE_TIME) {
2270                         /* XXX another gem from the Broadcom driver :( */
2271                         ap->state = ANEG_STATE_LINK_OK;
2272                 }
2273                 break;
2274
2275         case ANEG_STATE_LINK_OK:
2276                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2277                 ret = ANEG_DONE;
2278                 break;
2279
2280         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2281                 /* ??? unimplemented */
2282                 break;
2283
2284         case ANEG_STATE_NEXT_PAGE_WAIT:
2285                 /* ??? unimplemented */
2286                 break;
2287
2288         default:
2289                 ret = ANEG_FAILED;
2290                 break;
2291         }
2292
2293         return ret;
2294 }
2295
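/* Run the software autoneg state machine to completion (bounded at
 * roughly 195 ms of 1 us ticks) and report the resulting MR_xxx flags
 * through *flags.  Returns nonzero if negotiation finished successfully.
 */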
2296 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2297 {
2298         int res = 0;
2299         struct tg3_fiber_aneginfo aninfo;
2300         int status = ANEG_FAILED;
2301         unsigned int tick;
2302         u32 tmp;
2303
2304         tw32_f(MAC_TX_AUTO_NEG, 0);
2305
2306         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2307         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2308         udelay(40);
2309
2310         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2311         udelay(40);
2312
2313         memset(&aninfo, 0, sizeof(aninfo));
2314         aninfo.flags |= MR_AN_ENABLE;
2315         aninfo.state = ANEG_STATE_UNKNOWN;
2316         aninfo.cur_time = 0;
2317         tick = 0;
2318         while (++tick < 195000) {
2319                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2320                 if (status == ANEG_DONE || status == ANEG_FAILED)
2321                         break;
2322
2323                 udelay(1);
2324         }
2325
2326         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2327         tw32_f(MAC_MODE, tp->mac_mode);
2328         udelay(40);
2329
2330         *flags = aninfo.flags;
2331
2332         if (status == ANEG_DONE &&
2333             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2334                              MR_LP_ADV_FULL_DUPLEX)))
2335                 res = 1;
2336
2337         return res;
2338 }
2339
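/* One-time initialization of the BCM8002 SerDes PHY: set the PLL lock
 * range, software-reset the PHY, cycle POR, and leave the channel
 * registers deselected so the PHY ID can be read later.  The register
 * numbers and values are undocumented vendor magic.
 */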
2340 static void tg3_init_bcm8002(struct tg3 *tp)
2341 {
2342         u32 mac_status = tr32(MAC_STATUS);
2343         int i;
2344
2345         /* Reset when initializing for the first time or when we have a link. */
2346         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2347             !(mac_status & MAC_STATUS_PCS_SYNCED))
2348                 return;
2349
2350         /* Set PLL lock range. */
2351         tg3_writephy(tp, 0x16, 0x8007);
2352
2353         /* SW reset */
2354         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2355
2356         /* Wait for reset to complete. */
2357         /* XXX schedule_timeout() ... */
2358         for (i = 0; i < 500; i++)
2359                 udelay(10);
2360
2361         /* Config mode; select PMA/Ch 1 regs. */
2362         tg3_writephy(tp, 0x10, 0x8411);
2363
2364         /* Enable auto-lock and comdet, select txclk for tx. */
2365         tg3_writephy(tp, 0x11, 0x0a10);
2366
2367         tg3_writephy(tp, 0x18, 0x00a0);
2368         tg3_writephy(tp, 0x16, 0x41ff);
2369
2370         /* Assert and deassert POR. */
2371         tg3_writephy(tp, 0x13, 0x0400);
2372         udelay(40);
2373         tg3_writephy(tp, 0x13, 0x0000);
2374
2375         tg3_writephy(tp, 0x11, 0x0a50);
2376         udelay(40);
2377         tg3_writephy(tp, 0x11, 0x0a10);
2378
2379         /* Wait for signal to stabilize */
2380         /* XXX schedule_timeout() ... */
2381         for (i = 0; i < 15000; i++)
2382                 udelay(10);
2383
2384         /* Deselect the channel register so we can read the PHYID
2385          * later.
2386          */
2387         tg3_writephy(tp, 0x10, 0x8011);
2388 }
2389
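/* Drive the SG_DIG hardware autonegotiation block on fiber parts and
 * return 1 if a link was established (with flow control resolved),
 * 0 otherwise.  The MAC_SERDES_CFG rewrites are a workaround applied
 * on all revisions except 5704 A0/A1.
 */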
2390 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2391 {
2392         u32 sg_dig_ctrl, sg_dig_status;
2393         u32 serdes_cfg, expected_sg_dig_ctrl;
2394         int workaround, port_a;
2395         int current_link_up;
2396
2397         serdes_cfg = 0;
2398         expected_sg_dig_ctrl = 0;
2399         workaround = 0;
2400         port_a = 1;
2401         current_link_up = 0;
2402
2403         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2404             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2405                 workaround = 1;
2406                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2407                         port_a = 0;
2408
2409                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2410                 /* preserve bits 20-23 for voltage regulator */
2411                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2412         }
2413
2414         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2415
2416         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2417                 if (sg_dig_ctrl & (1 << 31)) {
2418                         if (workaround) {
2419                                 u32 val = serdes_cfg;
2420
2421                                 if (port_a)
2422                                         val |= 0xc010000;
2423                                 else
2424                                         val |= 0x4010000;
2425                                 tw32_f(MAC_SERDES_CFG, val);
2426                         }
2427                         tw32_f(SG_DIG_CTRL, 0x01388400);
2428                 }
2429                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2430                         tg3_setup_flow_control(tp, 0, 0);
2431                         current_link_up = 1;
2432                 }
2433                 goto out;
2434         }
2435
2436         /* Want auto-negotiation.  */
2437         expected_sg_dig_ctrl = 0x81388400;
2438
2439         /* Pause capability */
2440         expected_sg_dig_ctrl |= (1 << 11);
2441
2442         /* Asymmetric pause */
2443         expected_sg_dig_ctrl |= (1 << 12);
2444
2445         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2446                 if (workaround)
2447                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2448                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2449                 udelay(5);
2450                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2451
2452                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2453         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2454                                  MAC_STATUS_SIGNAL_DET)) {
2455                 int i;
2456
2457                 /* Give time to negotiate (~200ms) */
2458                 for (i = 0; i < 40000; i++) {
2459                         sg_dig_status = tr32(SG_DIG_STATUS);
2460                         if (sg_dig_status & (0x3))
2461                                 break;
2462                         udelay(5);
2463                 }
2464                 mac_status = tr32(MAC_STATUS);
2465
2466                 if ((sg_dig_status & (1 << 1)) &&
2467                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2468                         u32 local_adv, remote_adv;
2469
2470                         local_adv = ADVERTISE_PAUSE_CAP;
2471                         remote_adv = 0;
2472                         if (sg_dig_status & (1 << 19))
2473                                 remote_adv |= LPA_PAUSE_CAP;
2474                         if (sg_dig_status & (1 << 20))
2475                                 remote_adv |= LPA_PAUSE_ASYM;
2476
2477                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2478                         current_link_up = 1;
2479                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2480                 } else if (!(sg_dig_status & (1 << 1))) {
2481                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2482                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2483                         else {
2484                                 if (workaround) {
2485                                         u32 val = serdes_cfg;
2486
2487                                         if (port_a)
2488                                                 val |= 0xc010000;
2489                                         else
2490                                                 val |= 0x4010000;
2491
2492                                         tw32_f(MAC_SERDES_CFG, val);
2493                                 }
2494
2495                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2496                                 udelay(40);
2497
2498                                 /* Link parallel detection - link is up
2499                                  * only if we have PCS_SYNC and are not
2500                                  * receiving config code words. */
2501                                 mac_status = tr32(MAC_STATUS);
2502                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2503                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2504                                         tg3_setup_flow_control(tp, 0, 0);
2505                                         current_link_up = 1;
2506                                 }
2507                         }
2508                 }
2509         }
2510
2511 out:
2512         return current_link_up;
2513 }
2514
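/* Fiber link setup without the SG_DIG hardware block: either run the
 * software autoneg state machine or force a 1000FD link, and report
 * whether the link came up.
 */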
2515 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2516 {
2517         int current_link_up = 0;
2518
2519         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2520                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2521                 goto out;
2522         }
2523
2524         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2525                 u32 flags;
2526                 int i;
2527
2528                 if (fiber_autoneg(tp, &flags)) {
2529                         u32 local_adv, remote_adv;
2530
2531                         local_adv = ADVERTISE_PAUSE_CAP;
2532                         remote_adv = 0;
2533                         if (flags & MR_LP_ADV_SYM_PAUSE)
2534                                 remote_adv |= LPA_PAUSE_CAP;
2535                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2536                                 remote_adv |= LPA_PAUSE_ASYM;
2537
2538                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2539
2540                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2541                         current_link_up = 1;
2542                 }
2543                 for (i = 0; i < 30; i++) {
2544                         udelay(20);
2545                         tw32_f(MAC_STATUS,
2546                                (MAC_STATUS_SYNC_CHANGED |
2547                                 MAC_STATUS_CFG_CHANGED));
2548                         udelay(40);
2549                         if ((tr32(MAC_STATUS) &
2550                              (MAC_STATUS_SYNC_CHANGED |
2551                               MAC_STATUS_CFG_CHANGED)) == 0)
2552                                 break;
2553                 }
2554
2555                 mac_status = tr32(MAC_STATUS);
2556                 if (current_link_up == 0 &&
2557                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2558                     !(mac_status & MAC_STATUS_RCVD_CFG))
2559                         current_link_up = 1;
2560         } else {
2561                 /* Forcing 1000FD link up. */
2562                 current_link_up = 1;
2563                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2564
2565                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2566                 udelay(40);
2567         }
2568
2569 out:
2570         return current_link_up;
2571 }
2572
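/* Top-level link setup for TBI/fiber ports: choose hardware or software
 * autonegotiation, reconcile MAC_MODE and the LED state with the result,
 * and report a link change if speed, duplex or pause settings changed.
 */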
2573 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2574 {
2575         u32 orig_pause_cfg;
2576         u16 orig_active_speed;
2577         u8 orig_active_duplex;
2578         u32 mac_status;
2579         int current_link_up;
2580         int i;
2581
2582         orig_pause_cfg =
2583                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2584                                   TG3_FLAG_TX_PAUSE));
2585         orig_active_speed = tp->link_config.active_speed;
2586         orig_active_duplex = tp->link_config.active_duplex;
2587
2588         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2589             netif_carrier_ok(tp->dev) &&
2590             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2591                 mac_status = tr32(MAC_STATUS);
2592                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2593                                MAC_STATUS_SIGNAL_DET |
2594                                MAC_STATUS_CFG_CHANGED |
2595                                MAC_STATUS_RCVD_CFG);
2596                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2597                                    MAC_STATUS_SIGNAL_DET)) {
2598                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2599                                             MAC_STATUS_CFG_CHANGED));
2600                         return 0;
2601                 }
2602         }
2603
2604         tw32_f(MAC_TX_AUTO_NEG, 0);
2605
2606         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2607         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2608         tw32_f(MAC_MODE, tp->mac_mode);
2609         udelay(40);
2610
2611         if (tp->phy_id == PHY_ID_BCM8002)
2612                 tg3_init_bcm8002(tp);
2613
2614         /* Enable link change event even when serdes polling.  */
2615         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2616         udelay(40);
2617
2618         current_link_up = 0;
2619         mac_status = tr32(MAC_STATUS);
2620
2621         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2622                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2623         else
2624                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2625
2626         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2627         tw32_f(MAC_MODE, tp->mac_mode);
2628         udelay(40);
2629
2630         tp->hw_status->status =
2631                 (SD_STATUS_UPDATED |
2632                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2633
2634         for (i = 0; i < 100; i++) {
2635                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2636                                     MAC_STATUS_CFG_CHANGED));
2637                 udelay(5);
2638                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2639                                          MAC_STATUS_CFG_CHANGED)) == 0)
2640                         break;
2641         }
2642
2643         mac_status = tr32(MAC_STATUS);
2644         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2645                 current_link_up = 0;
2646                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2647                         tw32_f(MAC_MODE, (tp->mac_mode |
2648                                           MAC_MODE_SEND_CONFIGS));
2649                         udelay(1);
2650                         tw32_f(MAC_MODE, tp->mac_mode);
2651                 }
2652         }
2653
2654         if (current_link_up == 1) {
2655                 tp->link_config.active_speed = SPEED_1000;
2656                 tp->link_config.active_duplex = DUPLEX_FULL;
2657                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2658                                     LED_CTRL_LNKLED_OVERRIDE |
2659                                     LED_CTRL_1000MBPS_ON));
2660         } else {
2661                 tp->link_config.active_speed = SPEED_INVALID;
2662                 tp->link_config.active_duplex = DUPLEX_INVALID;
2663                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2664                                     LED_CTRL_LNKLED_OVERRIDE |
2665                                     LED_CTRL_TRAFFIC_OVERRIDE));
2666         }
2667
2668         if (current_link_up != netif_carrier_ok(tp->dev)) {
2669                 if (current_link_up)
2670                         netif_carrier_on(tp->dev);
2671                 else
2672                         netif_carrier_off(tp->dev);
2673                 tg3_link_report(tp);
2674         } else {
2675                 u32 now_pause_cfg =
2676                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2677                                          TG3_FLAG_TX_PAUSE);
2678                 if (orig_pause_cfg != now_pause_cfg ||
2679                     orig_active_speed != tp->link_config.active_speed ||
2680                     orig_active_duplex != tp->link_config.active_duplex)
2681                         tg3_link_report(tp);
2682         }
2683
2684         return 0;
2685 }
2686
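/* Link setup for fiber ports that present an MII-style PHY interface
 * (e.g. 5714-class SerDes): handle 1000BaseX advertisement, forced
 * modes and parallel detection through the standard MII registers.
 */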
2687 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2688 {
2689         int current_link_up, err = 0;
2690         u32 bmsr, bmcr;
2691         u16 current_speed;
2692         u8 current_duplex;
2693
2694         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2695         tw32_f(MAC_MODE, tp->mac_mode);
2696         udelay(40);
2697
2698         tw32(MAC_EVENT, 0);
2699
2700         tw32_f(MAC_STATUS,
2701              (MAC_STATUS_SYNC_CHANGED |
2702               MAC_STATUS_CFG_CHANGED |
2703               MAC_STATUS_MI_COMPLETION |
2704               MAC_STATUS_LNKSTATE_CHANGED));
2705         udelay(40);
2706
2707         if (force_reset)
2708                 tg3_phy_reset(tp);
2709
2710         current_link_up = 0;
2711         current_speed = SPEED_INVALID;
2712         current_duplex = DUPLEX_INVALID;
2713
2714         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2715         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2716         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2717                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2718                         bmsr |= BMSR_LSTATUS;
2719                 else
2720                         bmsr &= ~BMSR_LSTATUS;
2721         }
2722
2723         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2724
2725         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2726             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2727                 /* do nothing, just check for link up at the end */
2728         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2729                 u32 adv, new_adv;
2730
2731                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2732                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2733                                   ADVERTISE_1000XPAUSE |
2734                                   ADVERTISE_1000XPSE_ASYM |
2735                                   ADVERTISE_SLCT);
2736
2737                 /* Always advertise symmetric PAUSE just like copper */
2738                 new_adv |= ADVERTISE_1000XPAUSE;
2739
2740                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2741                         new_adv |= ADVERTISE_1000XHALF;
2742                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2743                         new_adv |= ADVERTISE_1000XFULL;
2744
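                     /* If the advertisement changed or autoneg was disabled,
                      * reprogram the PHY and (re)start autonegotiation, then
                      * return early; tg3_serdes_parallel_detect() will skip
                      * one pass so autoneg gets time to complete.
                      */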
2745                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2746                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2747                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2748                         tg3_writephy(tp, MII_BMCR, bmcr);
2749
2750                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2751                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2752                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2753
2754                         return err;
2755                 }
2756         } else {
2757                 u32 new_bmcr;
2758
2759                 bmcr &= ~BMCR_SPEED1000;
2760                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2761
2762                 if (tp->link_config.duplex == DUPLEX_FULL)
2763                         new_bmcr |= BMCR_FULLDPLX;
2764
2765                 if (new_bmcr != bmcr) {
2766                         /* BMCR_SPEED1000 is a reserved bit that needs
2767                          * to be set on write.
2768                          */
2769                         new_bmcr |= BMCR_SPEED1000;
2770
2771                         /* Force a linkdown */
2772                         if (netif_carrier_ok(tp->dev)) {
2773                                 u32 adv;
2774
2775                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2776                                 adv &= ~(ADVERTISE_1000XFULL |
2777                                          ADVERTISE_1000XHALF |
2778                                          ADVERTISE_SLCT);
2779                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2780                                 tg3_writephy(tp, MII_BMCR, bmcr |
2781                                                            BMCR_ANRESTART |
2782                                                            BMCR_ANENABLE);
2783                                 udelay(10);
2784                                 netif_carrier_off(tp->dev);
2785                         }
2786                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2787                         bmcr = new_bmcr;
2788                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2789                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2790                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2791                             ASIC_REV_5714) {
2792                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2793                                         bmsr |= BMSR_LSTATUS;
2794                                 else
2795                                         bmsr &= ~BMSR_LSTATUS;
2796                         }
2797                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2798                 }
2799         }
2800
2801         if (bmsr & BMSR_LSTATUS) {
2802                 current_speed = SPEED_1000;
2803                 current_link_up = 1;
2804                 if (bmcr & BMCR_FULLDPLX)
2805                         current_duplex = DUPLEX_FULL;
2806                 else
2807                         current_duplex = DUPLEX_HALF;
2808
2809                 if (bmcr & BMCR_ANENABLE) {
2810                         u32 local_adv, remote_adv, common;
2811
2812                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2813                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2814                         common = local_adv & remote_adv;
2815                         if (common & (ADVERTISE_1000XHALF |
2816                                       ADVERTISE_1000XFULL)) {
2817                                 if (common & ADVERTISE_1000XFULL)
2818                                         current_duplex = DUPLEX_FULL;
2819                                 else
2820                                         current_duplex = DUPLEX_HALF;
2821
2822                                 tg3_setup_flow_control(tp, local_adv,
2823                                                        remote_adv);
2824                         }
2825                         else
2826                                 current_link_up = 0;
2827                 }
2828         }
2829
2830         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2831         if (tp->link_config.active_duplex == DUPLEX_HALF)
2832                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2833
2834         tw32_f(MAC_MODE, tp->mac_mode);
2835         udelay(40);
2836
2837         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2838
2839         tp->link_config.active_speed = current_speed;
2840         tp->link_config.active_duplex = current_duplex;
2841
2842         if (current_link_up != netif_carrier_ok(tp->dev)) {
2843                 if (current_link_up)
2844                         netif_carrier_on(tp->dev);
2845                 else {
2846                         netif_carrier_off(tp->dev);
2847                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2848                 }
2849                 tg3_link_report(tp);
2850         }
2851         return err;
2852 }
2853
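     /* 1000BASE-X parallel detection: if we see signal but the link partner
      * is not sending autoneg config code words, force the link up at
      * 1000/full.  Once config code words show up again, hand control back
      * to autonegotiation.
      */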
2854 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2855 {
2856         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2857                 /* Give autoneg time to complete. */
2858                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2859                 return;
2860         }
2861         if (!netif_carrier_ok(tp->dev) &&
2862             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2863                 u32 bmcr;
2864
2865                 tg3_readphy(tp, MII_BMCR, &bmcr);
2866                 if (bmcr & BMCR_ANENABLE) {
2867                         u32 phy1, phy2;
2868
2869                         /* Select shadow register 0x1f */
2870                         tg3_writephy(tp, 0x1c, 0x7c00);
2871                         tg3_readphy(tp, 0x1c, &phy1);
2872
2873                         /* Select expansion interrupt status register */
2874                         tg3_writephy(tp, 0x17, 0x0f01);
2875                         tg3_readphy(tp, 0x15, &phy2);
2876                         tg3_readphy(tp, 0x15, &phy2);
2877
2878                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2879                                 /* We have signal detect and are not receiving
2880                                  * config code words; the link is up via parallel
2881                                  * detection.
2882                                  */
2883
2884                                 bmcr &= ~BMCR_ANENABLE;
2885                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2886                                 tg3_writephy(tp, MII_BMCR, bmcr);
2887                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2888                         }
2889                 }
2890         }
2891         else if (netif_carrier_ok(tp->dev) &&
2892                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2893                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2894                 u32 phy2;
2895
2896                 /* Select expansion interrupt status register */
2897                 tg3_writephy(tp, 0x17, 0x0f01);
2898                 tg3_readphy(tp, 0x15, &phy2);
2899                 if (phy2 & 0x20) {
2900                         u32 bmcr;
2901
2902                         /* Config code words received, turn on autoneg. */
2903                         tg3_readphy(tp, MII_BMCR, &bmcr);
2904                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2905
2906                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2907
2908                 }
2909         }
2910 }
2911
2912 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2913 {
2914         int err;
2915
2916         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2917                 err = tg3_setup_fiber_phy(tp, force_reset);
2918         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2919                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2920         } else {
2921                 err = tg3_setup_copper_phy(tp, force_reset);
2922         }
2923
2924         if (tp->link_config.active_speed == SPEED_1000 &&
2925             tp->link_config.active_duplex == DUPLEX_HALF)
2926                 tw32(MAC_TX_LENGTHS,
2927                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2928                       (6 << TX_LENGTHS_IPG_SHIFT) |
2929                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2930         else
2931                 tw32(MAC_TX_LENGTHS,
2932                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2933                       (6 << TX_LENGTHS_IPG_SHIFT) |
2934                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2935
2936         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2937                 if (netif_carrier_ok(tp->dev)) {
2938                         tw32(HOSTCC_STAT_COAL_TICKS,
2939                              tp->coal.stats_block_coalesce_usecs);
2940                 } else {
2941                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2942                 }
2943         }
2944
2945         return err;
2946 }
2947
2948 /* Tigon3 never reports partial packet sends.  So we do not
2949  * need special logic to handle SKBs that have not had all
2950  * of their frags sent yet, like SunGEM does.
2951  */
2952 static void tg3_tx(struct tg3 *tp)
2953 {
2954         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2955         u32 sw_idx = tp->tx_cons;
2956
2957         while (sw_idx != hw_idx) {
2958                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2959                 struct sk_buff *skb = ri->skb;
2960                 int i;
2961
2962                 BUG_ON(skb == NULL);
2963                 pci_unmap_single(tp->pdev,
2964                                  pci_unmap_addr(ri, mapping),
2965                                  skb_headlen(skb),
2966                                  PCI_DMA_TODEVICE);
2967
2968                 ri->skb = NULL;
2969
2970                 sw_idx = NEXT_TX(sw_idx);
2971
2972                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2973                         BUG_ON(sw_idx == hw_idx);
2974
2975                         ri = &tp->tx_buffers[sw_idx];
2976                         BUG_ON(ri->skb != NULL);
2977
2978                         pci_unmap_page(tp->pdev,
2979                                        pci_unmap_addr(ri, mapping),
2980                                        skb_shinfo(skb)->frags[i].size,
2981                                        PCI_DMA_TODEVICE);
2982
2983                         sw_idx = NEXT_TX(sw_idx);
2984                 }
2985
2986                 dev_kfree_skb(skb);
2987         }
2988
2989         tp->tx_cons = sw_idx;
2990
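             /* Re-check under tx_lock so we do not race with tg3_start_xmit()
              * stopping the queue after we sampled its state.
              */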
2991         if (unlikely(netif_queue_stopped(tp->dev))) {
2992                 spin_lock(&tp->tx_lock);
2993                 if (netif_queue_stopped(tp->dev) &&
2994                     (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2995                         netif_wake_queue(tp->dev);
2996                 spin_unlock(&tp->tx_lock);
2997         }
2998 }
2999
3000 /* Returns size of skb allocated or < 0 on error.
3001  *
3002  * We only need to fill in the address because the other members
3003  * of the RX descriptor are invariant; see tg3_init_rings.
3004  *
3005  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3006  * posting buffers we only dirty the first cache line of the RX
3007  * descriptor (containing the address).  Whereas for the RX status
3008  * buffers the cpu only reads the last cacheline of the RX descriptor
3009  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3010  */
3011 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3012                             int src_idx, u32 dest_idx_unmasked)
3013 {
3014         struct tg3_rx_buffer_desc *desc;
3015         struct ring_info *map, *src_map;
3016         struct sk_buff *skb;
3017         dma_addr_t mapping;
3018         int skb_size, dest_idx;
3019
3020         src_map = NULL;
3021         switch (opaque_key) {
3022         case RXD_OPAQUE_RING_STD:
3023                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3024                 desc = &tp->rx_std[dest_idx];
3025                 map = &tp->rx_std_buffers[dest_idx];
3026                 if (src_idx >= 0)
3027                         src_map = &tp->rx_std_buffers[src_idx];
3028                 skb_size = tp->rx_pkt_buf_sz;
3029                 break;
3030
3031         case RXD_OPAQUE_RING_JUMBO:
3032                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3033                 desc = &tp->rx_jumbo[dest_idx];
3034                 map = &tp->rx_jumbo_buffers[dest_idx];
3035                 if (src_idx >= 0)
3036                         src_map = &tp->rx_jumbo_buffers[src_idx];
3037                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3038                 break;
3039
3040         default:
3041                 return -EINVAL;
3042         }
3043
3044         /* Do not overwrite any of the map or rp information
3045          * until we are sure we can commit to a new buffer.
3046          *
3047          * Callers depend upon this behavior and assume that
3048          * we leave everything unchanged if we fail.
3049          */
3050         skb = dev_alloc_skb(skb_size);
3051         if (skb == NULL)
3052                 return -ENOMEM;
3053
3054         skb->dev = tp->dev;
3055         skb_reserve(skb, tp->rx_offset);
3056
3057         mapping = pci_map_single(tp->pdev, skb->data,
3058                                  skb_size - tp->rx_offset,
3059                                  PCI_DMA_FROMDEVICE);
3060
3061         map->skb = skb;
3062         pci_unmap_addr_set(map, mapping, mapping);
3063
3064         if (src_map != NULL)
3065                 src_map->skb = NULL;
3066
3067         desc->addr_hi = ((u64)mapping >> 32);
3068         desc->addr_lo = ((u64)mapping & 0xffffffff);
3069
3070         return skb_size;
3071 }
3072
3073 /* We only need to move over in the address because the other
3074  * members of the RX descriptor are invariant.  See notes above
3075  * tg3_alloc_rx_skb for full details.
3076  */
3077 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3078                            int src_idx, u32 dest_idx_unmasked)
3079 {
3080         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3081         struct ring_info *src_map, *dest_map;
3082         int dest_idx;
3083
3084         switch (opaque_key) {
3085         case RXD_OPAQUE_RING_STD:
3086                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3087                 dest_desc = &tp->rx_std[dest_idx];
3088                 dest_map = &tp->rx_std_buffers[dest_idx];
3089                 src_desc = &tp->rx_std[src_idx];
3090                 src_map = &tp->rx_std_buffers[src_idx];
3091                 break;
3092
3093         case RXD_OPAQUE_RING_JUMBO:
3094                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3095                 dest_desc = &tp->rx_jumbo[dest_idx];
3096                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3097                 src_desc = &tp->rx_jumbo[src_idx];
3098                 src_map = &tp->rx_jumbo_buffers[src_idx];
3099                 break;
3100
3101         default:
3102                 return;
3103         }
3104
3105         dest_map->skb = src_map->skb;
3106         pci_unmap_addr_set(dest_map, mapping,
3107                            pci_unmap_addr(src_map, mapping));
3108         dest_desc->addr_hi = src_desc->addr_hi;
3109         dest_desc->addr_lo = src_desc->addr_lo;
3110
3111         src_map->skb = NULL;
3112 }
3113
3114 #if TG3_VLAN_TAG_USED
3115 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3116 {
3117         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3118 }
3119 #endif
3120
3121 /* The RX ring scheme is composed of multiple rings which post fresh
3122  * buffers to the chip, and one special ring the chip uses to report
3123  * status back to the host.
3124  *
3125  * The special ring reports the status of received packets to the
3126  * host.  The chip does not write into the original descriptor the
3127  * RX buffer was obtained from.  The chip simply takes the original
3128  * descriptor as provided by the host, updates the status and length
3129  * field, then writes this into the next status ring entry.
3130  *
3131  * Each ring the host uses to post buffers to the chip is described
3132  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3133  * it is first placed into the on-chip ram.  When the packet's length
3134  * is known, it walks down the TG3_BDINFO entries to select the ring.
3135  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
3136  * whose MAXLEN covers the new packet's length is chosen.
3137  *
3138  * The "separate ring for rx status" scheme may sound queer, but it makes
3139  * sense from a cache coherency perspective.  If only the host writes
3140  * to the buffer post rings, and only the chip writes to the rx status
3141  * rings, then cache lines never move beyond shared-modified state.
3142  * If both the host and chip were to write into the same ring, cache line
3143  * eviction could occur since both entities want it in an exclusive state.
3144  */
3145 static int tg3_rx(struct tg3 *tp, int budget)
3146 {
3147         u32 work_mask;
3148         u32 sw_idx = tp->rx_rcb_ptr;
3149         u16 hw_idx;
3150         int received;
3151
3152         hw_idx = tp->hw_status->idx[0].rx_producer;
3153         /*
3154          * We need to order the read of hw_idx and the read of
3155          * the opaque cookie.
3156          */
3157         rmb();
3158         work_mask = 0;
3159         received = 0;
3160         while (sw_idx != hw_idx && budget > 0) {
3161                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3162                 unsigned int len;
3163                 struct sk_buff *skb;
3164                 dma_addr_t dma_addr;
3165                 u32 opaque_key, desc_idx, *post_ptr;
3166
3167                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3168                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3169                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3170                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3171                                                   mapping);
3172                         skb = tp->rx_std_buffers[desc_idx].skb;
3173                         post_ptr = &tp->rx_std_ptr;
3174                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3175                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3176                                                   mapping);
3177                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3178                         post_ptr = &tp->rx_jumbo_ptr;
3179                 }
3180                 else {
3181                         goto next_pkt_nopost;
3182                 }
3183
3184                 work_mask |= opaque_key;
3185
3186                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3187                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3188                 drop_it:
3189                         tg3_recycle_rx(tp, opaque_key,
3190                                        desc_idx, *post_ptr);
3191                 drop_it_no_recycle:
3192                         /* Other statistics kept track of by card. */
3193                         tp->net_stats.rx_dropped++;
3194                         goto next_pkt;
3195                 }
3196
3197                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3198
3199                 if (len > RX_COPY_THRESHOLD &&
3200                     tp->rx_offset == 2
3201                     /* rx_offset != 2 iff this is a 5701 card running
3202                      * in PCI-X mode [see tg3_get_invariants()] */
3203                 ) {
3204                         int skb_size;
3205
3206                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3207                                                     desc_idx, *post_ptr);
3208                         if (skb_size < 0)
3209                                 goto drop_it;
3210
3211                         pci_unmap_single(tp->pdev, dma_addr,
3212                                          skb_size - tp->rx_offset,
3213                                          PCI_DMA_FROMDEVICE);
3214
3215                         skb_put(skb, len);
3216                 } else {
3217                         struct sk_buff *copy_skb;
3218
3219                         tg3_recycle_rx(tp, opaque_key,
3220                                        desc_idx, *post_ptr);
3221
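                             /* Allocate len + 2 and reserve 2 bytes so the
                              * IP header lands on a 4-byte boundary after
                              * the 14-byte Ethernet header.
                              */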
3222                         copy_skb = dev_alloc_skb(len + 2);
3223                         if (copy_skb == NULL)
3224                                 goto drop_it_no_recycle;
3225
3226                         copy_skb->dev = tp->dev;
3227                         skb_reserve(copy_skb, 2);
3228                         skb_put(copy_skb, len);
3229                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3230                         memcpy(copy_skb->data, skb->data, len);
3231                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3232
3233                         /* We'll reuse the original ring buffer. */
3234                         skb = copy_skb;
3235                 }
3236
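                     /* The NIC's computed TCP/UDP checksum is 0xffff exactly
                      * when the packet's checksum verified good (a valid
                      * segment sums to 0xffff in one's complement); only then
                      * do we report CHECKSUM_UNNECESSARY.
                      */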
3237                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3238                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3239                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3240                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3241                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3242                 else
3243                         skb->ip_summed = CHECKSUM_NONE;
3244
3245                 skb->protocol = eth_type_trans(skb, tp->dev);
3246 #if TG3_VLAN_TAG_USED
3247                 if (tp->vlgrp != NULL &&
3248                     desc->type_flags & RXD_FLAG_VLAN) {
3249                         tg3_vlan_rx(tp, skb,
3250                                     desc->err_vlan & RXD_VLAN_MASK);
3251                 } else
3252 #endif
3253                         netif_receive_skb(skb);
3254
3255                 tp->dev->last_rx = jiffies;
3256                 received++;
3257                 budget--;
3258
3259 next_pkt:
3260                 (*post_ptr)++;
3261 next_pkt_nopost:
3262                 sw_idx++;
3263                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3264
3265                 /* Refresh hw_idx to see if there is new work */
3266                 if (sw_idx == hw_idx) {
3267                         hw_idx = tp->hw_status->idx[0].rx_producer;
3268                         rmb();
3269                 }
3270         }
3271
3272         /* ACK the status ring. */
3273         tp->rx_rcb_ptr = sw_idx;
3274         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3275
3276         /* Refill RX ring(s). */
3277         if (work_mask & RXD_OPAQUE_RING_STD) {
3278                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3279                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3280                              sw_idx);
3281         }
3282         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3283                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3284                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3285                              sw_idx);
3286         }
3287         mmiowb();
3288
3289         return received;
3290 }
3291
3292 static int tg3_poll(struct net_device *netdev, int *budget)
3293 {
3294         struct tg3 *tp = netdev_priv(netdev);
3295         struct tg3_hw_status *sblk = tp->hw_status;
3296         int done;
3297
3298         /* handle link change and other phy events */
3299         if (!(tp->tg3_flags &
3300               (TG3_FLAG_USE_LINKCHG_REG |
3301                TG3_FLAG_POLL_SERDES))) {
3302                 if (sblk->status & SD_STATUS_LINK_CHG) {
3303                         sblk->status = SD_STATUS_UPDATED |
3304                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3305                         spin_lock(&tp->lock);
3306                         tg3_setup_phy(tp, 0);
3307                         spin_unlock(&tp->lock);
3308                 }
3309         }
3310
3311         /* run TX completion thread */
3312         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3313                 tg3_tx(tp);
3314         }
3315
3316         /* run RX thread, within the bounds set by NAPI.
3317          * All RX "locking" is done by ensuring outside
3318          * code synchronizes with dev->poll()
3319          */
3320         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3321                 int orig_budget = *budget;
3322                 int work_done;
3323
3324                 if (orig_budget > netdev->quota)
3325                         orig_budget = netdev->quota;
3326
3327                 work_done = tg3_rx(tp, orig_budget);
3328
3329                 *budget -= work_done;
3330                 netdev->quota -= work_done;
3331         }
3332
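             /* With tagged status blocks, record the tag we just processed;
              * tg3_restart_ints() writes it back to the interrupt mailbox to
              * acknowledge exactly this much work.  Otherwise simply clear
              * the updated bit.
              */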
3333         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3334                 tp->last_tag = sblk->status_tag;
3335                 rmb();
3336         } else
3337                 sblk->status &= ~SD_STATUS_UPDATED;
3338
3339         /* if no more work, tell net stack and NIC we're done */
3340         done = !tg3_has_work(tp);
3341         if (done) {
3342                 netif_rx_complete(netdev);
3343                 tg3_restart_ints(tp);
3344         }
3345
3346         return (done ? 0 : 1);
3347 }
3348
3349 static void tg3_irq_quiesce(struct tg3 *tp)
3350 {
3351         BUG_ON(tp->irq_sync);
3352
3353         tp->irq_sync = 1;
3354         smp_mb();
3355
3356         synchronize_irq(tp->pdev->irq);
3357 }
3358
3359 static inline int tg3_irq_sync(struct tg3 *tp)
3360 {
3361         return tp->irq_sync;
3362 }
3363
3364 /* Fully shut down all tg3 driver activity elsewhere in the system.
3365  * If irq_sync is non-zero, the IRQ handler is synchronized as well.
3366  * Most of the time this is only necessary when
3367  * shutting down the device.
3368  */
3369 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3370 {
3371         if (irq_sync)
3372                 tg3_irq_quiesce(tp);
3373         spin_lock_bh(&tp->lock);
3374         spin_lock(&tp->tx_lock);
3375 }
3376
3377 static inline void tg3_full_unlock(struct tg3 *tp)
3378 {
3379         spin_unlock(&tp->tx_lock);
3380         spin_unlock_bh(&tp->lock);
3381 }
3382
3383 /* One-shot MSI handler - Chip automatically disables interrupt
3384  * after sending MSI so driver doesn't have to do it.
3385  */
3386 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3387 {
3388         struct net_device *dev = dev_id;
3389         struct tg3 *tp = netdev_priv(dev);
3390
3391         prefetch(tp->hw_status);
3392         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3393
3394         if (likely(!tg3_irq_sync(tp)))
3395                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3396
3397         return IRQ_HANDLED;
3398 }
3399
3400 /* MSI ISR - No need to check for interrupt sharing and no need to
3401  * flush status block and interrupt mailbox. PCI ordering rules
3402  * guarantee that MSI will arrive after the status block.
3403  */
3404 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3405 {
3406         struct net_device *dev = dev_id;
3407         struct tg3 *tp = netdev_priv(dev);
3408
3409         prefetch(tp->hw_status);
3410         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3411         /*
3412          * Writing any value to intr-mbox-0 clears PCI INTA# and
3413          * chip-internal interrupt pending events.
3414          * Writing non-zero to intr-mbox-0 additionally tells the
3415          * NIC to stop sending us irqs, engaging "in-intr-handler"
3416          * event coalescing.
3417          */
3418         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3419         if (likely(!tg3_irq_sync(tp)))
3420                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3421
3422         return IRQ_RETVAL(1);
3423 }
3424
3425 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3426 {
3427         struct net_device *dev = dev_id;
3428         struct tg3 *tp = netdev_priv(dev);
3429         struct tg3_hw_status *sblk = tp->hw_status;
3430         unsigned int handled = 1;
3431
3432         /* In INTx mode, it is possible for the interrupt to arrive at
3433          * the CPU before the status block update posted prior to the interrupt.
3434          * Reading the PCI State register will confirm whether the
3435          * interrupt is ours and will flush the status block.
3436          */
3437         if ((sblk->status & SD_STATUS_UPDATED) ||
3438             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3439                 /*
3440                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3441                  * chip-internal interrupt pending events.
3442                  * Writing non-zero to intr-mbox-0 additionally tells the
3443                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3444                  * event coalescing.
3445                  */
3446                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3447                              0x00000001);
3448                 if (tg3_irq_sync(tp))
3449                         goto out;
3450                 sblk->status &= ~SD_STATUS_UPDATED;
3451                 if (likely(tg3_has_work(tp))) {
3452                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3453                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3454                 } else {
3455                         /* No work, shared interrupt perhaps?  re-enable
3456                          * interrupts, and flush that PCI write
3457                          */
3458                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3459                                 0x00000000);
3460                 }
3461         } else {        /* shared interrupt */
3462                 handled = 0;
3463         }
3464 out:
3465         return IRQ_RETVAL(handled);
3466 }
3467
3468 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3469 {
3470         struct net_device *dev = dev_id;
3471         struct tg3 *tp = netdev_priv(dev);
3472         struct tg3_hw_status *sblk = tp->hw_status;
3473         unsigned int handled = 1;
3474
3475         /* In INTx mode, it is possible for the interrupt to arrive at
3476          * the CPU before the status block update posted prior to the interrupt.
3477          * Reading the PCI State register will confirm whether the
3478          * interrupt is ours and will flush the status block.
3479          */
3480         if ((sblk->status_tag != tp->last_tag) ||
3481             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3482                 /*
3483                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3484                  * chip-internal interrupt pending events.
3485                  * Writing non-zero to intr-mbox-0 additionally tells the
3486                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3487                  * event coalescing.
3488                  */
3489                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3490                              0x00000001);
3491                 if (tg3_irq_sync(tp))
3492                         goto out;
3493                 if (netif_rx_schedule_prep(dev)) {
3494                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3495                         /* Update last_tag to mark that this status has been
3496                          * seen. Because interrupt may be shared, we may be
3497                          * racing with tg3_poll(), so only update last_tag
3498                          * if tg3_poll() is not scheduled.
3499                          */
3500                         tp->last_tag = sblk->status_tag;
3501                         __netif_rx_schedule(dev);
3502                 }
3503         } else {        /* shared interrupt */
3504                 handled = 0;
3505         }
3506 out:
3507         return IRQ_RETVAL(handled);
3508 }
3509
3510 /* ISR for interrupt test */
3511 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3512                 struct pt_regs *regs)
3513 {
3514         struct net_device *dev = dev_id;
3515         struct tg3 *tp = netdev_priv(dev);
3516         struct tg3_hw_status *sblk = tp->hw_status;
3517
3518         if ((sblk->status & SD_STATUS_UPDATED) ||
3519             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3520                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3521                              0x00000001);
3522                 return IRQ_RETVAL(1);
3523         }
3524         return IRQ_RETVAL(0);
3525 }
3526
3527 static int tg3_init_hw(struct tg3 *);
3528 static int tg3_halt(struct tg3 *, int, int);
3529
3530 #ifdef CONFIG_NET_POLL_CONTROLLER
3531 static void tg3_poll_controller(struct net_device *dev)
3532 {
3533         struct tg3 *tp = netdev_priv(dev);
3534
3535         tg3_interrupt(tp->pdev->irq, dev, NULL);
3536 }
3537 #endif
3538
3539 static void tg3_reset_task(void *_data)
3540 {
3541         struct tg3 *tp = _data;
3542         unsigned int restart_timer;
3543
3544         tg3_full_lock(tp, 0);
3545         tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3546
3547         if (!netif_running(tp->dev)) {
3548                 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3549                 tg3_full_unlock(tp);
3550                 return;
3551         }
3552
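             /* Drop the locks around tg3_netif_stop(); waiting for an
              * in-flight NAPI poll to finish may sleep, so it cannot be
              * done under the locks taken by tg3_full_lock().
              */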
3553         tg3_full_unlock(tp);
3554
3555         tg3_netif_stop(tp);
3556
3557         tg3_full_lock(tp, 1);
3558
3559         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3560         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3561
3562         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3563         tg3_init_hw(tp);
3564
3565         tg3_netif_start(tp);
3566
3567         if (restart_timer)
3568                 mod_timer(&tp->timer, jiffies + 1);
3569
3570         tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3571
3572         tg3_full_unlock(tp);
3573 }
3574
3575 static void tg3_tx_timeout(struct net_device *dev)
3576 {
3577         struct tg3 *tp = netdev_priv(dev);
3578
3579         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3580                dev->name);
3581
3582         schedule_work(&tp->reset_task);
3583 }
3584
3585 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3586 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3587 {
3588         u32 base = (u32) mapping & 0xffffffff;
3589
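             /* If base + len (plus a little slack) wraps past 32 bits, the
              * buffer straddles a 4GB boundary.  The first comparison is a
              * cheap filter for addresses close enough to the boundary for
              * that to be possible.
              */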
3590         return ((base > 0xffffdcc0) &&
3591                 (base + len + 8 < base));
3592 }
3593
3594 /* Test for DMA addresses > 40-bit */
3595 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3596                                           int len)
3597 {
3598 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3599         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3600                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3601         return 0;
3602 #else
3603         return 0;
3604 #endif
3605 }
3606
3607 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3608
3609 /* Workaround 4GB and 40-bit hardware DMA bugs. */
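     /* Strategy: copy the skb into a freshly allocated linear skb, map that
      * single buffer instead, then unmap and release the ring entries the
      * original (offending) skb had already consumed.
      */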
3610 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3611                                        u32 last_plus_one, u32 *start,
3612                                        u32 base_flags, u32 mss)
3613 {
3614         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3615         dma_addr_t new_addr = 0;
3616         u32 entry = *start;
3617         int i, ret = 0;
3618
3619         if (!new_skb) {
3620                 ret = -1;
3621         } else {
3622                 /* New SKB is guaranteed to be linear. */
3623                 entry = *start;
3624                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3625                                           PCI_DMA_TODEVICE);
3626                 /* Make sure new skb does not cross any 4G boundaries.
3627                  * Drop the packet if it does.
3628                  */
3629                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3630                         ret = -1;
3631                         dev_kfree_skb(new_skb);
3632                         new_skb = NULL;
3633                 } else {
3634                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3635                                     base_flags, 1 | (mss << 1));
3636                         *start = NEXT_TX(entry);
3637                 }
3638         }
3639
3640         /* Now clean up the sw ring entries. */
3641         i = 0;
3642         while (entry != last_plus_one) {
3643                 int len;
3644
3645                 if (i == 0)
3646                         len = skb_headlen(skb);
3647                 else
3648                         len = skb_shinfo(skb)->frags[i-1].size;
3649                 pci_unmap_single(tp->pdev,
3650                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3651                                  len, PCI_DMA_TODEVICE);
3652                 if (i == 0) {
3653                         tp->tx_buffers[entry].skb = new_skb;
3654                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3655                 } else {
3656                         tp->tx_buffers[entry].skb = NULL;
3657                 }
3658                 entry = NEXT_TX(entry);
3659                 i++;
3660         }
3661
3662         dev_kfree_skb(skb);
3663
3664         return ret;
3665 }
3666
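     /* 'mss_and_is_end' packs the end-of-packet indication into bit 0 and
      * the MSS into the remaining upper bits; a VLAN tag, when present,
      * arrives in the upper 16 bits of 'flags'.
      */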
3667 static void tg3_set_txd(struct tg3 *tp, int entry,
3668                         dma_addr_t mapping, int len, u32 flags,
3669                         u32 mss_and_is_end)
3670 {
3671         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3672         int is_end = (mss_and_is_end & 0x1);
3673         u32 mss = (mss_and_is_end >> 1);
3674         u32 vlan_tag = 0;
3675
3676         if (is_end)
3677                 flags |= TXD_FLAG_END;
3678         if (flags & TXD_FLAG_VLAN) {
3679                 vlan_tag = flags >> 16;
3680                 flags &= 0xffff;
3681         }
3682         vlan_tag |= (mss << TXD_MSS_SHIFT);
3683
3684         txd->addr_hi = ((u64) mapping >> 32);
3685         txd->addr_lo = ((u64) mapping & 0xffffffff);
3686         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3687         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3688 }
3689
3690 /* hard_start_xmit for devices that don't have any bugs and
3691  * support TG3_FLG2_HW_TSO_2 only.
3692  */
3693 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3694 {
3695         struct tg3 *tp = netdev_priv(dev);
3696         dma_addr_t mapping;
3697         u32 len, entry, base_flags, mss;
3698
3699         len = skb_headlen(skb);
3700
3701         /* No BH disabling for tx_lock here.  We are running in BH disabled
3702          * context and TX reclaim runs via tp->poll inside of a software
3703          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3704          * no IRQ context deadlocks to worry about either.  Rejoice!
3705          */
3706         if (!spin_trylock(&tp->tx_lock))
3707                 return NETDEV_TX_LOCKED;
3708
3709         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3710                 if (!netif_queue_stopped(dev)) {
3711                         netif_stop_queue(dev);
3712
3713                         /* This is a hard error, log it. */
3714                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3715                                "queue awake!\n", dev->name);
3716                 }
3717                 spin_unlock(&tp->tx_lock);
3718                 return NETDEV_TX_BUSY;
3719         }
3720
3721         entry = tp->tx_prod;
3722         base_flags = 0;
3723 #if TG3_TSO_SUPPORT != 0
3724         mss = 0;
3725         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3726             (mss = skb_shinfo(skb)->tso_size) != 0) {
3727                 int tcp_opt_len, ip_tcp_len;
3728
3729                 if (skb_header_cloned(skb) &&
3730                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3731                         dev_kfree_skb(skb);
3732                         goto out_unlock;
3733                 }
3734
3735                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3736                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3737
3738                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3739                                TXD_FLAG_CPU_POST_DMA);
3740
3741                 skb->nh.iph->check = 0;
3742                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3743
3744                 skb->h.th->check = 0;
3745
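                     /* For these HW-TSO devices the combined IP + TCP header
                      * length is folded into the upper bits of the MSS value
                      * handed to tg3_set_txd().
                      */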
3746                 mss |= (ip_tcp_len + tcp_opt_len) << 9;
3747         }
3748         else if (skb->ip_summed == CHECKSUM_HW)
3749                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3750 #else
3751         mss = 0;
3752         if (skb->ip_summed == CHECKSUM_HW)
3753                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3754 #endif
3755 #if TG3_VLAN_TAG_USED
3756         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3757                 base_flags |= (TXD_FLAG_VLAN |
3758                                (vlan_tx_tag_get(skb) << 16));
3759 #endif
3760
3761         /* Queue skb data, a.k.a. the main skb fragment. */
3762         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3763
3764         tp->tx_buffers[entry].skb = skb;
3765         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3766
3767         tg3_set_txd(tp, entry, mapping, len, base_flags,
3768                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3769
3770         entry = NEXT_TX(entry);
3771
3772         /* Now loop through additional data fragments, and queue them. */
3773         if (skb_shinfo(skb)->nr_frags > 0) {
3774                 unsigned int i, last;
3775
3776                 last = skb_shinfo(skb)->nr_frags - 1;
3777                 for (i = 0; i <= last; i++) {
3778                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3779
3780                         len = frag->size;
3781                         mapping = pci_map_page(tp->pdev,
3782                                                frag->page,
3783                                                frag->page_offset,
3784                                                len, PCI_DMA_TODEVICE);
3785
3786                         tp->tx_buffers[entry].skb = NULL;
3787                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3788
3789                         tg3_set_txd(tp, entry, mapping, len,
3790                                     base_flags, (i == last) | (mss << 1));
3791
3792                         entry = NEXT_TX(entry);
3793                 }
3794         }
3795
3796         /* Packets are ready, update Tx producer idx local and on card. */
3797         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3798
3799         tp->tx_prod = entry;
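             /* Stop the queue if a maximally fragmented skb might not fit.
              * tg3_tx() may have freed descriptors in the meantime, so
              * re-check against the wakeup threshold to avoid stalling
              * needlessly.
              */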
3800         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3801                 netif_stop_queue(dev);
3802                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3803                         netif_wake_queue(tp->dev);
3804         }
3805
3806 out_unlock:
3807         mmiowb();
3808         spin_unlock(&tp->tx_lock);
3809
3810         dev->trans_start = jiffies;
3811
3812         return NETDEV_TX_OK;
3813 }
3814
3815 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3816  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3817  */
3818 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3819 {
3820         struct tg3 *tp = netdev_priv(dev);
3821         dma_addr_t mapping;
3822         u32 len, entry, base_flags, mss;
3823         int would_hit_hwbug;
3824
3825         len = skb_headlen(skb);
3826
3827         /* No BH disabling for tx_lock here.  We are running in BH disabled
3828          * context and TX reclaim runs via tp->poll inside of a software
3829          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3830          * no IRQ context deadlocks to worry about either.  Rejoice!
3831          */
3832         if (!spin_trylock(&tp->tx_lock))
3833                 return NETDEV_TX_LOCKED; 
3834
3835         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3836                 if (!netif_queue_stopped(dev)) {
3837                         netif_stop_queue(dev);
3838
3839                         /* This is a hard error, log it. */
3840                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3841                                "queue awake!\n", dev->name);
3842                 }
3843                 spin_unlock(&tp->tx_lock);
3844                 return NETDEV_TX_BUSY;
3845         }
3846
3847         entry = tp->tx_prod;
3848         base_flags = 0;
3849         if (skb->ip_summed == CHECKSUM_HW)
3850                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3851 #if TG3_TSO_SUPPORT != 0
3852         mss = 0;
3853         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3854             (mss = skb_shinfo(skb)->tso_size) != 0) {
3855                 int tcp_opt_len, ip_tcp_len;
3856
3857                 if (skb_header_cloned(skb) &&
3858                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3859                         dev_kfree_skb(skb);
3860                         goto out_unlock;
3861                 }
3862
3863                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3864                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3865
3866                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3867                                TXD_FLAG_CPU_POST_DMA);
3868
3869                 skb->nh.iph->check = 0;
3870                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3871                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3872                         skb->h.th->check = 0;
3873                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3874                 }
3875                 else {
3876                         skb->h.th->check =
3877                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3878                                                    skb->nh.iph->daddr,
3879                                                    0, IPPROTO_TCP, 0);
3880                 }
3881
3882                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3883                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3884                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3885                                 int tsflags;
3886
3887                                 tsflags = ((skb->nh.iph->ihl - 5) +
3888                                            (tcp_opt_len >> 2));
3889                                 mss |= (tsflags << 11);
3890                         }
3891                 } else {
3892                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3893                                 int tsflags;
3894
3895                                 tsflags = ((skb->nh.iph->ihl - 5) +
3896                                            (tcp_opt_len >> 2));
3897                                 base_flags |= tsflags << 12;
3898                         }
3899                 }
3900         }
3901 #else
3902         mss = 0;
3903 #endif
3904 #if TG3_VLAN_TAG_USED
3905         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3906                 base_flags |= (TXD_FLAG_VLAN |
3907                                (vlan_tx_tag_get(skb) << 16));
3908 #endif
3909
3910         /* Queue skb data, a.k.a. the main skb fragment. */
3911         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3912
3913         tp->tx_buffers[entry].skb = skb;
3914         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3915
3916         would_hit_hwbug = 0;
3917
3918         if (tg3_4g_overflow_test(mapping, len))
3919                 would_hit_hwbug = 1;
3920
3921         tg3_set_txd(tp, entry, mapping, len, base_flags,
3922                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3923
3924         entry = NEXT_TX(entry);
3925
3926         /* Now loop through additional data fragments, and queue them. */
3927         if (skb_shinfo(skb)->nr_frags > 0) {
3928                 unsigned int i, last;
3929
3930                 last = skb_shinfo(skb)->nr_frags - 1;
3931                 for (i = 0; i <= last; i++) {
3932                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3933
3934                         len = frag->size;
3935                         mapping = pci_map_page(tp->pdev,
3936                                                frag->page,
3937                                                frag->page_offset,
3938                                                len, PCI_DMA_TODEVICE);
3939
3940                         tp->tx_buffers[entry].skb = NULL;
3941                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3942
3943                         if (tg3_4g_overflow_test(mapping, len))
3944                                 would_hit_hwbug = 1;
3945
3946                         if (tg3_40bit_overflow_test(tp, mapping, len))
3947                                 would_hit_hwbug = 1;
3948
3949                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3950                                 tg3_set_txd(tp, entry, mapping, len,
3951                                             base_flags, (i == last)|(mss << 1));
3952                         else
3953                                 tg3_set_txd(tp, entry, mapping, len,
3954                                             base_flags, (i == last));
3955
3956                         entry = NEXT_TX(entry);
3957                 }
3958         }
3959
3960         if (would_hit_hwbug) {
3961                 u32 last_plus_one = entry;
3962                 u32 start;
3963
3964                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3965                 start &= (TG3_TX_RING_SIZE - 1);
3966
3967                 /* If the workaround fails due to memory/mapping
3968                  * failure, silently drop this packet.
3969                  */
3970                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
3971                                                 &start, base_flags, mss))
3972                         goto out_unlock;
3973
3974                 entry = start;
3975         }
3976
3977         /* Packets are ready, update Tx producer idx local and on card. */
3978         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3979
3980         tp->tx_prod = entry;
3981         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3982                 netif_stop_queue(dev);
3983                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3984                         netif_wake_queue(tp->dev);
3985         }
3986
3987 out_unlock:
3988         mmiowb();
3989         spin_unlock(&tp->tx_lock);
3990
3991         dev->trans_start = jiffies;
3992
3993         return NETDEV_TX_OK;
3994 }
3995
3996 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3997                                int new_mtu)
3998 {
3999         dev->mtu = new_mtu;
4000
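             /* 5780-class chips service jumbo frames from the standard ring
              * with larger buffers and cannot do TSO at the same time, so
              * TSO is turned off; other jumbo-capable chips enable the
              * dedicated jumbo RX ring instead.
              */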
4001         if (new_mtu > ETH_DATA_LEN) {
4002                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4003                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4004                         ethtool_op_set_tso(dev, 0);
4005                 }
4006                 else
4007                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4008         } else {
4009                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4010                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4011                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4012         }
4013 }
4014
4015 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4016 {
4017         struct tg3 *tp = netdev_priv(dev);
4018
4019         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4020                 return -EINVAL;
4021
4022         if (!netif_running(dev)) {
4023                 /* We'll just catch it later when the
4024                  * device is brought up.
4025                  */
4026                 tg3_set_mtu(dev, tp, new_mtu);
4027                 return 0;
4028         }
4029
4030         tg3_netif_stop(tp);
4031
4032         tg3_full_lock(tp, 1);
4033
4034         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4035
4036         tg3_set_mtu(dev, tp, new_mtu);
4037
4038         tg3_init_hw(tp);
4039
4040         tg3_netif_start(tp);
4041
4042         tg3_full_unlock(tp);
4043
4044         return 0;
4045 }
4046
4047 /* Free up pending packets in all rx/tx rings.
4048  *
4049  * The chip has been shut down and the driver detached from
4050  * the network stack, so no interrupts or new tx packets will
4051  * end up in the driver.  tp->{tx,}lock is not held and we are not
4052  * in an interrupt context and thus may sleep.
4053  */
4054 static void tg3_free_rings(struct tg3 *tp)
4055 {
4056         struct ring_info *rxp;
4057         int i;
4058
4059         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4060                 rxp = &tp->rx_std_buffers[i];
4061
4062                 if (rxp->skb == NULL)
4063                         continue;
4064                 pci_unmap_single(tp->pdev,
4065                                  pci_unmap_addr(rxp, mapping),
4066                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4067                                  PCI_DMA_FROMDEVICE);
4068                 dev_kfree_skb_any(rxp->skb);
4069                 rxp->skb = NULL;
4070         }
4071
4072         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4073                 rxp = &tp->rx_jumbo_buffers[i];
4074
4075                 if (rxp->skb == NULL)
4076                         continue;
4077                 pci_unmap_single(tp->pdev,
4078                                  pci_unmap_addr(rxp, mapping),
4079                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4080                                  PCI_DMA_FROMDEVICE);
4081                 dev_kfree_skb_any(rxp->skb);
4082                 rxp->skb = NULL;
4083         }
4084
4085         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4086                 struct tx_ring_info *txp;
4087                 struct sk_buff *skb;
4088                 int j;
4089
4090                 txp = &tp->tx_buffers[i];
4091                 skb = txp->skb;
4092
4093                 if (skb == NULL) {
4094                         i++;
4095                         continue;
4096                 }
4097
4098                 pci_unmap_single(tp->pdev,
4099                                  pci_unmap_addr(txp, mapping),
4100                                  skb_headlen(skb),
4101                                  PCI_DMA_TODEVICE);
4102                 txp->skb = NULL;
4103
4104                 i++;
4105
4106                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4107                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4108                         pci_unmap_page(tp->pdev,
4109                                        pci_unmap_addr(txp, mapping),
4110                                        skb_shinfo(skb)->frags[j].size,
4111                                        PCI_DMA_TODEVICE);
4112                         i++;
4113                 }
4114
4115                 dev_kfree_skb_any(skb);
4116         }
4117 }
4118
4119 /* Initialize tx/rx rings for packet processing.
4120  *
4121  * The chip has been shut down and the driver detached from
4122  * the network stack, so no interrupts or new tx packets will
4123  * end up in the driver.  tp->{tx,}lock are held and thus
4124  * we may not sleep.
4125  */
4126 static void tg3_init_rings(struct tg3 *tp)
4127 {
4128         u32 i;
4129
4130         /* Free up all the SKBs. */
4131         tg3_free_rings(tp);
4132
4133         /* Zero out all descriptors. */
4134         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4135         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4136         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4137         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4138
4139         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4140         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4141             (tp->dev->mtu > ETH_DATA_LEN))
4142                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4143
4144         /* Initialize invariants of the rings; we only set this
4145          * stuff once.  This works because the card does not
4146          * write into the rx buffer posting rings.
4147          */
4148         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4149                 struct tg3_rx_buffer_desc *rxd;
4150
4151                 rxd = &tp->rx_std[i];
4152                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4153                         << RXD_LEN_SHIFT;
4154                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4155                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4156                                (i << RXD_OPAQUE_INDEX_SHIFT));
4157         }
4158
4159         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4160                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4161                         struct tg3_rx_buffer_desc *rxd;
4162
4163                         rxd = &tp->rx_jumbo[i];
4164                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4165                                 << RXD_LEN_SHIFT;
4166                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4167                                 RXD_FLAG_JUMBO;
4168                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4169                                (i << RXD_OPAQUE_INDEX_SHIFT));
4170                 }
4171         }
4172
4173         /* Now allocate fresh SKBs for each rx ring. */
4174         for (i = 0; i < tp->rx_pending; i++) {
4175                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
4176                                      -1, i) < 0)
4177                         break;
4178         }
4179
4180         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4181                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4182                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4183                                              -1, i) < 0)
4184                                 break;
4185                 }
4186         }
4187 }
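/* The "opaque" cookie written into each producer descriptor above is
 * how the driver finds its software state again at completion time:
 * the RX return ring entries carry the cookie back, the
 * RXD_OPAQUE_RING_STD / RXD_OPAQUE_RING_JUMBO bits identify which
 * posting ring the buffer belongs to, and the index bits (at
 * RXD_OPAQUE_INDEX_SHIFT) give the slot in rx_std_buffers[] or
 * rx_jumbo_buffers[] holding the matching skb and DMA mapping.
 */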
4188
4189 /*
4190  * Must not be invoked with interrupt sources disabled and
4191  * the hardware shutdown down.
4192  */
4193 static void tg3_free_consistent(struct tg3 *tp)
4194 {
4195         kfree(tp->rx_std_buffers);
4196         tp->rx_std_buffers = NULL;
4197         if (tp->rx_std) {
4198                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4199                                     tp->rx_std, tp->rx_std_mapping);
4200                 tp->rx_std = NULL;
4201         }
4202         if (tp->rx_jumbo) {
4203                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4204                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4205                 tp->rx_jumbo = NULL;
4206         }
4207         if (tp->rx_rcb) {
4208                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4209                                     tp->rx_rcb, tp->rx_rcb_mapping);
4210                 tp->rx_rcb = NULL;
4211         }
4212         if (tp->tx_ring) {
4213                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4214                         tp->tx_ring, tp->tx_desc_mapping);
4215                 tp->tx_ring = NULL;
4216         }
4217         if (tp->hw_status) {
4218                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4219                                     tp->hw_status, tp->status_mapping);
4220                 tp->hw_status = NULL;
4221         }
4222         if (tp->hw_stats) {
4223                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4224                                     tp->hw_stats, tp->stats_mapping);
4225                 tp->hw_stats = NULL;
4226         }
4227 }
4228
4229 /*
4230  * Must not be invoked with interrupt sources disabled and
4231  * the hardware shut down.  Can sleep.
4232  */
4233 static int tg3_alloc_consistent(struct tg3 *tp)
4234 {
4235         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4236                                       (TG3_RX_RING_SIZE +
4237                                        TG3_RX_JUMBO_RING_SIZE)) +
4238                                      (sizeof(struct tx_ring_info) *
4239                                       TG3_TX_RING_SIZE),
4240                                      GFP_KERNEL);
4241         if (!tp->rx_std_buffers)
4242                 return -ENOMEM;
4243
4244         memset(tp->rx_std_buffers, 0,
4245                (sizeof(struct ring_info) *
4246                 (TG3_RX_RING_SIZE +
4247                  TG3_RX_JUMBO_RING_SIZE)) +
4248                (sizeof(struct tx_ring_info) *
4249                 TG3_TX_RING_SIZE));
4250
4251         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4252         tp->tx_buffers = (struct tx_ring_info *)
4253                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4254
4255         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4256                                           &tp->rx_std_mapping);
4257         if (!tp->rx_std)
4258                 goto err_out;
4259
4260         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4261                                             &tp->rx_jumbo_mapping);
4262
4263         if (!tp->rx_jumbo)
4264                 goto err_out;
4265
4266         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4267                                           &tp->rx_rcb_mapping);
4268         if (!tp->rx_rcb)
4269                 goto err_out;
4270
4271         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4272                                            &tp->tx_desc_mapping);
4273         if (!tp->tx_ring)
4274                 goto err_out;
4275
4276         tp->hw_status = pci_alloc_consistent(tp->pdev,
4277                                              TG3_HW_STATUS_SIZE,
4278                                              &tp->status_mapping);
4279         if (!tp->hw_status)
4280                 goto err_out;
4281
4282         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4283                                             sizeof(struct tg3_hw_stats),
4284                                             &tp->stats_mapping);
4285         if (!tp->hw_stats)
4286                 goto err_out;
4287
4288         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4289         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4290
4291         return 0;
4292
4293 err_out:
4294         tg3_free_consistent(tp);
4295         return -ENOMEM;
4296 }
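/* All three software bookkeeping arrays come from the single kmalloc()
 * above and are carved out by pointer arithmetic:
 *
 *	rx_std_buffers  [TG3_RX_RING_SIZE]        struct ring_info
 *	rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE]  struct ring_info
 *	tx_buffers      [TG3_TX_RING_SIZE]        struct tx_ring_info
 *
 * which is why tg3_free_consistent() only kfree()s rx_std_buffers.
 * The descriptor rings and the status/stats blocks are separate
 * pci_alloc_consistent() allocations because the NIC DMAs into them
 * directly.
 */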
4297
4298 #define MAX_WAIT_CNT 1000
4299
4300 /* To stop a block, clear the enable bit and poll till it
4301  * clears.  tp->lock is held.
4302  */
4303 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4304 {
4305         unsigned int i;
4306         u32 val;
4307
4308         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4309                 switch (ofs) {
4310                 case RCVLSC_MODE:
4311                 case DMAC_MODE:
4312                 case MBFREE_MODE:
4313                 case BUFMGR_MODE:
4314                 case MEMARB_MODE:
4315                         /* We can't enable/disable these bits of the
4316                          * 5705/5750; just say success.
4317                          */
4318                         return 0;
4319
4320                 default:
4321                         break;
4322                 }
4323         }
4324
4325         val = tr32(ofs);
4326         val &= ~enable_bit;
4327         tw32_f(ofs, val);
4328
4329         for (i = 0; i < MAX_WAIT_CNT; i++) {
4330                 udelay(100);
4331                 val = tr32(ofs);
4332                 if ((val & enable_bit) == 0)
4333                         break;
4334         }
4335
4336         if (i == MAX_WAIT_CNT && !silent) {
4337                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4338                        "ofs=%lx enable_bit=%x\n",
4339                        ofs, enable_bit);
4340                 return -ENODEV;
4341         }
4342
4343         return 0;
4344 }
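/* Worst case the poll above busy-waits MAX_WAIT_CNT * 100us = 100ms
 * per block, with tp->lock held.  The "silent" argument lets callers
 * that expect a timeout (e.g. when tearing down a chip that is already
 * wedged) suppress both the error printk and the -ENODEV return.
 */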
4345
4346 /* tp->lock is held. */
4347 static int tg3_abort_hw(struct tg3 *tp, int silent)
4348 {
4349         int i, err;
4350
4351         tg3_disable_ints(tp);
4352
4353         tp->rx_mode &= ~RX_MODE_ENABLE;
4354         tw32_f(MAC_RX_MODE, tp->rx_mode);
4355         udelay(10);
4356
4357         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4358         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4359         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4360         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4361         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4362         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4363
4364         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4365         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4366         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4367         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4368         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4369         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4370         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4371
4372         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4373         tw32_f(MAC_MODE, tp->mac_mode);
4374         udelay(40);
4375
4376         tp->tx_mode &= ~TX_MODE_ENABLE;
4377         tw32_f(MAC_TX_MODE, tp->tx_mode);
4378
4379         for (i = 0; i < MAX_WAIT_CNT; i++) {
4380                 udelay(100);
4381                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4382                         break;
4383         }
4384         if (i >= MAX_WAIT_CNT) {
4385                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4386                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4387                        tp->dev->name, tr32(MAC_TX_MODE));
4388                 err |= -ENODEV;
4389         }
4390
4391         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4392         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4393         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4394
4395         tw32(FTQ_RESET, 0xffffffff);
4396         tw32(FTQ_RESET, 0x00000000);
4397
4398         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4399         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4400
4401         if (tp->hw_status)
4402                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4403         if (tp->hw_stats)
4404                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4405
4406         return err;
4407 }
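/* The shutdown ordering above is roughly data-flow order: first the
 * MAC receive path and the receive BD/list/DMA blocks (so nothing new
 * is DMA'd into host memory), then the send side and read DMA, then
 * the MAC transmitter, and finally host coalescing, write DMA, the
 * buffer manager and the memory arbiter.  Errors are OR'd together so
 * one stuck block does not abort the rest of the teardown.
 */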
4408
4409 /* tp->lock is held. */
4410 static int tg3_nvram_lock(struct tg3 *tp)
4411 {
4412         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4413                 int i;
4414
4415                 if (tp->nvram_lock_cnt == 0) {
4416                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4417                         for (i = 0; i < 8000; i++) {
4418                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4419                                         break;
4420                                 udelay(20);
4421                         }
4422                         if (i == 8000) {
4423                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4424                                 return -ENODEV;
4425                         }
4426                 }
4427                 tp->nvram_lock_cnt++;
4428         }
4429         return 0;
4430 }
4431
4432 /* tp->lock is held. */
4433 static void tg3_nvram_unlock(struct tg3 *tp)
4434 {
4435         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4436                 if (tp->nvram_lock_cnt > 0)
4437                         tp->nvram_lock_cnt--;
4438                 if (tp->nvram_lock_cnt == 0)
4439                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4440         }
4441 }
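/* When TG3_FLAG_NVRAM is set, tp->nvram_lock_cnt makes the SWARB
 * hardware arbitration request behave like a recursive lock for this
 * driver: only the first tg3_nvram_lock() asserts SWARB_REQ_SET1 and
 * only the matching outermost tg3_nvram_unlock() clears it.  Typical
 * pairing (sketch only, mirrors tg3_load_firmware_cpu() below):
 */
#if 0
	int lock_err = tg3_nvram_lock(tp);
	/* ... access NVRAM or halt the boot code ... */
	if (!lock_err)
		tg3_nvram_unlock(tp);
#endif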
4442
4443 /* tp->lock is held. */
4444 static void tg3_enable_nvram_access(struct tg3 *tp)
4445 {
4446         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4447             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4448                 u32 nvaccess = tr32(NVRAM_ACCESS);
4449
4450                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4451         }
4452 }
4453
4454 /* tp->lock is held. */
4455 static void tg3_disable_nvram_access(struct tg3 *tp)
4456 {
4457         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4458             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4459                 u32 nvaccess = tr32(NVRAM_ACCESS);
4460
4461                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4462         }
4463 }
4464
4465 /* tp->lock is held. */
4466 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4467 {
4468         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4469                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4470                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4471
4472         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4473                 switch (kind) {
4474                 case RESET_KIND_INIT:
4475                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4476                                       DRV_STATE_START);
4477                         break;
4478
4479                 case RESET_KIND_SHUTDOWN:
4480                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4481                                       DRV_STATE_UNLOAD);
4482                         break;
4483
4484                 case RESET_KIND_SUSPEND:
4485                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4486                                       DRV_STATE_SUSPEND);
4487                         break;
4488
4489                 default:
4490                         break;
4491                 }
4492         }
4493 }
4494
4495 /* tp->lock is held. */
4496 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4497 {
4498         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4499                 switch (kind) {
4500                 case RESET_KIND_INIT:
4501                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4502                                       DRV_STATE_START_DONE);
4503                         break;
4504
4505                 case RESET_KIND_SHUTDOWN:
4506                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4507                                       DRV_STATE_UNLOAD_DONE);
4508                         break;
4509
4510                 default:
4511                         break;
4512                 }
4513         }
4514 }
4515
4516 /* tp->lock is held. */
4517 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4518 {
4519         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4520                 switch (kind) {
4521                 case RESET_KIND_INIT:
4522                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4523                                       DRV_STATE_START);
4524                         break;
4525
4526                 case RESET_KIND_SHUTDOWN:
4527                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4528                                       DRV_STATE_UNLOAD);
4529                         break;
4530
4531                 case RESET_KIND_SUSPEND:
4532                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4533                                       DRV_STATE_SUSPEND);
4534                         break;
4535
4536                 default:
4537                         break;
4538                 }
4539         }
4540 }
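/* The three signature helpers above implement the driver <-> firmware
 * (boot code / ASF agent) handshake through NIC SRAM mailboxes; as
 * used by tg3_halt() further down, roughly:
 *
 *	pre_reset   FIRMWARE_MBOX = MAGIC1 (non-SUN parts), plus
 *	            DRV_STATE = START/UNLOAD/SUSPEND  (new handshake)
 *	legacy      DRV_STATE = START/UNLOAD/SUSPEND  (plain ASF)
 *	post_reset  DRV_STATE = *_DONE                (new handshake)
 */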
4541
4542 static void tg3_stop_fw(struct tg3 *);
4543
4544 /* tp->lock is held. */
4545 static int tg3_chip_reset(struct tg3 *tp)
4546 {
4547         u32 val;
4548         void (*write_op)(struct tg3 *, u32, u32);
4549         int i;
4550
4551         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4552                 tg3_nvram_lock(tp);
4553                 /* No matching tg3_nvram_unlock() after this because
4554                  * chip reset below will undo the nvram lock.
4555                  */
4556                 tp->nvram_lock_cnt = 0;
4557         }
4558
4559         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4560             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
4561             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4562                 tw32(GRC_FASTBOOT_PC, 0);
4563
4564         /*
4565          * We must avoid the readl() that normally takes place.
4566          * It locks machines, causes machine checks, and other
4567          * fun things.  So, temporarily disable the 5701
4568          * hardware workaround, while we do the reset.
4569          */
4570         write_op = tp->write32;
4571         if (write_op == tg3_write_flush_reg32)
4572                 tp->write32 = tg3_write32;
4573
4574         /* do the reset */
4575         val = GRC_MISC_CFG_CORECLK_RESET;
4576
4577         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4578                 if (tr32(0x7e2c) == 0x60) {
4579                         tw32(0x7e2c, 0x20);
4580                 }
4581                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4582                         tw32(GRC_MISC_CFG, (1 << 29));
4583                         val |= (1 << 29);
4584                 }
4585         }
4586
4587         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4588                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4589         tw32(GRC_MISC_CFG, val);
4590
4591         /* restore 5701 hardware bug workaround write method */
4592         tp->write32 = write_op;
4593
4594         /* Unfortunately, we have to delay before the PCI read back.
4595          * Some 575X chips will not even respond to a PCI cfg access
4596          * when the reset command is given to the chip.
4597          *
4598          * How do these hardware designers expect things to work
4599          * properly if the PCI write is posted for a long period
4600          * of time?  It is always necessary to have some method by
4601          * which a register read back can occur to push out the write
4602          * that performs the reset.
4603          *
4604          * For most tg3 variants the trick below has worked.
4605          * Ho hum...
4606          */
4607         udelay(120);
4608
4609         /* Flush PCI posted writes.  The normal MMIO registers
4610          * are inaccessible at this time so this is the only
4611          * way to do this reliably (actually, this is no longer
4612          * the case, see above).  I tried to use indirect
4613          * register read/write but this upset some 5701 variants.
4614          */
4615         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4616
4617         udelay(120);
4618
4619         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4620                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4621                         int i;
4622                         u32 cfg_val;
4623
4624                         /* Wait for link training to complete.  */
4625                         for (i = 0; i < 5000; i++)
4626                                 udelay(100);
4627
4628                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4629                         pci_write_config_dword(tp->pdev, 0xc4,
4630                                                cfg_val | (1 << 15));
4631                 }
4632                 /* Set PCIE max payload size and clear error status.  */
4633                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4634         }
4635
4636         /* Re-enable indirect register accesses. */
4637         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4638                                tp->misc_host_ctrl);
4639
4640         /* Set MAX PCI retry to zero. */
4641         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4642         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4643             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4644                 val |= PCISTATE_RETRY_SAME_DMA;
4645         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4646
4647         pci_restore_state(tp->pdev);
4648
4649         /* Make sure PCI-X relaxed ordering bit is clear. */
4650         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4651         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4652         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4653
4654         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4655                 u32 val;
4656
4657                 /* Chip reset on 5780 will reset MSI enable bit,
4658                  * so we need to restore it.
4659                  */
4660                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4661                         u16 ctrl;
4662
4663                         pci_read_config_word(tp->pdev,
4664                                              tp->msi_cap + PCI_MSI_FLAGS,
4665                                              &ctrl);
4666                         pci_write_config_word(tp->pdev,
4667                                               tp->msi_cap + PCI_MSI_FLAGS,
4668                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4669                         val = tr32(MSGINT_MODE);
4670                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4671                 }
4672
4673                 val = tr32(MEMARB_MODE);
4674                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4675
4676         } else
4677                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4678
4679         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4680                 tg3_stop_fw(tp);
4681                 tw32(0x5000, 0x400);
4682         }
4683
4684         tw32(GRC_MODE, tp->grc_mode);
4685
4686         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4687                 u32 val = tr32(0xc4);
4688
4689                 tw32(0xc4, val | (1 << 15));
4690         }
4691
4692         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4693             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4694                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4695                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4696                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4697                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4698         }
4699
4700         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4701                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4702                 tw32_f(MAC_MODE, tp->mac_mode);
4703         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4704                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4705                 tw32_f(MAC_MODE, tp->mac_mode);
4706         } else
4707                 tw32_f(MAC_MODE, 0);
4708         udelay(40);
4709
4710         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4711                 /* Wait for firmware initialization to complete. */
4712                 for (i = 0; i < 100000; i++) {
4713                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4714                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4715                                 break;
4716                         udelay(10);
4717                 }
4718                 if (i >= 100000) {
4719                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4720                                "firmware will not restart magic=%08x\n",
4721                                tp->dev->name, val);
4722                         return -ENODEV;
4723                 }
4724         }
4725
4726         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4727             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4728                 u32 val = tr32(0x7c00);
4729
4730                 tw32(0x7c00, val | (1 << 25));
4731         }
4732
4733         /* Reprobe ASF enable state.  */
4734         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4735         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4736         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4737         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4738                 u32 nic_cfg;
4739
4740                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4741                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4742                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4743                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4744                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4745                 }
4746         }
4747
4748         return 0;
4749 }
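/* tg3_chip_reset() in one breath, roughly: grab the NVRAM arbitration
 * (the reset itself drops it, hence nvram_lock_cnt = 0), switch to a
 * non-flushing register write op so the core-clock reset write is not
 * read back, hit GRC_MISC_CFG_CORECLK_RESET, wait, then redo
 * everything the reset clobbered: indirect access enables, saved PCI
 * config space, PCI-X relaxed ordering, MSI enable on 5780-class
 * parts, the memory arbiter, GRC_MODE and the MAC port mode.  Finally
 * (on non-SUN parts) wait for the boot code to post ~MAGIC1 in the
 * firmware mailbox and re-probe whether ASF is enabled.
 */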
4750
4751 /* tp->lock is held. */
4752 static void tg3_stop_fw(struct tg3 *tp)
4753 {
4754         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4755                 u32 val;
4756                 int i;
4757
4758                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4759                 val = tr32(GRC_RX_CPU_EVENT);
4760                 val |= (1 << 14);
4761                 tw32(GRC_RX_CPU_EVENT, val);
4762
4763                 /* Wait for RX cpu to ACK the event.  */
4764                 for (i = 0; i < 100; i++) {
4765                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4766                                 break;
4767                         udelay(1);
4768                 }
4769         }
4770 }
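/* The firmware "pause" request above is a mailbox-plus-doorbell
 * sequence: write the command word (FWCMD_NICDRV_PAUSE_FW) into the FW
 * command mailbox, set bit 14 of GRC_RX_CPU_EVENT to interrupt the RX
 * CPU, then poll for the ASF firmware to ack by clearing that bit; the
 * loop gives up silently after roughly 100us.
 */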
4771
4772 /* tp->lock is held. */
4773 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4774 {
4775         int err;
4776
4777         tg3_stop_fw(tp);
4778
4779         tg3_write_sig_pre_reset(tp, kind);
4780
4781         tg3_abort_hw(tp, silent);
4782         err = tg3_chip_reset(tp);
4783
4784         tg3_write_sig_legacy(tp, kind);
4785         tg3_write_sig_post_reset(tp, kind);
4786
4787         if (err)
4788                 return err;
4789
4790         return 0;
4791 }
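/* tg3_halt() is the common quiesce entry point: callers such as the
 * MTU path above (and the close/timeout/suspend paths) invoke it with
 * tg3_full_lock() held and the net queues already stopped, and follow
 * it with tg3_init_hw() if the device is coming back up.  The "kind"
 * argument only affects which DRV_STATE value is advertised to the
 * firmware via the signature helpers.
 */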
4792
4793 #define TG3_FW_RELEASE_MAJOR    0x0
4794 #define TG3_FW_RELASE_MINOR     0x0
4795 #define TG3_FW_RELEASE_FIX      0x0
4796 #define TG3_FW_START_ADDR       0x08000000
4797 #define TG3_FW_TEXT_ADDR        0x08000000
4798 #define TG3_FW_TEXT_LEN         0x9c0
4799 #define TG3_FW_RODATA_ADDR      0x080009c0
4800 #define TG3_FW_RODATA_LEN       0x60
4801 #define TG3_FW_DATA_ADDR        0x08000a40
4802 #define TG3_FW_DATA_LEN         0x20
4803 #define TG3_FW_SBSS_ADDR        0x08000a60
4804 #define TG3_FW_SBSS_LEN         0xc
4805 #define TG3_FW_BSS_ADDR         0x08000a70
4806 #define TG3_FW_BSS_LEN          0x10
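/* Layout of the 5701_A0 fix-up firmware below: the *_ADDR values
 * describe the CPU's own view of the image starting at
 * TG3_FW_START_ADDR; when tg3_load_firmware_cpu() copies the image
 * into on-chip scratch RAM only the low 16 bits of each address are
 * used as the offset from the scratch base (e.g. rodata lands at
 * scratch_base + 0x09c0).  SBSS/BSS are not part of the image; they
 * are covered by the zero-fill of the whole scratch area before the
 * segments are written.
 */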
4807
4808 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4809         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4810         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4811         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4812         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4813         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4814         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4815         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4816         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4817         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4818         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4819         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4820         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4821         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4822         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4823         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4824         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4825         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4826         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4827         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4828         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4829         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4830         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4831         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4832         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4833         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4834         0, 0, 0, 0, 0, 0,
4835         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4836         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4837         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4838         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4839         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4840         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4841         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4842         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4843         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4844         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4845         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4846         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4847         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4848         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4849         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4850         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4851         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4852         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4853         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4854         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4855         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4856         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4857         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4858         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4859         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4860         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4861         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4862         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4863         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4864         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4865         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4866         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4867         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4868         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4869         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4870         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4871         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4872         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4873         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4874         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4875         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4876         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4877         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4878         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4879         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4880         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4881         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4882         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4883         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4884         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4885         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4886         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4887         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4888         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4889         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4890         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4891         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4892         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4893         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4894         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4895         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4896         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4897         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4898         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4899         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4900 };
4901
4902 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4903         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4904         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4905         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4906         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4907         0x00000000
4908 };
4909
4910 #if 0 /* All zeros, don't eat up space with it. */
4911 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4912         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4913         0x00000000, 0x00000000, 0x00000000, 0x00000000
4914 };
4915 #endif
4916
4917 #define RX_CPU_SCRATCH_BASE     0x30000
4918 #define RX_CPU_SCRATCH_SIZE     0x04000
4919 #define TX_CPU_SCRATCH_BASE     0x34000
4920 #define TX_CPU_SCRATCH_SIZE     0x04000
4921
4922 /* tp->lock is held. */
4923 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4924 {
4925         int i;
4926
4927         BUG_ON(offset == TX_CPU_BASE &&
4928             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
4929
4930         if (offset == RX_CPU_BASE) {
4931                 for (i = 0; i < 10000; i++) {
4932                         tw32(offset + CPU_STATE, 0xffffffff);
4933                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4934                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4935                                 break;
4936                 }
4937
4938                 tw32(offset + CPU_STATE, 0xffffffff);
4939                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4940                 udelay(10);
4941         } else {
4942                 for (i = 0; i < 10000; i++) {
4943                         tw32(offset + CPU_STATE, 0xffffffff);
4944                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4945                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4946                                 break;
4947                 }
4948         }
4949
4950         if (i >= 10000) {
4951                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4952                        "and %s CPU\n",
4953                        tp->dev->name,
4954                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4955                 return -ENODEV;
4956         }
4957
4958         /* Clear firmware's nvram arbitration. */
4959         if (tp->tg3_flags & TG3_FLAG_NVRAM)
4960                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
4961         return 0;
4962 }
4963
4964 struct fw_info {
4965         unsigned int text_base;
4966         unsigned int text_len;
4967         u32 *text_data;
4968         unsigned int rodata_base;
4969         unsigned int rodata_len;
4970         u32 *rodata_data;
4971         unsigned int data_base;
4972         unsigned int data_len;
4973         u32 *data_data;
4974 };
4975
4976 /* tp->lock is held. */
4977 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4978                                  int cpu_scratch_size, struct fw_info *info)
4979 {
4980         int err, lock_err, i;
4981         void (*write_op)(struct tg3 *, u32, u32);
4982
4983         if (cpu_base == TX_CPU_BASE &&
4984             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4985                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4986                        "TX cpu firmware on %s which is 5705.\n",
4987                        tp->dev->name);
4988                 return -EINVAL;
4989         }
4990
4991         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4992                 write_op = tg3_write_mem;
4993         else
4994                 write_op = tg3_write_indirect_reg32;
4995
4996         /* It is possible that bootcode is still loading at this point.
4997          * Get the nvram lock before halting the cpu.
4998          */
4999         lock_err = tg3_nvram_lock(tp);
5000         err = tg3_halt_cpu(tp, cpu_base);
5001         if (!lock_err)
5002                 tg3_nvram_unlock(tp);
5003         if (err)
5004                 goto out;
5005
5006         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5007                 write_op(tp, cpu_scratch_base + i, 0);
5008         tw32(cpu_base + CPU_STATE, 0xffffffff);
5009         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
5010         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5011                 write_op(tp, (cpu_scratch_base +
5012                               (info->text_base & 0xffff) +
5013                               (i * sizeof(u32))),
5014                          (info->text_data ?
5015                           info->text_data[i] : 0));
5016         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5017                 write_op(tp, (cpu_scratch_base +
5018                               (info->rodata_base & 0xffff) +
5019                               (i * sizeof(u32))),
5020                          (info->rodata_data ?
5021                           info->rodata_data[i] : 0));
5022         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5023                 write_op(tp, (cpu_scratch_base +
5024                               (info->data_base & 0xffff) +
5025                               (i * sizeof(u32))),
5026                          (info->data_data ?
5027                           info->data_data[i] : 0));
5028
5029         err = 0;
5030
5031 out:
5032         return err;
5033 }
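/* Passing a NULL text/rodata/data pointer in struct fw_info makes the
 * copy loops above write zeros for that segment instead of reading
 * from an array; that is how the all-zero tg3FwData[] (#if 0'd out
 * above) is handled -- tg3_load_5701_a0_firmware_fix() simply sets
 * info.data_data = NULL.
 */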
5034
5035 /* tp->lock is held. */
5036 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5037 {
5038         struct fw_info info;
5039         int err, i;
5040
5041         info.text_base = TG3_FW_TEXT_ADDR;
5042         info.text_len = TG3_FW_TEXT_LEN;
5043         info.text_data = &tg3FwText[0];
5044         info.rodata_base = TG3_FW_RODATA_ADDR;
5045         info.rodata_len = TG3_FW_RODATA_LEN;
5046         info.rodata_data = &tg3FwRodata[0];
5047         info.data_base = TG3_FW_DATA_ADDR;
5048         info.data_len = TG3_FW_DATA_LEN;
5049         info.data_data = NULL;
5050
5051         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5052                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5053                                     &info);
5054         if (err)
5055                 return err;
5056
5057         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5058                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5059                                     &info);
5060         if (err)
5061                 return err;
5062
5063         /* Now startup only the RX cpu. */
5064         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5065         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5066
5067         for (i = 0; i < 5; i++) {
5068                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5069                         break;
5070                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5071                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5072                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5073                 udelay(1000);
5074         }
5075         if (i >= 5) {
5076                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5077                        "to set RX CPU PC, is %08x should be %08x\n",
5078                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5079                        TG3_FW_TEXT_ADDR);
5080                 return -ENODEV;
5081         }
5082         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5083         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5084
5085         return 0;
5086 }
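/* Start-up handshake used above: write the entry point into CPU_PC,
 * then verify the CPU actually latched it; if not, re-halt and rewrite
 * it, retrying up to 5 times before giving up.  Only once the PC reads
 * back as TG3_FW_TEXT_ADDR is CPU_MODE cleared to let the RX CPU run
 * the new image.  The TX CPU image is loaded but the TX CPU is not
 * started here.
 */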
5087
5088 #if TG3_TSO_SUPPORT != 0
5089
5090 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5091 #define TG3_TSO_FW_RELASE_MINOR         0x6
5092 #define TG3_TSO_FW_RELEASE_FIX          0x0
5093 #define TG3_TSO_FW_START_ADDR           0x08000000
5094 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5095 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5096 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5097 #define TG3_TSO_FW_RODATA_LEN           0x60
5098 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5099 #define TG3_TSO_FW_DATA_LEN             0x30
5100 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5101 #define TG3_TSO_FW_SBSS_LEN             0x2c
5102 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5103 #define TG3_TSO_FW_BSS_LEN              0x894
5104
5105 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5106         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5107         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5108         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5109         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5110         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5111         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5112         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5113         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5114         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5115         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5116         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5117         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5118         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5119         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5120         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5121         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5122         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5123         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5124         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5125         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5126         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5127         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5128         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5129         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5130         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5131         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5132         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5133         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5134         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5135         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5136         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5137         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5138         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5139         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5140         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5141         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5142         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5143         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5144         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5145         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5146         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5147         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5148         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5149         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5150         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5151         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5152         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5153         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5154         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5155         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5156         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5157         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5158         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5159         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5160         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5161         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5162         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5163         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5164         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5165         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5166         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5167         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5168         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5169         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5170         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5171         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5172         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5173         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5174         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5175         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5176         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5177         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5178         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5179         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5180         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5181         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5182         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5183         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5184         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5185         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5186         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5187         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5188         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5189         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5190         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5191         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5192         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5193         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5194         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5195         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5196         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5197         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5198         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5199         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5200         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5201         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5202         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5203         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5204         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5205         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5206         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5207         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5208         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5209         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5210         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5211         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5212         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5213         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5214         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5215         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5216         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5217         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5218         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5219         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5220         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5221         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5222         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5223         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5224         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5225         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5226         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5227         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5228         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5229         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5230         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5231         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5232         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5233         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5234         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5235         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5236         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5237         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5238         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5239         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5240         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5241         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5242         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5243         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5244         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5245         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5246         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5247         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5248         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5249         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5250         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5251         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5252         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5253         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5254         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5255         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5256         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5257         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5258         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5259         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5260         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5261         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5262         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5263         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5264         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5265         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5266         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5267         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5268         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5269         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5270         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5271         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5272         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5273         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5274         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5275         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5276         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5277         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5278         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5279         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5280         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5281         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5282         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5283         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5284         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5285         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5286         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5287         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5288         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5289         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5290         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5291         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5292         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5293         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5294         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5295         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5296         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5297         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5298         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5299         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5300         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5301         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5302         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5303         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5304         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5305         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5306         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5307         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5308         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5309         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5310         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5311         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5312         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5313         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5314         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5315         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5316         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5317         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5318         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5319         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5320         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5321         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5322         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5323         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5324         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5325         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5326         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5327         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5328         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5329         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5330         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5331         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5332         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5333         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5334         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5335         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5336         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5337         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5338         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5339         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5340         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5341         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5342         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5343         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5344         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5345         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5346         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5347         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5348         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5349         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5350         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5351         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5352         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5353         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5354         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5355         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5356         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5357         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5358         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5359         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5360         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5361         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5362         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5363         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5364         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5365         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5366         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5367         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5368         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5369         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5370         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5371         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5372         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5373         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5374         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5375         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5376         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5377         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5378         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5379         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5380         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5381         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5382         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5383         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5384         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5385         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5386         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5387         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5388         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5389         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5390 };
5391
5392 static u32 tg3TsoFwRodata[] = {
5393         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5394         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5395         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5396         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5397         0x00000000,
5398 };
5399
5400 static u32 tg3TsoFwData[] = {
5401         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5402         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5403         0x00000000,
5404 };
5405
5406 /* 5705 needs a special version of the TSO firmware.  */
5407 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5408 #define TG3_TSO5_FW_RELASE_MINOR        0x2
5409 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5410 #define TG3_TSO5_FW_START_ADDR          0x00010000
5411 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5412 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5413 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5414 #define TG3_TSO5_FW_RODATA_LEN          0x50
5415 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5416 #define TG3_TSO5_FW_DATA_LEN            0x20
5417 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5418 #define TG3_TSO5_FW_SBSS_LEN            0x28
5419 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5420 #define TG3_TSO5_FW_BSS_LEN             0x88
5421
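     /* The three arrays below hold the text, read-only data and data
      * segments of the 5705 TSO firmware image.  tg3_load_tso_firmware()
      * loads them via the RX CPU, using the start of the 5705 MBUF pool
      * as scratch space.
      */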
5422 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5423         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5424         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5425         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5426         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5427         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5428         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5429         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5430         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5431         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5432         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5433         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5434         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5435         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5436         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5437         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5438         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5439         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5440         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5441         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5442         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5443         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5444         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5445         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5446         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5447         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5448         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5449         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5450         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5451         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5452         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5453         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5454         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5455         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5456         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5457         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5458         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5459         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5460         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5461         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5462         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5463         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5464         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5465         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5466         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5467         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5468         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5469         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5470         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5471         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5472         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5473         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5474         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5475         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5476         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5477         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5478         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5479         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5480         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5481         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5482         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5483         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5484         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5485         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5486         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5487         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5488         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5489         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5490         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5491         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5492         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5493         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5494         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5495         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5496         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5497         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5498         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5499         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5500         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5501         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5502         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5503         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5504         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5505         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5506         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5507         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5508         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5509         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5510         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5511         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5512         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5513         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5514         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5515         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5516         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5517         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5518         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5519         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5520         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5521         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5522         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5523         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5524         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5525         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5526         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5527         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5528         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5529         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5530         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5531         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5532         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5533         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5534         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5535         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5536         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5537         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5538         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5539         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5540         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5541         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5542         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5543         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5544         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5545         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5546         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5547         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5548         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5549         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5550         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5551         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5552         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5553         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5554         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5555         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5556         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5557         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5558         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5559         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5560         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5561         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5562         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5563         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5564         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5565         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5566         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5567         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5568         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5569         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5570         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5571         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5572         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5573         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5574         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5575         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5576         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5577         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5578         0x00000000, 0x00000000, 0x00000000,
5579 };
5580
5581 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5582         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5583         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5584         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5585         0x00000000, 0x00000000, 0x00000000,
5586 };
5587
5588 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5589         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5590         0x00000000, 0x00000000, 0x00000000,
5591 };
5592
5593 /* tp->lock is held. */
5594 static int tg3_load_tso_firmware(struct tg3 *tp)
5595 {
5596         struct fw_info info;
5597         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5598         int err, i;
5599
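             /* Chips that implement TSO in hardware do not need this firmware. */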
5600         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5601                 return 0;
5602
5603         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5604                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5605                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5606                 info.text_data = &tg3Tso5FwText[0];
5607                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5608                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5609                 info.rodata_data = &tg3Tso5FwRodata[0];
5610                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5611                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5612                 info.data_data = &tg3Tso5FwData[0];
5613                 cpu_base = RX_CPU_BASE;
5614                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5615                 cpu_scratch_size = (info.text_len +
5616                                     info.rodata_len +
5617                                     info.data_len +
5618                                     TG3_TSO5_FW_SBSS_LEN +
5619                                     TG3_TSO5_FW_BSS_LEN);
5620         } else {
5621                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5622                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5623                 info.text_data = &tg3TsoFwText[0];
5624                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5625                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5626                 info.rodata_data = &tg3TsoFwRodata[0];
5627                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5628                 info.data_len = TG3_TSO_FW_DATA_LEN;
5629                 info.data_data = &tg3TsoFwData[0];
5630                 cpu_base = TX_CPU_BASE;
5631                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5632                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5633         }
5634
5635         err = tg3_load_firmware_cpu(tp, cpu_base,
5636                                     cpu_scratch_base, cpu_scratch_size,
5637                                     &info);
5638         if (err)
5639                 return err;
5640
5641         /* Now startup the cpu. */
5642         tw32(cpu_base + CPU_STATE, 0xffffffff);
5643         tw32_f(cpu_base + CPU_PC,    info.text_base);
5644
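             /* Give the CPU up to five chances to latch the new PC, halting
              * it and rewriting the PC between attempts.
              */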
5645         for (i = 0; i < 5; i++) {
5646                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5647                         break;
5648                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5649                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5650                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5651                 udelay(1000);
5652         }
5653         if (i >= 5) {
5654                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set CPU PC "
5655                        "for %s, is %08x, should be %08x\n",
5656                        tp->dev->name, tr32(cpu_base + CPU_PC),
5657                        info.text_base);
5658                 return -ENODEV;
5659         }
5660         tw32(cpu_base + CPU_STATE, 0xffffffff);
5661         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5662         return 0;
5663 }
5664
5665 #endif /* TG3_TSO_SUPPORT != 0 */
5666
5667 /* tp->lock is held. */
5668 static void __tg3_set_mac_addr(struct tg3 *tp)
5669 {
5670         u32 addr_high, addr_low;
5671         int i;
5672
5673         addr_high = ((tp->dev->dev_addr[0] << 8) |
5674                      tp->dev->dev_addr[1]);
5675         addr_low = ((tp->dev->dev_addr[2] << 24) |
5676                     (tp->dev->dev_addr[3] << 16) |
5677                     (tp->dev->dev_addr[4] <<  8) |
5678                     (tp->dev->dev_addr[5] <<  0));
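             /* Write the same station address into all four MAC address
              * register pairs.
              */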
5679         for (i = 0; i < 4; i++) {
5680                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5681                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5682         }
5683
5684         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5685             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5686                 for (i = 0; i < 12; i++) {
5687                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5688                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5689                 }
5690         }
5691
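             /* Seed the transmit backoff generator from the byte-wise sum of
              * the MAC address, presumably so that different NICs pick
              * different backoff slots.
              */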
5692         addr_high = (tp->dev->dev_addr[0] +
5693                      tp->dev->dev_addr[1] +
5694                      tp->dev->dev_addr[2] +
5695                      tp->dev->dev_addr[3] +
5696                      tp->dev->dev_addr[4] +
5697                      tp->dev->dev_addr[5]) &
5698                 TX_BACKOFF_SEED_MASK;
5699         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5700 }
5701
5702 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5703 {
5704         struct tg3 *tp = netdev_priv(dev);
5705         struct sockaddr *addr = p;
5706
5707         if (!is_valid_ether_addr(addr->sa_data))
5708                 return -EINVAL;
5709
5710         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5711
5712         if (!netif_running(dev))
5713                 return 0;
5714
5715         spin_lock_bh(&tp->lock);
5716         __tg3_set_mac_addr(tp);
5717         spin_unlock_bh(&tp->lock);
5718
5719         return 0;
5720 }
5721
5722 /* tp->lock is held. */
5723 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5724                            dma_addr_t mapping, u32 maxlen_flags,
5725                            u32 nic_addr)
5726 {
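             /* A BD info block in NIC SRAM holds the host ring DMA address
              * (high and low 32-bit halves), a maxlen/flags word and, on
              * pre-5705 chips, the ring's location in NIC SRAM.
              */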
5727         tg3_write_mem(tp,
5728                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5729                       ((u64) mapping >> 32));
5730         tg3_write_mem(tp,
5731                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5732                       ((u64) mapping & 0xffffffff));
5733         tg3_write_mem(tp,
5734                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5735                        maxlen_flags);
5736
5737         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5738                 tg3_write_mem(tp,
5739                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5740                               nic_addr);
5741 }
5742
5743 static void __tg3_set_rx_mode(struct net_device *);
5744 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5745 {
5746         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5747         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5748         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5749         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5750         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5751                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5752                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5753         }
5754         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5755         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5756         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5757                 u32 val = ec->stats_block_coalesce_usecs;
5758
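                     /* A zero tick count presumably stops statistics block
                      * DMA while the link is down.
                      */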
5759                 if (!netif_carrier_ok(tp->dev))
5760                         val = 0;
5761
5762                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5763         }
5764 }
5765
5766 /* tp->lock is held. */
5767 static int tg3_reset_hw(struct tg3 *tp)
5768 {
5769         u32 val, rdmac_mode;
5770         int i, err, limit;
5771
5772         tg3_disable_ints(tp);
5773
5774         tg3_stop_fw(tp);
5775
5776         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5777
5778         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5779                 tg3_abort_hw(tp, 1);
5780         }
5781
5782         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
5783                 tg3_phy_reset(tp);
5784
5785         err = tg3_chip_reset(tp);
5786         if (err)
5787                 return err;
5788
5789         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5790
5791         /* This works around an issue with Athlon chipsets on
5792          * B3 tigon3 silicon.  This bit has no effect on any
5793          * other revision.  But do not set this on PCI Express
5794          * chips.
5795          */
5796         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5797                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5798         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5799
5800         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5801             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5802                 val = tr32(TG3PCI_PCISTATE);
5803                 val |= PCISTATE_RETRY_SAME_DMA;
5804                 tw32(TG3PCI_PCISTATE, val);
5805         }
5806
5807         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5808                 /* Enable some hw fixes.  */
5809                 val = tr32(TG3PCI_MSI_DATA);
5810                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5811                 tw32(TG3PCI_MSI_DATA, val);
5812         }
5813
5814         /* Descriptor ring init may make accesses to the
5815          * NIC SRAM area to set up the TX descriptors, so we
5816          * can only do this after the hardware has been
5817          * successfully reset.
5818          */
5819         tg3_init_rings(tp);
5820
5821         /* This value is determined during the probe time DMA
5822          * engine test, tg3_test_dma.
5823          */
5824         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5825
5826         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5827                           GRC_MODE_4X_NIC_SEND_RINGS |
5828                           GRC_MODE_NO_TX_PHDR_CSUM |
5829                           GRC_MODE_NO_RX_PHDR_CSUM);
5830         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5831         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5832                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5833         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5834                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5835
5836         tw32(GRC_MODE,
5837              tp->grc_mode |
5838              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5839
5840         /* Setup the timer prescalar register.  Clock is always 66MHz. */
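             /* 66 MHz / (65 + 1) presumably yields a 1 MHz (1 usec) timer tick. */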
5841         val = tr32(GRC_MISC_CFG);
5842         val &= ~0xff;
5843         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5844         tw32(GRC_MISC_CFG, val);
5845
5846         /* Initialize MBUF/DESC pool. */
5847         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5848                 /* Do nothing.  */
5849         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5850                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5851                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5852                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5853                 else
5854                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5855                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5856                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5857         }
5858 #if TG3_TSO_SUPPORT != 0
5859         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5860                 int fw_len;
5861
5862                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5863                           TG3_TSO5_FW_RODATA_LEN +
5864                           TG3_TSO5_FW_DATA_LEN +
5865                           TG3_TSO5_FW_SBSS_LEN +
5866                           TG3_TSO5_FW_BSS_LEN);
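                     /* Round the firmware footprint up to a 128-byte boundary;
                      * the usable MBUF pool starts just past it.
                      */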
5867                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5868                 tw32(BUFMGR_MB_POOL_ADDR,
5869                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5870                 tw32(BUFMGR_MB_POOL_SIZE,
5871                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5872         }
5873 #endif
5874
5875         if (tp->dev->mtu <= ETH_DATA_LEN) {
5876                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5877                      tp->bufmgr_config.mbuf_read_dma_low_water);
5878                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5879                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5880                 tw32(BUFMGR_MB_HIGH_WATER,
5881                      tp->bufmgr_config.mbuf_high_water);
5882         } else {
5883                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5884                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5885                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5886                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5887                 tw32(BUFMGR_MB_HIGH_WATER,
5888                      tp->bufmgr_config.mbuf_high_water_jumbo);
5889         }
5890         tw32(BUFMGR_DMA_LOW_WATER,
5891              tp->bufmgr_config.dma_low_water);
5892         tw32(BUFMGR_DMA_HIGH_WATER,
5893              tp->bufmgr_config.dma_high_water);
5894
5895         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5896         for (i = 0; i < 2000; i++) {
5897                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5898                         break;
5899                 udelay(10);
5900         }
5901         if (i >= 2000) {
5902                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5903                        tp->dev->name);
5904                 return -ENODEV;
5905         }
5906
5907         /* Setup replenish threshold. */
5908         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5909
5910         /* Initialize TG3_BDINFO's at:
5911          *  RCVDBDI_STD_BD:     standard eth size rx ring
5912          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5913          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5914          *
5915          * like so:
5916          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5917          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5918          *                              ring attribute flags
5919          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5920          *
5921          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5922          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5923          *
5924          * The size of each ring is fixed in the firmware, but the location is
5925          * configurable.
5926          */
5927         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5928              ((u64) tp->rx_std_mapping >> 32));
5929         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5930              ((u64) tp->rx_std_mapping & 0xffffffff));
5931         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5932              NIC_SRAM_RX_BUFFER_DESC);
5933
5934         /* Don't even try to program the JUMBO/MINI buffer descriptor
5935          * configs on 5705.
5936          */
5937         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5938                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5939                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5940         } else {
5941                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5942                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5943
5944                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5945                      BDINFO_FLAGS_DISABLED);
5946
5947                 /* Setup replenish threshold. */
5948                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5949
5950                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5951                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5952                              ((u64) tp->rx_jumbo_mapping >> 32));
5953                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5954                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5955                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5956                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5957                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5958                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5959                 } else {
5960                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5961                              BDINFO_FLAGS_DISABLED);
5962                 }
5963
5964         }
5965
5966         /* There is only one send ring on 5705/5750, so there is no need to
5967          * explicitly disable the others.
5968          */
5969         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5970                 /* Clear out send RCB ring in SRAM. */
5971                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5972                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5973                                       BDINFO_FLAGS_DISABLED);
5974         }
5975
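             /* Zero the driver's send ring indices and both send producer
              * mailboxes.
              */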
5976         tp->tx_prod = 0;
5977         tp->tx_cons = 0;
5978         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5979         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5980
5981         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5982                        tp->tx_desc_mapping,
5983                        (TG3_TX_RING_SIZE <<
5984                         BDINFO_FLAGS_MAXLEN_SHIFT),
5985                        NIC_SRAM_TX_BUFFER_DESC);
5986
5987         /* There is only one receive return ring on 5705/5750, so there is
5988          * no need to explicitly disable the others.
5989          */
5990         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5991                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5992                      i += TG3_BDINFO_SIZE) {
5993                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5994                                       BDINFO_FLAGS_DISABLED);
5995                 }
5996         }
5997
5998         tp->rx_rcb_ptr = 0;
5999         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6000
6001         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6002                        tp->rx_rcb_mapping,
6003                        (TG3_RX_RCB_RING_SIZE(tp) <<
6004                         BDINFO_FLAGS_MAXLEN_SHIFT),
6005                        0);
6006
6007         tp->rx_std_ptr = tp->rx_pending;
6008         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6009                      tp->rx_std_ptr);
6010
6011         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6012                                                 tp->rx_jumbo_pending : 0;
6013         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6014                      tp->rx_jumbo_ptr);
6015
6016         /* Initialize MAC address and backoff seed. */
6017         __tg3_set_mac_addr(tp);
6018
6019         /* MTU + ethernet header + FCS + optional VLAN tag */
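             /* ETH_HLEN covers the 14-byte header; the extra 8 bytes are the
              * 4-byte FCS plus a 4-byte VLAN tag.
              */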
6020         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6021
6022         /* The slot time is changed by tg3_setup_phy if we
6023          * run at gigabit with half duplex.
6024          */
6025         tw32(MAC_TX_LENGTHS,
6026              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6027              (6 << TX_LENGTHS_IPG_SHIFT) |
6028              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6029
6030         /* Receive rules. */
6031         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6032         tw32(RCVLPC_CONFIG, 0x0181);
6033
6034         /* Calculate the RDMAC_MODE setting early; we need it to determine
6035          * the RCVLPC_STATS_ENABLE mask.
6036          */
6037         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6038                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6039                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6040                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6041                       RDMAC_MODE_LNGREAD_ENAB);
6042         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6043                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6044
6045         /* This if statement applies to 5705 and 5750 PCI devices only. */
6046         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6047              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6048             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6049                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6050                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6051                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6052                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6053                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6054                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6055                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6056                 }
6057         }
6058
6059         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6060                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6061
6062 #if TG3_TSO_SUPPORT != 0
6063         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6064                 rdmac_mode |= (1 << 27);
6065 #endif
6066
6067         /* Receive/send statistics. */
6068         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6069             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6070                 val = tr32(RCVLPC_STATS_ENABLE);
6071                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6072                 tw32(RCVLPC_STATS_ENABLE, val);
6073         } else {
6074                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6075         }
6076         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6077         tw32(SNDDATAI_STATSENAB, 0xffffff);
6078         tw32(SNDDATAI_STATSCTRL,
6079              (SNDDATAI_SCTRL_ENABLE |
6080               SNDDATAI_SCTRL_FASTUPD));
6081
6082         /* Setup host coalescing engine. */
6083         tw32(HOSTCC_MODE, 0);
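             /* Wait for the coalescing engine to go idle before programming
              * the coalescing parameters.
              */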
6084         for (i = 0; i < 2000; i++) {
6085                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6086                         break;
6087                 udelay(10);
6088         }
6089
6090         __tg3_set_coalesce(tp, &tp->coal);
6091
6092         /* set status block DMA address */
6093         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6094              ((u64) tp->status_mapping >> 32));
6095         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6096              ((u64) tp->status_mapping & 0xffffffff));
6097
6098         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6099                 /* Status/statistics block address.  See tg3_timer,
6100                  * the tg3_periodic_fetch_stats call there, and
6101                  * tg3_get_stats to see how this works for 5705/5750 chips.
6102                  */
6103                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6104                      ((u64) tp->stats_mapping >> 32));
6105                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6106                      ((u64) tp->stats_mapping & 0xffffffff));
6107                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6108                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6109         }
6110
6111         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6112
6113         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6114         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6115         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6116                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6117
6118         /* Clear statistics/status block in chip, and status block in ram. */
6119         for (i = NIC_SRAM_STATS_BLK;
6120              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6121              i += sizeof(u32)) {
6122                 tg3_write_mem(tp, i, 0);
6123                 udelay(40);
6124         }
6125         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6126
6127         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6128                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6129                 /* Reset to prevent intermittently losing the first rx packet. */
6130                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6131                 udelay(10);
6132         }
6133
6134         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6135                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6136         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6137         udelay(40);
6138
6139         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6140          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6141          * register to preserve the GPIO settings for LOMs. The GPIOs,
6142          * whether used as inputs or outputs, are set by boot code after
6143          * reset.
6144          */
6145         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6146                 u32 gpio_mask;
6147
6148                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6149                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6150
6151                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6152                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6153                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6154
6155                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6156                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6157
6158                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6159
6160                 /* GPIO1 must be driven high for eeprom write protect */
6161                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6162                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6163         }
6164         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6165         udelay(100);
6166
6167         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6168         tp->last_tag = 0;
6169
6170         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6171                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6172                 udelay(40);
6173         }
6174
6175         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6176                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6177                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6178                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6179                WDMAC_MODE_LNGREAD_ENAB);
6180
6181         /* This if statement applies to 5705 and 5750 PCI devices only. */
6182         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6183              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6184             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6185                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6186                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6187                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6188                         /* nothing */
6189                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6190                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6191                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6192                         val |= WDMAC_MODE_RX_ACCEL;
6193                 }
6194         }
6195
6196         /* Enable host coalescing bug fix */
6197         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6198             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6199                 val |= (1 << 29);
6200
6201         tw32_f(WDMAC_MODE, val);
6202         udelay(40);
6203
6204         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6205                 val = tr32(TG3PCI_X_CAPS);
6206                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6207                         val &= ~PCIX_CAPS_BURST_MASK;
6208                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6209                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6210                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6211                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6212                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6213                                 val |= (tp->split_mode_max_reqs <<
6214                                         PCIX_CAPS_SPLIT_SHIFT);
6215                 }
6216                 tw32(TG3PCI_X_CAPS, val);
6217         }
6218
6219         tw32_f(RDMAC_MODE, rdmac_mode);
6220         udelay(40);
6221
6222         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6223         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6224                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6225         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6226         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6227         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6228         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6229         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6230 #if TG3_TSO_SUPPORT != 0
6231         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6232                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6233 #endif
6234         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6235         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6236
6237         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6238                 err = tg3_load_5701_a0_firmware_fix(tp);
6239                 if (err)
6240                         return err;
6241         }
6242
6243 #if TG3_TSO_SUPPORT != 0
6244         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6245                 err = tg3_load_tso_firmware(tp);
6246                 if (err)
6247                         return err;
6248         }
6249 #endif
6250
6251         tp->tx_mode = TX_MODE_ENABLE;
6252         tw32_f(MAC_TX_MODE, tp->tx_mode);
6253         udelay(100);
6254
6255         tp->rx_mode = RX_MODE_ENABLE;
6256         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6257                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6258
6259         tw32_f(MAC_RX_MODE, tp->rx_mode);
6260         udelay(10);
6261
6262         if (tp->link_config.phy_is_low_power) {
6263                 tp->link_config.phy_is_low_power = 0;
6264                 tp->link_config.speed = tp->link_config.orig_speed;
6265                 tp->link_config.duplex = tp->link_config.orig_duplex;
6266                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6267         }
6268
6269         tp->mi_mode = MAC_MI_MODE_BASE;
6270         tw32_f(MAC_MI_MODE, tp->mi_mode);
6271         udelay(80);
6272
6273         tw32(MAC_LED_CTRL, tp->led_ctrl);
6274
6275         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6276         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6277                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6278                 udelay(10);
6279         }
6280         tw32_f(MAC_RX_MODE, tp->rx_mode);
6281         udelay(10);
6282
6283         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6284                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6285                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6286                         /* Set drive transmission level to 1.2V  */
6287                         /* only if the signal pre-emphasis bit is not set  */
6288                         val = tr32(MAC_SERDES_CFG);
6289                         val &= 0xfffff000;
6290                         val |= 0x880;
6291                         tw32(MAC_SERDES_CFG, val);
6292                 }
6293                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6294                         tw32(MAC_SERDES_CFG, 0x616000);
6295         }
6296
6297         /* Prevent chip from dropping frames when flow control
6298          * is enabled.
6299          */
6300         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6301
6302         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6303             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6304                 /* Use hardware link auto-negotiation */
6305                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6306         }
6307
6308         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6309             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6310                 u32 tmp;
6311
6312                 tmp = tr32(SERDES_RX_CTRL);
6313                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6314                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6315                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6316                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6317         }
6318
6319         err = tg3_setup_phy(tp, 1);
6320         if (err)
6321                 return err;
6322
6323         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6324                 u32 tmp;
6325
6326                 /* Clear CRC stats. */
6327                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6328                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6329                         tg3_readphy(tp, 0x14, &tmp);
6330                 }
6331         }
6332
6333         __tg3_set_rx_mode(tp->dev);
6334
6335         /* Initialize receive rules. */
6336         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6337         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6338         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6339         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6340
6341         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6342             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6343                 limit = 8;
6344         else
6345                 limit = 16;
6346         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6347                 limit -= 4;
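             /* Intentional fall-through: zero each unused receive rule and
              * value register pair above the limit supported on this chip.
              */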
6348         switch (limit) {
6349         case 16:
6350                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6351         case 15:
6352                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6353         case 14:
6354                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6355         case 13:
6356                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6357         case 12:
6358                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6359         case 11:
6360                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6361         case 10:
6362                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6363         case 9:
6364                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6365         case 8:
6366                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6367         case 7:
6368                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6369         case 6:
6370                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6371         case 5:
6372                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6373         case 4:
6374                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6375         case 3:
6376                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6377         case 2:
6378         case 1:
6379
6380         default:
6381                 break;
6382         }
6383
6384         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6385
6386         return 0;
6387 }
6388
6389 /* Called at device open time to get the chip ready for
6390  * packet processing.  Invoked with tp->lock held.
6391  */
6392 static int tg3_init_hw(struct tg3 *tp)
6393 {
6394         int err;
6395
6396         /* Force the chip into D0. */
6397         err = tg3_set_power_state(tp, PCI_D0);
6398         if (err)
6399                 goto out;
6400
6401         tg3_switch_clocks(tp);
6402
6403         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6404
6405         err = tg3_reset_hw(tp);
6406
6407 out:
6408         return err;
6409 }
6410
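     /* The MAC statistics registers are only 32 bits wide, but the driver
      * keeps 64-bit counters: if the low word wraps after the add, carry
      * one into the high word.
      */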
6411 #define TG3_STAT_ADD32(PSTAT, REG) \
6412 do {    u32 __val = tr32(REG); \
6413         (PSTAT)->low += __val; \
6414         if ((PSTAT)->low < __val) \
6415                 (PSTAT)->high += 1; \
6416 } while (0)
6417
6418 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6419 {
6420         struct tg3_hw_stats *sp = tp->hw_stats;
6421
6422         if (!netif_carrier_ok(tp->dev))
6423                 return;
6424
6425         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6426         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6427         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6428         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6429         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6430         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6431         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6432         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6433         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6434         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6435         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6436         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6437         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6438
6439         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6440         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6441         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6442         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6443         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6444         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6445         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6446         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6447         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6448         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6449         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6450         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6451         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6452         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6453 }
6454
6455 static void tg3_timer(unsigned long __opaque)
6456 {
6457         struct tg3 *tp = (struct tg3 *) __opaque;
6458
6459         if (tp->irq_sync)
6460                 goto restart_timer;
6461
6462         spin_lock(&tp->lock);
6463
6464         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6465                 /* All of this garbage is because, when using non-tagged
6466                  * IRQ status, the mailbox/status_block protocol the chip
6467                  * uses with the CPU is race prone.
6468                  */
6469                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6470                         tw32(GRC_LOCAL_CTRL,
6471                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6472                 } else {
6473                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6474                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6475                 }
6476
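                     /* If the write DMA engine has stopped, assume the chip
                      * has hung and schedule the reset task from process
                      * context.
                      */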
6477                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6478                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6479                         spin_unlock(&tp->lock);
6480                         schedule_work(&tp->reset_task);
6481                         return;
6482                 }
6483         }
6484
6485         /* This part only runs once per second. */
6486         if (!--tp->timer_counter) {
6487                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6488                         tg3_periodic_fetch_stats(tp);
6489
6490                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6491                         u32 mac_stat;
6492                         int phy_event;
6493
6494                         mac_stat = tr32(MAC_STATUS);
6495
6496                         phy_event = 0;
6497                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6498                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6499                                         phy_event = 1;
6500                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6501                                 phy_event = 1;
6502
6503                         if (phy_event)
6504                                 tg3_setup_phy(tp, 0);
6505                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6506                         u32 mac_stat = tr32(MAC_STATUS);
6507                         int need_setup = 0;
6508
6509                         if (netif_carrier_ok(tp->dev) &&
6510                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6511                                 need_setup = 1;
6512                         }
6513                         if (! netif_carrier_ok(tp->dev) &&
6514                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6515                                          MAC_STATUS_SIGNAL_DET))) {
6516                                 need_setup = 1;
6517                         }
6518                         if (need_setup) {
6519                                 tw32_f(MAC_MODE,
6520                                      (tp->mac_mode &
6521                                       ~MAC_MODE_PORT_MODE_MASK));
6522                                 udelay(40);
6523                                 tw32_f(MAC_MODE, tp->mac_mode);
6524                                 udelay(40);
6525                                 tg3_setup_phy(tp, 0);
6526                         }
6527                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6528                         tg3_serdes_parallel_detect(tp);
6529
6530                 tp->timer_counter = tp->timer_multiplier;
6531         }
6532
6533         /* Heartbeat is only sent once every 2 seconds.  */
6534         if (!--tp->asf_counter) {
6535                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6536                         u32 val;
6537
6538                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
6539                                            FWCMD_NICDRV_ALIVE2);
6540                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6541                         /* 5 second timeout */
6542                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6543                         val = tr32(GRC_RX_CPU_EVENT);
6544                         val |= (1 << 14);
6545                         tw32(GRC_RX_CPU_EVENT, val);
6546                 }
6547                 tp->asf_counter = tp->asf_multiplier;
6548         }
6549
6550         spin_unlock(&tp->lock);
6551
6552 restart_timer:
6553         tp->timer.expires = jiffies + tp->timer_offset;
6554         add_timer(&tp->timer);
6555 }
6556
6557 static int tg3_request_irq(struct tg3 *tp)
6558 {
6559         irqreturn_t (*fn)(int, void *, struct pt_regs *);
6560         unsigned long flags;
6561         struct net_device *dev = tp->dev;
6562
6563         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6564                 fn = tg3_msi;
6565                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6566                         fn = tg3_msi_1shot;
6567                 flags = SA_SAMPLE_RANDOM;
6568         } else {
6569                 fn = tg3_interrupt;
6570                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6571                         fn = tg3_interrupt_tagged;
6572                 flags = SA_SHIRQ | SA_SAMPLE_RANDOM;
6573         }
6574         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6575 }
6576
6577 static int tg3_test_interrupt(struct tg3 *tp)
6578 {
6579         struct net_device *dev = tp->dev;
6580         int err, i;
6581         u32 int_mbox = 0;
6582
6583         if (!netif_running(dev))
6584                 return -ENODEV;
6585
6586         tg3_disable_ints(tp);
6587
6588         free_irq(tp->pdev->irq, dev);
6589
6590         err = request_irq(tp->pdev->irq, tg3_test_isr,
6591                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6592         if (err)
6593                 return err;
6594
6595         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6596         tg3_enable_ints(tp);
6597
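             /* Ask the host coalescing engine to raise an interrupt right
              * away, then poll the interrupt mailbox to see whether it was
              * actually delivered.
              */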
6598         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6599                HOSTCC_MODE_NOW);
6600
6601         for (i = 0; i < 5; i++) {
6602                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6603                                         TG3_64BIT_REG_LOW);
6604                 if (int_mbox != 0)
6605                         break;
6606                 msleep(10);
6607         }
6608
6609         tg3_disable_ints(tp);
6610
6611         free_irq(tp->pdev->irq, dev);
6612         
6613         err = tg3_request_irq(tp);
6614
6615         if (err)
6616                 return err;
6617
6618         if (int_mbox != 0)
6619                 return 0;
6620
6621         return -EIO;
6622 }
6623
6624 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
6625  * INTx mode is successfully restored.
6626  */
6627 static int tg3_test_msi(struct tg3 *tp)
6628 {
6629         struct net_device *dev = tp->dev;
6630         int err;
6631         u16 pci_cmd;
6632
6633         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6634                 return 0;
6635
6636         /* Turn off SERR reporting in case MSI terminates with Master
6637          * Abort.
6638          */
6639         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6640         pci_write_config_word(tp->pdev, PCI_COMMAND,
6641                               pci_cmd & ~PCI_COMMAND_SERR);
6642
6643         err = tg3_test_interrupt(tp);
6644
6645         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6646
6647         if (!err)
6648                 return 0;
6649
6650         /* other failures */
6651         if (err != -EIO)
6652                 return err;
6653
6654         /* MSI test failed, go back to INTx mode */
6655         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6656                "switching to INTx mode. Please report this failure to "
6657                "the PCI maintainer and include system chipset information.\n",
6658                        tp->dev->name);
6659
6660         free_irq(tp->pdev->irq, dev);
6661         pci_disable_msi(tp->pdev);
6662
6663         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6664
6665         err = tg3_request_irq(tp);
6666         if (err)
6667                 return err;
6668
6669         /* Need to reset the chip because the MSI cycle may have terminated
6670          * with Master Abort.
6671          */
6672         tg3_full_lock(tp, 1);
6673
6674         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6675         err = tg3_init_hw(tp);
6676
6677         tg3_full_unlock(tp);
6678
6679         if (err)
6680                 free_irq(tp->pdev->irq, dev);
6681
6682         return err;
6683 }
6684
6685 static int tg3_open(struct net_device *dev)
6686 {
6687         struct tg3 *tp = netdev_priv(dev);
6688         int err;
6689
6690         tg3_full_lock(tp, 0);
6691
6692         err = tg3_set_power_state(tp, PCI_D0);
6693         if (err) {
                     tg3_full_unlock(tp);
6694                 return err;
             }
6695
6696         tg3_disable_ints(tp);
6697         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6698
6699         tg3_full_unlock(tp);
6700
6701         /* The placement of this call is tied
6702          * to the setup and use of Host TX descriptors.
6703          */
6704         err = tg3_alloc_consistent(tp);
6705         if (err)
6706                 return err;
6707
6708         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6709             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6710             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6711             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6712               (tp->pdev_peer == tp->pdev))) {
6713                 /* All MSI supporting chips should support tagged
6714                  * status.  Assert that this is the case.
6715                  */
6716                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6717                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6718                                "Not using MSI.\n", tp->dev->name);
6719                 } else if (pci_enable_msi(tp->pdev) == 0) {
6720                         u32 msi_mode;
6721
6722                         msi_mode = tr32(MSGINT_MODE);
6723                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6724                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6725                 }
6726         }
6727         err = tg3_request_irq(tp);
6728
6729         if (err) {
6730                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6731                         pci_disable_msi(tp->pdev);
6732                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6733                 }
6734                 tg3_free_consistent(tp);
6735                 return err;
6736         }
6737
6738         tg3_full_lock(tp, 0);
6739
6740         err = tg3_init_hw(tp);
6741         if (err) {
6742                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6743                 tg3_free_rings(tp);
6744         } else {
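                     /* Non-tagged-status chips poll every HZ/10 ticks so
                      * tg3_timer can work around the status block race;
                      * tagged-status chips only need the 1 Hz work.
                      */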
6745                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6746                         tp->timer_offset = HZ;
6747                 else
6748                         tp->timer_offset = HZ / 10;
6749
6750                 BUG_ON(tp->timer_offset > HZ);
6751                 tp->timer_counter = tp->timer_multiplier =
6752                         (HZ / tp->timer_offset);
6753                 tp->asf_counter = tp->asf_multiplier =
6754                         ((HZ / tp->timer_offset) * 2);
6755
6756                 init_timer(&tp->timer);
6757                 tp->timer.expires = jiffies + tp->timer_offset;
6758                 tp->timer.data = (unsigned long) tp;
6759                 tp->timer.function = tg3_timer;
6760         }
6761
6762         tg3_full_unlock(tp);
6763
6764         if (err) {
6765                 free_irq(tp->pdev->irq, dev);
6766                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6767                         pci_disable_msi(tp->pdev);
6768                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6769                 }
6770                 tg3_free_consistent(tp);
6771                 return err;
6772         }
6773
6774         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6775                 err = tg3_test_msi(tp);
6776
6777                 if (err) {
6778                         tg3_full_lock(tp, 0);
6779
6780                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6781                                 pci_disable_msi(tp->pdev);
6782                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6783                         }
6784                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6785                         tg3_free_rings(tp);
6786                         tg3_free_consistent(tp);
6787
6788                         tg3_full_unlock(tp);
6789
6790                         return err;
6791                 }
6792
6793                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6794                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6795                                 u32 val = tr32(0x7c04);
6796
6797                                 tw32(0x7c04, val | (1 << 29));
6798                         }
6799                 }
6800         }
6801
6802         tg3_full_lock(tp, 0);
6803
6804         add_timer(&tp->timer);
6805         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6806         tg3_enable_ints(tp);
6807
6808         tg3_full_unlock(tp);
6809
6810         netif_start_queue(dev);
6811
6812         return 0;
6813 }
6814
6815 #if 0
6816 /*static*/ void tg3_dump_state(struct tg3 *tp)
6817 {
6818         u32 val32, val32_2, val32_3, val32_4, val32_5;
6819         u16 val16;
6820         int i;
6821
6822         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6823         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6824         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6825                val16, val32);
6826
6827         /* MAC block */
6828         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6829                tr32(MAC_MODE), tr32(MAC_STATUS));
6830         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6831                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6832         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6833                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6834         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6835                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6836
6837         /* Send data initiator control block */
6838         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6839                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6840         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6841                tr32(SNDDATAI_STATSCTRL));
6842
6843         /* Send data completion control block */
6844         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6845
6846         /* Send BD ring selector block */
6847         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6848                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6849
6850         /* Send BD initiator control block */
6851         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6852                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6853
6854         /* Send BD completion control block */
6855         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6856
6857         /* Receive list placement control block */
6858         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6859                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6860         printk("       RCVLPC_STATSCTRL[%08x]\n",
6861                tr32(RCVLPC_STATSCTRL));
6862
6863         /* Receive data and receive BD initiator control block */
6864         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6865                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6866
6867         /* Receive data completion control block */
6868         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6869                tr32(RCVDCC_MODE));
6870
6871         /* Receive BD initiator control block */
6872         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6873                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6874
6875         /* Receive BD completion control block */
6876         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6877                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6878
6879         /* Receive list selector control block */
6880         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6881                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6882
6883         /* Mbuf cluster free block */
6884         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6885                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6886
6887         /* Host coalescing control block */
6888         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6889                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6890         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6891                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6892                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6893         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6894                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6895                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6896         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6897                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6898         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6899                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6900
6901         /* Memory arbiter control block */
6902         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6903                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6904
6905         /* Buffer manager control block */
6906         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6907                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6908         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6909                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6910         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6911                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6912                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6913                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6914
6915         /* Read DMA control block */
6916         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6917                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6918
6919         /* Write DMA control block */
6920         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6921                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6922
6923         /* DMA completion block */
6924         printk("DEBUG: DMAC_MODE[%08x]\n",
6925                tr32(DMAC_MODE));
6926
6927         /* GRC block */
6928         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6929                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6930         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6931                tr32(GRC_LOCAL_CTRL));
6932
6933         /* TG3_BDINFOs */
6934         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6935                tr32(RCVDBDI_JUMBO_BD + 0x0),
6936                tr32(RCVDBDI_JUMBO_BD + 0x4),
6937                tr32(RCVDBDI_JUMBO_BD + 0x8),
6938                tr32(RCVDBDI_JUMBO_BD + 0xc));
6939         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6940                tr32(RCVDBDI_STD_BD + 0x0),
6941                tr32(RCVDBDI_STD_BD + 0x4),
6942                tr32(RCVDBDI_STD_BD + 0x8),
6943                tr32(RCVDBDI_STD_BD + 0xc));
6944         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6945                tr32(RCVDBDI_MINI_BD + 0x0),
6946                tr32(RCVDBDI_MINI_BD + 0x4),
6947                tr32(RCVDBDI_MINI_BD + 0x8),
6948                tr32(RCVDBDI_MINI_BD + 0xc));
6949
6950         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6951         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6952         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6953         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6954         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6955                val32, val32_2, val32_3, val32_4);
6956
6957         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6958         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6959         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6960         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6961         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6962                val32, val32_2, val32_3, val32_4);
6963
6964         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6965         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6966         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6967         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6968         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6969         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6970                val32, val32_2, val32_3, val32_4, val32_5);
6971
6972         /* SW status block */
6973         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6974                tp->hw_status->status,
6975                tp->hw_status->status_tag,
6976                tp->hw_status->rx_jumbo_consumer,
6977                tp->hw_status->rx_consumer,
6978                tp->hw_status->rx_mini_consumer,
6979                tp->hw_status->idx[0].rx_producer,
6980                tp->hw_status->idx[0].tx_consumer);
6981
6982         /* SW statistics block */
6983         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6984                ((u32 *)tp->hw_stats)[0],
6985                ((u32 *)tp->hw_stats)[1],
6986                ((u32 *)tp->hw_stats)[2],
6987                ((u32 *)tp->hw_stats)[3]);
6988
6989         /* Mailboxes */
6990         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6991                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6992                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6993                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6994                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6995
6996         /* NIC side send descriptors. */
6997         for (i = 0; i < 6; i++) {
6998                 unsigned long txd;
6999
7000                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7001                         + (i * sizeof(struct tg3_tx_buffer_desc));
7002                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7003                        i,
7004                        readl(txd + 0x0), readl(txd + 0x4),
7005                        readl(txd + 0x8), readl(txd + 0xc));
7006         }
7007
7008         /* NIC side RX descriptors. */
7009         for (i = 0; i < 6; i++) {
7010                 unsigned long rxd;
7011
7012                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7013                         + (i * sizeof(struct tg3_rx_buffer_desc));
7014                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7015                        i,
7016                        readl(rxd + 0x0), readl(rxd + 0x4),
7017                        readl(rxd + 0x8), readl(rxd + 0xc));
7018                 rxd += (4 * sizeof(u32));
7019                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7020                        i,
7021                        readl(rxd + 0x0), readl(rxd + 0x4),
7022                        readl(rxd + 0x8), readl(rxd + 0xc));
7023         }
7024
7025         for (i = 0; i < 6; i++) {
7026                 unsigned long rxd;
7027
7028                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7029                         + (i * sizeof(struct tg3_rx_buffer_desc));
7030                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7031                        i,
7032                        readl(rxd + 0x0), readl(rxd + 0x4),
7033                        readl(rxd + 0x8), readl(rxd + 0xc));
7034                 rxd += (4 * sizeof(u32));
7035                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7036                        i,
7037                        readl(rxd + 0x0), readl(rxd + 0x4),
7038                        readl(rxd + 0x8), readl(rxd + 0xc));
7039         }
7040 }
7041 #endif
7042
7043 static struct net_device_stats *tg3_get_stats(struct net_device *);
7044 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7045
7046 static int tg3_close(struct net_device *dev)
7047 {
7048         struct tg3 *tp = netdev_priv(dev);
7049
7050         /* Calling flush_scheduled_work() may deadlock because
7051          * linkwatch_event() may be on the workqueue and it will try to get
7052          * the rtnl_lock, which we are holding.
7053          */
7054         while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7055                 msleep(1);
7056
7057         netif_stop_queue(dev);
7058
7059         del_timer_sync(&tp->timer);
7060
7061         tg3_full_lock(tp, 1);
7062 #if 0
7063         tg3_dump_state(tp);
7064 #endif
7065
7066         tg3_disable_ints(tp);
7067
7068         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7069         tg3_free_rings(tp);
7070         tp->tg3_flags &=
7071                 ~(TG3_FLAG_INIT_COMPLETE |
7072                   TG3_FLAG_GOT_SERDES_FLOWCTL);
7073
7074         tg3_full_unlock(tp);
7075
7076         free_irq(tp->pdev->irq, dev);
7077         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7078                 pci_disable_msi(tp->pdev);
7079                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7080         }
7081
7082         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7083                sizeof(tp->net_stats_prev));
7084         memcpy(&tp->estats_prev, tg3_get_estats(tp),
7085                sizeof(tp->estats_prev));
7086
7087         tg3_free_consistent(tp);
7088
7089         tg3_set_power_state(tp, PCI_D3hot);
7090
7091         netif_carrier_off(tp->dev);
7092
7093         return 0;
7094 }
7095
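     /* Fold a 64-bit hardware counter into an unsigned long; on 32-bit
      * hosts only the low word is returned.
      */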
7096 static inline unsigned long get_stat64(tg3_stat64_t *val)
7097 {
7098         unsigned long ret;
7099
7100 #if (BITS_PER_LONG == 32)
7101         ret = val->low;
7102 #else
7103         ret = ((u64)val->high << 32) | ((u64)val->low);
7104 #endif
7105         return ret;
7106 }
7107
7108 static unsigned long calc_crc_errors(struct tg3 *tp)
7109 {
7110         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7111
7112         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7113             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7114              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7115                 u32 val;
7116
7117                 spin_lock_bh(&tp->lock);
7118                 if (!tg3_readphy(tp, 0x1e, &val)) {
7119                         tg3_writephy(tp, 0x1e, val | 0x8000);
7120                         tg3_readphy(tp, 0x14, &val);
7121                 } else
7122                         val = 0;
7123                 spin_unlock_bh(&tp->lock);
7124
7125                 tp->phy_crc_errors += val;
7126
7127                 return tp->phy_crc_errors;
7128         }
7129
7130         return get_stat64(&hw_stats->rx_fcs_errors);
7131 }
7132
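     /* Each ethtool statistic is the snapshot saved at the last close plus
      * the current hardware counter value.
      */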
7133 #define ESTAT_ADD(member) \
7134         estats->member =        old_estats->member + \
7135                                 get_stat64(&hw_stats->member)
7136
7137 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7138 {
7139         struct tg3_ethtool_stats *estats = &tp->estats;
7140         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7141         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7142
7143         if (!hw_stats)
7144                 return old_estats;
7145
7146         ESTAT_ADD(rx_octets);
7147         ESTAT_ADD(rx_fragments);
7148         ESTAT_ADD(rx_ucast_packets);
7149         ESTAT_ADD(rx_mcast_packets);
7150         ESTAT_ADD(rx_bcast_packets);
7151         ESTAT_ADD(rx_fcs_errors);
7152         ESTAT_ADD(rx_align_errors);
7153         ESTAT_ADD(rx_xon_pause_rcvd);
7154         ESTAT_ADD(rx_xoff_pause_rcvd);
7155         ESTAT_ADD(rx_mac_ctrl_rcvd);
7156         ESTAT_ADD(rx_xoff_entered);
7157         ESTAT_ADD(rx_frame_too_long_errors);
7158         ESTAT_ADD(rx_jabbers);
7159         ESTAT_ADD(rx_undersize_packets);
7160         ESTAT_ADD(rx_in_length_errors);
7161         ESTAT_ADD(rx_out_length_errors);
7162         ESTAT_ADD(rx_64_or_less_octet_packets);
7163         ESTAT_ADD(rx_65_to_127_octet_packets);
7164         ESTAT_ADD(rx_128_to_255_octet_packets);
7165         ESTAT_ADD(rx_256_to_511_octet_packets);
7166         ESTAT_ADD(rx_512_to_1023_octet_packets);
7167         ESTAT_ADD(rx_1024_to_1522_octet_packets);
7168         ESTAT_ADD(rx_1523_to_2047_octet_packets);
7169         ESTAT_ADD(rx_2048_to_4095_octet_packets);
7170         ESTAT_ADD(rx_4096_to_8191_octet_packets);
7171         ESTAT_ADD(rx_8192_to_9022_octet_packets);
7172
7173         ESTAT_ADD(tx_octets);
7174         ESTAT_ADD(tx_collisions);
7175         ESTAT_ADD(tx_xon_sent);
7176         ESTAT_ADD(tx_xoff_sent);
7177         ESTAT_ADD(tx_flow_control);
7178         ESTAT_ADD(tx_mac_errors);
7179         ESTAT_ADD(tx_single_collisions);
7180         ESTAT_ADD(tx_mult_collisions);
7181         ESTAT_ADD(tx_deferred);
7182         ESTAT_ADD(tx_excessive_collisions);
7183         ESTAT_ADD(tx_late_collisions);
7184         ESTAT_ADD(tx_collide_2times);
7185         ESTAT_ADD(tx_collide_3times);
7186         ESTAT_ADD(tx_collide_4times);
7187         ESTAT_ADD(tx_collide_5times);
7188         ESTAT_ADD(tx_collide_6times);
7189         ESTAT_ADD(tx_collide_7times);
7190         ESTAT_ADD(tx_collide_8times);
7191         ESTAT_ADD(tx_collide_9times);
7192         ESTAT_ADD(tx_collide_10times);
7193         ESTAT_ADD(tx_collide_11times);
7194         ESTAT_ADD(tx_collide_12times);
7195         ESTAT_ADD(tx_collide_13times);
7196         ESTAT_ADD(tx_collide_14times);
7197         ESTAT_ADD(tx_collide_15times);
7198         ESTAT_ADD(tx_ucast_packets);
7199         ESTAT_ADD(tx_mcast_packets);
7200         ESTAT_ADD(tx_bcast_packets);
7201         ESTAT_ADD(tx_carrier_sense_errors);
7202         ESTAT_ADD(tx_discards);
7203         ESTAT_ADD(tx_errors);
7204
7205         ESTAT_ADD(dma_writeq_full);
7206         ESTAT_ADD(dma_write_prioq_full);
7207         ESTAT_ADD(rxbds_empty);
7208         ESTAT_ADD(rx_discards);
7209         ESTAT_ADD(rx_errors);
7210         ESTAT_ADD(rx_threshold_hit);
7211
7212         ESTAT_ADD(dma_readq_full);
7213         ESTAT_ADD(dma_read_prioq_full);
7214         ESTAT_ADD(tx_comp_queue_full);
7215
7216         ESTAT_ADD(ring_set_send_prod_index);
7217         ESTAT_ADD(ring_status_update);
7218         ESTAT_ADD(nic_irqs);
7219         ESTAT_ADD(nic_avoided_irqs);
7220         ESTAT_ADD(nic_tx_threshold_hit);
7221
7222         return estats;
7223 }
7224
7225 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7226 {
7227         struct tg3 *tp = netdev_priv(dev);
7228         struct net_device_stats *stats = &tp->net_stats;
7229         struct net_device_stats *old_stats = &tp->net_stats_prev;
7230         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7231
7232         if (!hw_stats)
7233                 return old_stats;
7234
7235         stats->rx_packets = old_stats->rx_packets +
7236                 get_stat64(&hw_stats->rx_ucast_packets) +
7237                 get_stat64(&hw_stats->rx_mcast_packets) +
7238                 get_stat64(&hw_stats->rx_bcast_packets);
7239                 
7240         stats->tx_packets = old_stats->tx_packets +
7241                 get_stat64(&hw_stats->tx_ucast_packets) +
7242                 get_stat64(&hw_stats->tx_mcast_packets) +
7243                 get_stat64(&hw_stats->tx_bcast_packets);
7244
7245         stats->rx_bytes = old_stats->rx_bytes +
7246                 get_stat64(&hw_stats->rx_octets);
7247         stats->tx_bytes = old_stats->tx_bytes +
7248                 get_stat64(&hw_stats->tx_octets);
7249
7250         stats->rx_errors = old_stats->rx_errors +
7251                 get_stat64(&hw_stats->rx_errors);
7252         stats->tx_errors = old_stats->tx_errors +
7253                 get_stat64(&hw_stats->tx_errors) +
7254                 get_stat64(&hw_stats->tx_mac_errors) +
7255                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7256                 get_stat64(&hw_stats->tx_discards);
7257
7258         stats->multicast = old_stats->multicast +
7259                 get_stat64(&hw_stats->rx_mcast_packets);
7260         stats->collisions = old_stats->collisions +
7261                 get_stat64(&hw_stats->tx_collisions);
7262
7263         stats->rx_length_errors = old_stats->rx_length_errors +
7264                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7265                 get_stat64(&hw_stats->rx_undersize_packets);
7266
7267         stats->rx_over_errors = old_stats->rx_over_errors +
7268                 get_stat64(&hw_stats->rxbds_empty);
7269         stats->rx_frame_errors = old_stats->rx_frame_errors +
7270                 get_stat64(&hw_stats->rx_align_errors);
7271         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7272                 get_stat64(&hw_stats->tx_discards);
7273         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7274                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7275
7276         stats->rx_crc_errors = old_stats->rx_crc_errors +
7277                 calc_crc_errors(tp);
7278
7279         stats->rx_missed_errors = old_stats->rx_missed_errors +
7280                 get_stat64(&hw_stats->rx_discards);
7281
7282         return stats;
7283 }
7284
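     /* Bit-reflected CRC-32 over 'buf' using the Ethernet polynomial
      * (0xedb88320); the result indexes the multicast hash filter below.
      */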
7285 static inline u32 calc_crc(unsigned char *buf, int len)
7286 {
7287         u32 reg;
7288         u32 tmp;
7289         int j, k;
7290
7291         reg = 0xffffffff;
7292
7293         for (j = 0; j < len; j++) {
7294                 reg ^= buf[j];
7295
7296                 for (k = 0; k < 8; k++) {
7297                         tmp = reg & 0x01;
7298
7299                         reg >>= 1;
7300
7301                         if (tmp) {
7302                                 reg ^= 0xedb88320;
7303                         }
7304                 }
7305         }
7306
7307         return ~reg;
7308 }
7309
7310 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7311 {
7312         /* accept or reject all multicast frames */
7313         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7314         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7315         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7316         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7317 }
7318
7319 static void __tg3_set_rx_mode(struct net_device *dev)
7320 {
7321         struct tg3 *tp = netdev_priv(dev);
7322         u32 rx_mode;
7323
7324         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7325                                   RX_MODE_KEEP_VLAN_TAG);
7326
7327         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7328          * flag clear.
7329          */
7330 #if TG3_VLAN_TAG_USED
7331         if (!tp->vlgrp &&
7332             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7333                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7334 #else
7335         /* By definition, VLAN is always disabled in this
7336          * case.
7337          */
7338         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7339                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7340 #endif
7341
7342         if (dev->flags & IFF_PROMISC) {
7343                 /* Promiscuous mode. */
7344                 rx_mode |= RX_MODE_PROMISC;
7345         } else if (dev->flags & IFF_ALLMULTI) {
7346                 /* Accept all multicast. */
7347                 tg3_set_multi (tp, 1);
7348         } else if (dev->mc_count < 1) {
7349                 /* Reject all multicast. */
7350                 tg3_set_multi (tp, 0);
7351         } else {
7352                 /* Accept one or more multicast(s). */
7353                 struct dev_mc_list *mclist;
7354                 unsigned int i;
7355                 u32 mc_filter[4] = { 0, };
7356                 u32 regidx;
7357                 u32 bit;
7358                 u32 crc;
7359
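                     /* The low 7 bits of the inverted CRC of each address
                      * select one of 128 filter bits, spread across the
                      * four 32-bit MAC_HASH registers.
                      */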
7360                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7361                      i++, mclist = mclist->next) {
7362
7363                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7364                         bit = ~crc & 0x7f;
7365                         regidx = (bit & 0x60) >> 5;
7366                         bit &= 0x1f;
7367                         mc_filter[regidx] |= (1 << bit);
7368                 }
7369
7370                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7371                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7372                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7373                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7374         }
7375
7376         if (rx_mode != tp->rx_mode) {
7377                 tp->rx_mode = rx_mode;
7378                 tw32_f(MAC_RX_MODE, rx_mode);
7379                 udelay(10);
7380         }
7381 }
7382
7383 static void tg3_set_rx_mode(struct net_device *dev)
7384 {
7385         struct tg3 *tp = netdev_priv(dev);
7386
7387         if (!netif_running(dev))
7388                 return;
7389
7390         tg3_full_lock(tp, 0);
7391         __tg3_set_rx_mode(dev);
7392         tg3_full_unlock(tp);
7393 }
7394
7395 #define TG3_REGDUMP_LEN         (32 * 1024)
7396
7397 static int tg3_get_regs_len(struct net_device *dev)
7398 {
7399         return TG3_REGDUMP_LEN;
7400 }
7401
7402 static void tg3_get_regs(struct net_device *dev,
7403                 struct ethtool_regs *regs, void *_p)
7404 {
7405         u32 *p = _p;
7406         struct tg3 *tp = netdev_priv(dev);
7407         u8 *orig_p = _p;
7408         int i;
7409
7410         regs->version = 0;
7411
7412         memset(p, 0, TG3_REGDUMP_LEN);
7413
7414         if (tp->link_config.phy_is_low_power)
7415                 return;
7416
7417         tg3_full_lock(tp, 0);
7418
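     /* Each helper below copies registers into the dump buffer at an offset
      * equal to the register address, so skipped regions stay zeroed by the
      * memset above.
      */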
7419 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7420 #define GET_REG32_LOOP(base,len)                \
7421 do {    p = (u32 *)(orig_p + (base));           \
7422         for (i = 0; i < len; i += 4)            \
7423                 __GET_REG32((base) + i);        \
7424 } while (0)
7425 #define GET_REG32_1(reg)                        \
7426 do {    p = (u32 *)(orig_p + (reg));            \
7427         __GET_REG32((reg));                     \
7428 } while (0)
7429
7430         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7431         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7432         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7433         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7434         GET_REG32_1(SNDDATAC_MODE);
7435         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7436         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7437         GET_REG32_1(SNDBDC_MODE);
7438         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7439         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7440         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7441         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7442         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7443         GET_REG32_1(RCVDCC_MODE);
7444         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7445         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7446         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7447         GET_REG32_1(MBFREE_MODE);
7448         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7449         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7450         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7451         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7452         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7453         GET_REG32_1(RX_CPU_MODE);
7454         GET_REG32_1(RX_CPU_STATE);
7455         GET_REG32_1(RX_CPU_PGMCTR);
7456         GET_REG32_1(RX_CPU_HWBKPT);
7457         GET_REG32_1(TX_CPU_MODE);
7458         GET_REG32_1(TX_CPU_STATE);
7459         GET_REG32_1(TX_CPU_PGMCTR);
7460         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7461         GET_REG32_LOOP(FTQ_RESET, 0x120);
7462         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7463         GET_REG32_1(DMAC_MODE);
7464         GET_REG32_LOOP(GRC_MODE, 0x4c);
7465         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7466                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7467
7468 #undef __GET_REG32
7469 #undef GET_REG32_LOOP
7470 #undef GET_REG32_1
7471
7472         tg3_full_unlock(tp);
7473 }
7474
7475 static int tg3_get_eeprom_len(struct net_device *dev)
7476 {
7477         struct tg3 *tp = netdev_priv(dev);
7478
7479         return tp->nvram_size;
7480 }
7481
7482 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7483 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7484
7485 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7486 {
7487         struct tg3 *tp = netdev_priv(dev);
7488         int ret;
7489         u8  *pd;
7490         u32 i, offset, len, val, b_offset, b_count;
7491
7492         if (tp->link_config.phy_is_low_power)
7493                 return -EAGAIN;
7494
7495         offset = eeprom->offset;
7496         len = eeprom->len;
7497         eeprom->len = 0;
7498
7499         eeprom->magic = TG3_EEPROM_MAGIC;
7500
7501         if (offset & 3) {
7502                 /* adjustments to start on required 4 byte boundary */
7503                 b_offset = offset & 3;
7504                 b_count = 4 - b_offset;
7505                 if (b_count > len) {
7506                         /* i.e. offset=1 len=2 */
7507                         b_count = len;
7508                 }
7509                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7510                 if (ret)
7511                         return ret;
7512                 val = cpu_to_le32(val);
7513                 memcpy(data, ((char*)&val) + b_offset, b_count);
7514                 len -= b_count;
7515                 offset += b_count;
7516                 eeprom->len += b_count;
7517         }
7518
7519         /* read bytes up to the last 4 byte boundary */
7520         pd = &data[eeprom->len];
7521         for (i = 0; i < (len - (len & 3)); i += 4) {
7522                 ret = tg3_nvram_read(tp, offset + i, &val);
7523                 if (ret) {
7524                         eeprom->len += i;
7525                         return ret;
7526                 }
7527                 val = cpu_to_le32(val);
7528                 memcpy(pd + i, &val, 4);
7529         }
7530         eeprom->len += i;
7531
7532         if (len & 3) {
7533                 /* read last bytes not ending on 4 byte boundary */
7534                 pd = &data[eeprom->len];
7535                 b_count = len & 3;
7536                 b_offset = offset + len - b_count;
7537                 ret = tg3_nvram_read(tp, b_offset, &val);
7538                 if (ret)
7539                         return ret;
7540                 val = cpu_to_le32(val);
7541                 memcpy(pd, ((char*)&val), b_count);
7542                 eeprom->len += b_count;
7543         }
7544         return 0;
7545 }
7546
7547 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7548
7549 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7550 {
7551         struct tg3 *tp = netdev_priv(dev);
7552         int ret;
7553         u32 offset, len, b_offset, odd_len, start, end;
7554         u8 *buf;
7555
7556         if (tp->link_config.phy_is_low_power)
7557                 return -EAGAIN;
7558
7559         if (eeprom->magic != TG3_EEPROM_MAGIC)
7560                 return -EINVAL;
7561
7562         offset = eeprom->offset;
7563         len = eeprom->len;
7564
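             /* NVRAM blocks are written in 32-bit words: read back the words
              * around an unaligned start or end so the partial words can be
              * merged into a bounce buffer before writing.
              */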
7565         if ((b_offset = (offset & 3))) {
7566                 /* adjustments to start on required 4 byte boundary */
7567                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7568                 if (ret)
7569                         return ret;
7570                 start = cpu_to_le32(start);
7571                 len += b_offset;
7572                 offset &= ~3;
7573                 if (len < 4)
7574                         len = 4;
7575         }
7576
7577         odd_len = 0;
7578         if (len & 3) {
7579                 /* adjustments to end on required 4 byte boundary */
7580                 odd_len = 1;
7581                 len = (len + 3) & ~3;
7582                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7583                 if (ret)
7584                         return ret;
7585                 end = cpu_to_le32(end);
7586         }
7587
7588         buf = data;
7589         if (b_offset || odd_len) {
7590                 buf = kmalloc(len, GFP_KERNEL);
7591                 if (!buf)
7592                         return -ENOMEM;
7593                 if (b_offset)
7594                         memcpy(buf, &start, 4);
7595                 if (odd_len)
7596                         memcpy(buf+len-4, &end, 4);
7597                 memcpy(buf + b_offset, data, eeprom->len);
7598         }
7599
7600         ret = tg3_nvram_write_block(tp, offset, len, buf);
7601
7602         if (buf != data)
7603                 kfree(buf);
7604
7605         return ret;
7606 }
7607
7608 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7609 {
7610         struct tg3 *tp = netdev_priv(dev);
7611   
7612         cmd->supported = (SUPPORTED_Autoneg);
7613
7614         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7615                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7616                                    SUPPORTED_1000baseT_Full);
7617
7618         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
7619                 cmd->supported |= (SUPPORTED_100baseT_Half |
7620                                   SUPPORTED_100baseT_Full |
7621                                   SUPPORTED_10baseT_Half |
7622                                   SUPPORTED_10baseT_Full |
7623                                   SUPPORTED_MII);
7624         else
7625                 cmd->supported |= SUPPORTED_FIBRE;
7626   
7627         cmd->advertising = tp->link_config.advertising;
7628         if (netif_running(dev)) {
7629                 cmd->speed = tp->link_config.active_speed;
7630                 cmd->duplex = tp->link_config.active_duplex;
7631         }
7632         cmd->port = 0;
7633         cmd->phy_address = PHY_ADDR;
7634         cmd->transceiver = 0;
7635         cmd->autoneg = tp->link_config.autoneg;
7636         cmd->maxtxpkt = 0;
7637         cmd->maxrxpkt = 0;
7638         return 0;
7639 }
7640   
7641 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7642 {
7643         struct tg3 *tp = netdev_priv(dev);
7644   
7645         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7646                 /* These are the only valid advertisement bits allowed.  */
7647                 if (cmd->autoneg == AUTONEG_ENABLE &&
7648                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7649                                           ADVERTISED_1000baseT_Full |
7650                                           ADVERTISED_Autoneg |
7651                                           ADVERTISED_FIBRE)))
7652                         return -EINVAL;
7653                 /* Fiber can only do SPEED_1000.  */
7654                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7655                          (cmd->speed != SPEED_1000))
7656                         return -EINVAL;
7657         /* Copper cannot force SPEED_1000.  */
7658         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7659                    (cmd->speed == SPEED_1000))
7660                 return -EINVAL;
7661         else if ((cmd->speed == SPEED_1000) &&
7662                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7663                 return -EINVAL;
7664
7665         tg3_full_lock(tp, 0);
7666
7667         tp->link_config.autoneg = cmd->autoneg;
7668         if (cmd->autoneg == AUTONEG_ENABLE) {
7669                 tp->link_config.advertising = cmd->advertising;
7670                 tp->link_config.speed = SPEED_INVALID;
7671                 tp->link_config.duplex = DUPLEX_INVALID;
7672         } else {
7673                 tp->link_config.advertising = 0;
7674                 tp->link_config.speed = cmd->speed;
7675                 tp->link_config.duplex = cmd->duplex;
7676         }
7677   
7678         if (netif_running(dev))
7679                 tg3_setup_phy(tp, 1);
7680
7681         tg3_full_unlock(tp);
7682   
7683         return 0;
7684 }
7685   
7686 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7687 {
7688         struct tg3 *tp = netdev_priv(dev);
7689   
7690         strcpy(info->driver, DRV_MODULE_NAME);
7691         strcpy(info->version, DRV_MODULE_VERSION);
7692         strcpy(info->fw_version, tp->fw_ver);
7693         strcpy(info->bus_info, pci_name(tp->pdev));
7694 }
7695   
7696 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7697 {
7698         struct tg3 *tp = netdev_priv(dev);
7699   
7700         wol->supported = WAKE_MAGIC;
7701         wol->wolopts = 0;
7702         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7703                 wol->wolopts = WAKE_MAGIC;
7704         memset(&wol->sopass, 0, sizeof(wol->sopass));
7705 }
7706   
7707 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7708 {
7709         struct tg3 *tp = netdev_priv(dev);
7710   
7711         if (wol->wolopts & ~WAKE_MAGIC)
7712                 return -EINVAL;
7713         if ((wol->wolopts & WAKE_MAGIC) &&
7714             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7715             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7716                 return -EINVAL;
7717   
7718         spin_lock_bh(&tp->lock);
7719         if (wol->wolopts & WAKE_MAGIC)
7720                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7721         else
7722                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7723         spin_unlock_bh(&tp->lock);
7724   
7725         return 0;
7726 }
7727   
7728 static u32 tg3_get_msglevel(struct net_device *dev)
7729 {
7730         struct tg3 *tp = netdev_priv(dev);
7731         return tp->msg_enable;
7732 }
7733   
7734 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7735 {
7736         struct tg3 *tp = netdev_priv(dev);
7737         tp->msg_enable = value;
7738 }
7739   
7740 #if TG3_TSO_SUPPORT != 0
7741 static int tg3_set_tso(struct net_device *dev, u32 value)
7742 {
7743         struct tg3 *tp = netdev_priv(dev);
7744
7745         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7746                 if (value)
7747                         return -EINVAL;
7748                 return 0;
7749         }
7750         return ethtool_op_set_tso(dev, value);
7751 }
7752 #endif
7753   
7754 static int tg3_nway_reset(struct net_device *dev)
7755 {
7756         struct tg3 *tp = netdev_priv(dev);
7757         u32 bmcr;
7758         int r;
7759   
7760         if (!netif_running(dev))
7761                 return -EAGAIN;
7762
7763         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7764                 return -EINVAL;
7765
7766         spin_lock_bh(&tp->lock);
7767         r = -EINVAL;
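             /* Note: BMCR is read twice and the first result is ignored;
              * presumably the extra read just flushes a stale value before
              * the read whose BMCR_ANENABLE bit is checked below.
              */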
7768         tg3_readphy(tp, MII_BMCR, &bmcr);
7769         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7770             ((bmcr & BMCR_ANENABLE) ||
7771              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7772                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7773                                            BMCR_ANENABLE);
7774                 r = 0;
7775         }
7776         spin_unlock_bh(&tp->lock);
7777   
7778         return r;
7779 }
7780   
7781 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7782 {
7783         struct tg3 *tp = netdev_priv(dev);
7784   
7785         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7786         ering->rx_mini_max_pending = 0;
7787         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7788                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7789         else
7790                 ering->rx_jumbo_max_pending = 0;
7791
7792         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7793
7794         ering->rx_pending = tp->rx_pending;
7795         ering->rx_mini_pending = 0;
7796         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7797                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7798         else
7799                 ering->rx_jumbo_pending = 0;
7800
7801         ering->tx_pending = tp->tx_pending;
7802 }
7803   
7804 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7805 {
7806         struct tg3 *tp = netdev_priv(dev);
7807         int irq_sync = 0;
7808   
7809         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7810             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7811             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7812                 return -EINVAL;
7813   
7814         if (netif_running(dev)) {
7815                 tg3_netif_stop(tp);
7816                 irq_sync = 1;
7817         }
7818
7819         tg3_full_lock(tp, irq_sync);
7820   
7821         tp->rx_pending = ering->rx_pending;
7822
7823         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7824             tp->rx_pending > 63)
7825                 tp->rx_pending = 63;
7826         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7827         tp->tx_pending = ering->tx_pending;
7828
7829         if (netif_running(dev)) {
7830                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7831                 tg3_init_hw(tp);
7832                 tg3_netif_start(tp);
7833         }
7834
7835         tg3_full_unlock(tp);
7836   
7837         return 0;
7838 }
7839   
7840 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7841 {
7842         struct tg3 *tp = netdev_priv(dev);
7843   
7844         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7845         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7846         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7847 }
7848   
7849 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7850 {
7851         struct tg3 *tp = netdev_priv(dev);
7852         int irq_sync = 0;
7853   
7854         if (netif_running(dev)) {
7855                 tg3_netif_stop(tp);
7856                 irq_sync = 1;
7857         }
7858
7859         tg3_full_lock(tp, irq_sync);
7860
7861         if (epause->autoneg)
7862                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7863         else
7864                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7865         if (epause->rx_pause)
7866                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7867         else
7868                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7869         if (epause->tx_pause)
7870                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7871         else
7872                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7873
7874         if (netif_running(dev)) {
7875                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7876                 tg3_init_hw(tp);
7877                 tg3_netif_start(tp);
7878         }
7879
7880         tg3_full_unlock(tp);
7881   
7882         return 0;
7883 }
7884   
7885 static u32 tg3_get_rx_csum(struct net_device *dev)
7886 {
7887         struct tg3 *tp = netdev_priv(dev);
7888         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7889 }
7890   
7891 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7892 {
7893         struct tg3 *tp = netdev_priv(dev);
7894   
7895         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7896                 if (data != 0)
7897                         return -EINVAL;
7898                 return 0;
7899         }
7900   
7901         spin_lock_bh(&tp->lock);
7902         if (data)
7903                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7904         else
7905                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7906         spin_unlock_bh(&tp->lock);
7907   
7908         return 0;
7909 }
7910   
7911 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7912 {
7913         struct tg3 *tp = netdev_priv(dev);
7914   
7915         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7916                 if (data != 0)
7917                         return -EINVAL;
7918                 return 0;
7919         }
7920   
7921         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7922             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7923                 ethtool_op_set_tx_hw_csum(dev, data);
7924         else
7925                 ethtool_op_set_tx_csum(dev, data);
7926
7927         return 0;
7928 }
7929
7930 static int tg3_get_stats_count(struct net_device *dev)
7931 {
7932         return TG3_NUM_STATS;
7933 }
7934
7935 static int tg3_get_test_count(struct net_device *dev)
7936 {
7937         return TG3_NUM_TEST;
7938 }
7939
7940 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7941 {
7942         switch (stringset) {
7943         case ETH_SS_STATS:
7944                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7945                 break;
7946         case ETH_SS_TEST:
7947                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7948                 break;
7949         default:
7950                 WARN_ON(1);     /* we need a WARN() */
7951                 break;
7952         }
7953 }
7954
7955 static int tg3_phys_id(struct net_device *dev, u32 data)
7956 {
7957         struct tg3 *tp = netdev_priv(dev);
7958         int i;
7959
7960         if (!netif_running(tp->dev))
7961                 return -EAGAIN;
7962
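             /* 'data' is the requested blink time in seconds: each pass of
              * the loop below drives the LEDs for 500 ms, and two passes make
              * one on/off cycle.  A value of 0 falls back to a two second
              * default.
              */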
7963         if (data == 0)
7964                 data = 2;
7965
7966         for (i = 0; i < (data * 2); i++) {
7967                 if ((i % 2) == 0)
7968                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7969                                            LED_CTRL_1000MBPS_ON |
7970                                            LED_CTRL_100MBPS_ON |
7971                                            LED_CTRL_10MBPS_ON |
7972                                            LED_CTRL_TRAFFIC_OVERRIDE |
7973                                            LED_CTRL_TRAFFIC_BLINK |
7974                                            LED_CTRL_TRAFFIC_LED);
7976                 else
7977                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7978                                            LED_CTRL_TRAFFIC_OVERRIDE);
7979
7980                 if (msleep_interruptible(500))
7981                         break;
7982         }
7983         tw32(MAC_LED_CTRL, tp->led_ctrl);
7984         return 0;
7985 }
7986
7987 static void tg3_get_ethtool_stats (struct net_device *dev,
7988                                    struct ethtool_stats *estats, u64 *tmp_stats)
7989 {
7990         struct tg3 *tp = netdev_priv(dev);
7991         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7992 }
7993
7994 #define NVRAM_TEST_SIZE 0x100
7995 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
7996
7997 static int tg3_test_nvram(struct tg3 *tp)
7998 {
7999         u32 *buf, csum, magic;
8000         int i, j, err = 0, size;
8001
8002         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8003                 return -EIO;
8004
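             /* Legacy images start with TG3_EEPROM_MAGIC and get the full
              * 0x100-byte CRC check below.  Selfboot images carry 0xa5 in the
              * top byte of the first word; only the format-1 variant (0x14
              * bytes, simple 8-bit checksum) is validated here, other
              * selfboot layouts are accepted as-is.
              */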
8005         if (magic == TG3_EEPROM_MAGIC)
8006                 size = NVRAM_TEST_SIZE;
8007         else if ((magic & 0xff000000) == 0xa5000000) {
8008                 if ((magic & 0xe00000) == 0x200000)
8009                         size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8010                 else
8011                         return 0;
8012         } else
8013                 return -EIO;
8014
8015         buf = kmalloc(size, GFP_KERNEL);
8016         if (buf == NULL)
8017                 return -ENOMEM;
8018
8019         err = -EIO;
8020         for (i = 0, j = 0; i < size; i += 4, j++) {
8021                 u32 val;
8022
8023                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8024                         break;
8025                 buf[j] = cpu_to_le32(val);
8026         }
8027         if (i < size)
8028                 goto out;
8029
8030         /* Selfboot format */
8031         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
8032                 u8 *buf8 = (u8 *) buf, csum8 = 0;
8033
8034                 for (i = 0; i < size; i++)
8035                         csum8 += buf8[i];
8036
8037                 if (csum8 == 0) {
8038                         err = 0;
8039                         goto out;
8040                 }
8041
8042                 err = -EIO;
8043                 goto out;
8044         }
8045
8046         /* Bootstrap checksum at offset 0x10 */
8047         csum = calc_crc((unsigned char *) buf, 0x10);
8048         if (csum != cpu_to_le32(buf[0x10/4]))
8049                 goto out;
8050
8051         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8052         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8053         if (csum != cpu_to_le32(buf[0xfc/4]))
8054                 goto out;
8055
8056         err = 0;
8057
8058 out:
8059         kfree(buf);
8060         return err;
8061 }
8062
8063 #define TG3_SERDES_TIMEOUT_SEC  2
8064 #define TG3_COPPER_TIMEOUT_SEC  6
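     /* tg3_test_link() polls netif_carrier_ok() once per second, so these
      * are the worst-case waits, in seconds, for each media type.
      */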
8065
8066 static int tg3_test_link(struct tg3 *tp)
8067 {
8068         int i, max;
8069
8070         if (!netif_running(tp->dev))
8071                 return -ENODEV;
8072
8073         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8074                 max = TG3_SERDES_TIMEOUT_SEC;
8075         else
8076                 max = TG3_COPPER_TIMEOUT_SEC;
8077
8078         for (i = 0; i < max; i++) {
8079                 if (netif_carrier_ok(tp->dev))
8080                         return 0;
8081
8082                 if (msleep_interruptible(1000))
8083                         break;
8084         }
8085
8086         return -EIO;
8087 }
8088
8089 /* Only test the commonly used registers */
8090 static int tg3_test_registers(struct tg3 *tp)
8091 {
8092         int i, is_5705;
8093         u32 offset, read_mask, write_mask, val, save_val, read_val;
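             /* Each table entry names a register, the chips it applies to, a
              * mask of read-only bits (read_mask) and a mask of writable bits
              * (write_mask).  The loop below writes all-zeros and then
              * all-ones and checks that only the writable bits change while
              * the read-only bits keep their original value.
              */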
8094         static struct {
8095                 u16 offset;
8096                 u16 flags;
8097 #define TG3_FL_5705     0x1
8098 #define TG3_FL_NOT_5705 0x2
8099 #define TG3_FL_NOT_5788 0x4
8100                 u32 read_mask;
8101                 u32 write_mask;
8102         } reg_tbl[] = {
8103                 /* MAC Control Registers */
8104                 { MAC_MODE, TG3_FL_NOT_5705,
8105                         0x00000000, 0x00ef6f8c },
8106                 { MAC_MODE, TG3_FL_5705,
8107                         0x00000000, 0x01ef6b8c },
8108                 { MAC_STATUS, TG3_FL_NOT_5705,
8109                         0x03800107, 0x00000000 },
8110                 { MAC_STATUS, TG3_FL_5705,
8111                         0x03800100, 0x00000000 },
8112                 { MAC_ADDR_0_HIGH, 0x0000,
8113                         0x00000000, 0x0000ffff },
8114                 { MAC_ADDR_0_LOW, 0x0000,
8115                         0x00000000, 0xffffffff },
8116                 { MAC_RX_MTU_SIZE, 0x0000,
8117                         0x00000000, 0x0000ffff },
8118                 { MAC_TX_MODE, 0x0000,
8119                         0x00000000, 0x00000070 },
8120                 { MAC_TX_LENGTHS, 0x0000,
8121                         0x00000000, 0x00003fff },
8122                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8123                         0x00000000, 0x000007fc },
8124                 { MAC_RX_MODE, TG3_FL_5705,
8125                         0x00000000, 0x000007dc },
8126                 { MAC_HASH_REG_0, 0x0000,
8127                         0x00000000, 0xffffffff },
8128                 { MAC_HASH_REG_1, 0x0000,
8129                         0x00000000, 0xffffffff },
8130                 { MAC_HASH_REG_2, 0x0000,
8131                         0x00000000, 0xffffffff },
8132                 { MAC_HASH_REG_3, 0x0000,
8133                         0x00000000, 0xffffffff },
8134
8135                 /* Receive Data and Receive BD Initiator Control Registers. */
8136                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8137                         0x00000000, 0xffffffff },
8138                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8139                         0x00000000, 0xffffffff },
8140                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8141                         0x00000000, 0x00000003 },
8142                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8143                         0x00000000, 0xffffffff },
8144                 { RCVDBDI_STD_BD+0, 0x0000,
8145                         0x00000000, 0xffffffff },
8146                 { RCVDBDI_STD_BD+4, 0x0000,
8147                         0x00000000, 0xffffffff },
8148                 { RCVDBDI_STD_BD+8, 0x0000,
8149                         0x00000000, 0xffff0002 },
8150                 { RCVDBDI_STD_BD+0xc, 0x0000,
8151                         0x00000000, 0xffffffff },
8152         
8153                 /* Receive BD Initiator Control Registers. */
8154                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8155                         0x00000000, 0xffffffff },
8156                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8157                         0x00000000, 0x000003ff },
8158                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8159                         0x00000000, 0xffffffff },
8160         
8161                 /* Host Coalescing Control Registers. */
8162                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8163                         0x00000000, 0x00000004 },
8164                 { HOSTCC_MODE, TG3_FL_5705,
8165                         0x00000000, 0x000000f6 },
8166                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8167                         0x00000000, 0xffffffff },
8168                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8169                         0x00000000, 0x000003ff },
8170                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8171                         0x00000000, 0xffffffff },
8172                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8173                         0x00000000, 0x000003ff },
8174                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8175                         0x00000000, 0xffffffff },
8176                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8177                         0x00000000, 0x000000ff },
8178                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8179                         0x00000000, 0xffffffff },
8180                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8181                         0x00000000, 0x000000ff },
8182                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8183                         0x00000000, 0xffffffff },
8184                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8185                         0x00000000, 0xffffffff },
8186                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8187                         0x00000000, 0xffffffff },
8188                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8189                         0x00000000, 0x000000ff },
8190                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8191                         0x00000000, 0xffffffff },
8192                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8193                         0x00000000, 0x000000ff },
8194                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8195                         0x00000000, 0xffffffff },
8196                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8197                         0x00000000, 0xffffffff },
8198                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8199                         0x00000000, 0xffffffff },
8200                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8201                         0x00000000, 0xffffffff },
8202                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8203                         0x00000000, 0xffffffff },
8204                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8205                         0xffffffff, 0x00000000 },
8206                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8207                         0xffffffff, 0x00000000 },
8208
8209                 /* Buffer Manager Control Registers. */
8210                 { BUFMGR_MB_POOL_ADDR, 0x0000,
8211                         0x00000000, 0x007fff80 },
8212                 { BUFMGR_MB_POOL_SIZE, 0x0000,
8213                         0x00000000, 0x007fffff },
8214                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8215                         0x00000000, 0x0000003f },
8216                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8217                         0x00000000, 0x000001ff },
8218                 { BUFMGR_MB_HIGH_WATER, 0x0000,
8219                         0x00000000, 0x000001ff },
8220                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8221                         0xffffffff, 0x00000000 },
8222                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8223                         0xffffffff, 0x00000000 },
8224         
8225                 /* Mailbox Registers */
8226                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8227                         0x00000000, 0x000001ff },
8228                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8229                         0x00000000, 0x000001ff },
8230                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8231                         0x00000000, 0x000007ff },
8232                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8233                         0x00000000, 0x000001ff },
8234
8235                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8236         };
8237
8238         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8239                 is_5705 = 1;
8240         else
8241                 is_5705 = 0;
8242
8243         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8244                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8245                         continue;
8246
8247                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8248                         continue;
8249
8250                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8251                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8252                         continue;
8253
8254                 offset = (u32) reg_tbl[i].offset;
8255                 read_mask = reg_tbl[i].read_mask;
8256                 write_mask = reg_tbl[i].write_mask;
8257
8258                 /* Save the original register content */
8259                 save_val = tr32(offset);
8260
8261                 /* Determine the read-only value. */
8262                 read_val = save_val & read_mask;
8263
8264                 /* Write zero to the register, then make sure the read-only bits
8265                  * are not changed and the read/write bits are all zeros.
8266                  */
8267                 tw32(offset, 0);
8268
8269                 val = tr32(offset);
8270
8271                 /* Test the read-only and read/write bits. */
8272                 if (((val & read_mask) != read_val) || (val & write_mask))
8273                         goto out;
8274
8275                 /* Write ones to all the bits defined by RdMask and WrMask, then
8276                  * make sure the read-only bits are not changed and the
8277                  * read/write bits are all ones.
8278                  */
8279                 tw32(offset, read_mask | write_mask);
8280
8281                 val = tr32(offset);
8282
8283                 /* Test the read-only bits. */
8284                 if ((val & read_mask) != read_val)
8285                         goto out;
8286
8287                 /* Test the read/write bits. */
8288                 if ((val & write_mask) != write_mask)
8289                         goto out;
8290
8291                 tw32(offset, save_val);
8292         }
8293
8294         return 0;
8295
8296 out:
8297         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8298         tw32(offset, save_val);
8299         return -EIO;
8300 }
8301
8302 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8303 {
8304         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8305         int i;
8306         u32 j;
8307
8308         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
8309                 for (j = 0; j < len; j += 4) {
8310                         u32 val;
8311
8312                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8313                         tg3_read_mem(tp, offset + j, &val);
8314                         if (val != test_pattern[i])
8315                                 return -EIO;
8316                 }
8317         }
8318         return 0;
8319 }
8320
8321 static int tg3_test_memory(struct tg3 *tp)
8322 {
8323         static struct mem_entry {
8324                 u32 offset;
8325                 u32 len;
8326         } mem_tbl_570x[] = {
8327                 { 0x00000000, 0x00b50},
8328                 { 0x00002000, 0x1c000},
8329                 { 0xffffffff, 0x00000}
8330         }, mem_tbl_5705[] = {
8331                 { 0x00000100, 0x0000c},
8332                 { 0x00000200, 0x00008},
8333                 { 0x00004000, 0x00800},
8334                 { 0x00006000, 0x01000},
8335                 { 0x00008000, 0x02000},
8336                 { 0x00010000, 0x0e000},
8337                 { 0xffffffff, 0x00000}
8338         }, mem_tbl_5755[] = {
8339                 { 0x00000200, 0x00008},
8340                 { 0x00004000, 0x00800},
8341                 { 0x00006000, 0x00800},
8342                 { 0x00008000, 0x02000},
8343                 { 0x00010000, 0x0c000},
8344                 { 0xffffffff, 0x00000}
8345         };
8346         struct mem_entry *mem_tbl;
8347         int err = 0;
8348         int i;
8349
8350         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8351                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8352                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8353                         mem_tbl = mem_tbl_5755;
8354                 else
8355                         mem_tbl = mem_tbl_5705;
8356         } else
8357                 mem_tbl = mem_tbl_570x;
8358
8359         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8360                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8361                     mem_tbl[i].len)) != 0)
8362                         break;
8363         }
8364         
8365         return err;
8366 }
8367
8368 #define TG3_MAC_LOOPBACK        0
8369 #define TG3_PHY_LOOPBACK        1
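     /* MAC loopback wraps frames inside the MAC itself via
      * MAC_MODE_PORT_INT_LPBACK; PHY loopback sets BMCR_LOOPBACK so the
      * frame also travels the (G)MII path to the PHY before being
      * reflected back.
      */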
8370
8371 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8372 {
8373         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8374         u32 desc_idx;
8375         struct sk_buff *skb, *rx_skb;
8376         u8 *tx_data;
8377         dma_addr_t map;
8378         int num_pkts, tx_len, rx_len, i, err;
8379         struct tg3_rx_buffer_desc *desc;
8380
8381         if (loopback_mode == TG3_MAC_LOOPBACK) {
8382                 /* HW errata - mac loopback fails in some cases on 5780.
8383                  * Normal traffic and PHY loopback are not affected by
8384                  * errata.
8385                  */
8386                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8387                         return 0;
8388
8389                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8390                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8391                            MAC_MODE_PORT_MODE_GMII;
8392                 tw32(MAC_MODE, mac_mode);
8393         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8394                 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8395                                            BMCR_SPEED1000);
8396                 udelay(40);
8397                 /* reset to prevent losing 1st rx packet intermittently */
8398                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8399                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8400                         udelay(10);
8401                         tw32_f(MAC_RX_MODE, tp->rx_mode);
8402                 }
8403                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8404                            MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
8405                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8406                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8407                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
8408                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8409                 }
8410                 tw32(MAC_MODE, mac_mode);
8411         }
8412         } else
8414
8415         err = -EIO;
8416
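             /* Build a maximum-size (1514 byte) test frame: destination MAC
              * set to our own address, source/type bytes zeroed, and payload
              * bytes 14..tx_len-1 filled with (i & 0xff) so the received copy
              * can be verified byte-for-byte below.
              */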
8417         tx_len = 1514;
8418         skb = dev_alloc_skb(tx_len);
             if (!skb)
                     return -ENOMEM;
8419         tx_data = skb_put(skb, tx_len);
8420         memcpy(tx_data, tp->dev->dev_addr, 6);
8421         memset(tx_data + 6, 0x0, 8);
8422
8423         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8424
8425         for (i = 14; i < tx_len; i++)
8426                 tx_data[i] = (u8) (i & 0xff);
8427
8428         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8429
8430         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8431              HOSTCC_MODE_NOW);
8432
8433         udelay(10);
8434
8435         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8436
8437         num_pkts = 0;
8438
8439         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8440
8441         tp->tx_prod++;
8442         num_pkts++;
8443
8444         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8445                      tp->tx_prod);
8446         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8447
8448         udelay(10);
8449
8450         for (i = 0; i < 10; i++) {
8451                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8452                        HOSTCC_MODE_NOW);
8453
8454                 udelay(10);
8455
8456                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8457                 rx_idx = tp->hw_status->idx[0].rx_producer;
8458                 if ((tx_idx == tp->tx_prod) &&
8459                     (rx_idx == (rx_start_idx + num_pkts)))
8460                         break;
8461         }
8462
8463         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8464         dev_kfree_skb(skb);
8465
8466         if (tx_idx != tp->tx_prod)
8467                 goto out;
8468
8469         if (rx_idx != rx_start_idx + num_pkts)
8470                 goto out;
8471
8472         desc = &tp->rx_rcb[rx_start_idx];
8473         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8474         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8475         if (opaque_key != RXD_OPAQUE_RING_STD)
8476                 goto out;
8477
8478         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8479             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8480                 goto out;
8481
8482         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8483         if (rx_len != tx_len)
8484                 goto out;
8485
8486         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8487
8488         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8489         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8490
8491         for (i = 14; i < tx_len; i++) {
8492                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8493                         goto out;
8494         }
8495         err = 0;
8496         
8497         /* tg3_free_rings will unmap and free the rx_skb */
8498 out:
8499         return err;
8500 }
8501
8502 #define TG3_MAC_LOOPBACK_FAILED         1
8503 #define TG3_PHY_LOOPBACK_FAILED         2
8504 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8505                                          TG3_PHY_LOOPBACK_FAILED)
8506
8507 static int tg3_test_loopback(struct tg3 *tp)
8508 {
8509         int err = 0;
8510
8511         if (!netif_running(tp->dev))
8512                 return TG3_LOOPBACK_FAILED;
8513
8514         tg3_reset_hw(tp);
8515
8516         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8517                 err |= TG3_MAC_LOOPBACK_FAILED;
8518         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8519                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8520                         err |= TG3_PHY_LOOPBACK_FAILED;
8521         }
8522
8523         return err;
8524 }
8525
8526 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8527                           u64 *data)
8528 {
8529         struct tg3 *tp = netdev_priv(dev);
8530
8531         if (tp->link_config.phy_is_low_power)
8532                 tg3_set_power_state(tp, PCI_D0);
8533
8534         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8535
8536         if (tg3_test_nvram(tp) != 0) {
8537                 etest->flags |= ETH_TEST_FL_FAILED;
8538                 data[0] = 1;
8539         }
8540         if (tg3_test_link(tp) != 0) {
8541                 etest->flags |= ETH_TEST_FL_FAILED;
8542                 data[1] = 1;
8543         }
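             /* Offline tests are destructive: the chip is halted, the on-chip
              * firmware CPUs are stopped, and the register, memory and
              * loopback tests run on the quiesced device.  The interrupt test
              * then runs with the lock dropped, and the NIC is re-initialized
              * afterwards if the interface was up.
              */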
8544         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8545                 int err, irq_sync = 0;
8546
8547                 if (netif_running(dev)) {
8548                         tg3_netif_stop(tp);
8549                         irq_sync = 1;
8550                 }
8551
8552                 tg3_full_lock(tp, irq_sync);
8553
8554                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8555                 err = tg3_nvram_lock(tp);
8556                 tg3_halt_cpu(tp, RX_CPU_BASE);
8557                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8558                         tg3_halt_cpu(tp, TX_CPU_BASE);
8559                 if (!err)
8560                         tg3_nvram_unlock(tp);
8561
8562                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8563                         tg3_phy_reset(tp);
8564
8565                 if (tg3_test_registers(tp) != 0) {
8566                         etest->flags |= ETH_TEST_FL_FAILED;
8567                         data[2] = 1;
8568                 }
8569                 if (tg3_test_memory(tp) != 0) {
8570                         etest->flags |= ETH_TEST_FL_FAILED;
8571                         data[3] = 1;
8572                 }
8573                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8574                         etest->flags |= ETH_TEST_FL_FAILED;
8575
8576                 tg3_full_unlock(tp);
8577
8578                 if (tg3_test_interrupt(tp) != 0) {
8579                         etest->flags |= ETH_TEST_FL_FAILED;
8580                         data[5] = 1;
8581                 }
8582
8583                 tg3_full_lock(tp, 0);
8584
8585                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8586                 if (netif_running(dev)) {
8587                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8588                         tg3_init_hw(tp);
8589                         tg3_netif_start(tp);
8590                 }
8591
8592                 tg3_full_unlock(tp);
8593         }
8594         if (tp->link_config.phy_is_low_power)
8595                 tg3_set_power_state(tp, PCI_D3hot);
8596
8597 }
8598
8599 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8600 {
8601         struct mii_ioctl_data *data = if_mii(ifr);
8602         struct tg3 *tp = netdev_priv(dev);
8603         int err;
8604
8605         switch (cmd) {
8606         case SIOCGMIIPHY:
8607                 data->phy_id = PHY_ADDR;
8608
8609                 /* fallthru */
8610         case SIOCGMIIREG: {
8611                 u32 mii_regval;
8612
8613                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8614                         break;                  /* We have no PHY */
8615
8616                 if (tp->link_config.phy_is_low_power)
8617                         return -EAGAIN;
8618
8619                 spin_lock_bh(&tp->lock);
8620                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8621                 spin_unlock_bh(&tp->lock);
8622
8623                 data->val_out = mii_regval;
8624
8625                 return err;
8626         }
8627
8628         case SIOCSMIIREG:
8629                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8630                         break;                  /* We have no PHY */
8631
8632                 if (!capable(CAP_NET_ADMIN))
8633                         return -EPERM;
8634
8635                 if (tp->link_config.phy_is_low_power)
8636                         return -EAGAIN;
8637
8638                 spin_lock_bh(&tp->lock);
8639                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8640                 spin_unlock_bh(&tp->lock);
8641
8642                 return err;
8643
8644         default:
8645                 /* do nothing */
8646                 break;
8647         }
8648         return -EOPNOTSUPP;
8649 }
8650
8651 #if TG3_VLAN_TAG_USED
8652 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8653 {
8654         struct tg3 *tp = netdev_priv(dev);
8655
8656         tg3_full_lock(tp, 0);
8657
8658         tp->vlgrp = grp;
8659
8660         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8661         __tg3_set_rx_mode(dev);
8662
8663         tg3_full_unlock(tp);
8664 }
8665
8666 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8667 {
8668         struct tg3 *tp = netdev_priv(dev);
8669
8670         tg3_full_lock(tp, 0);
8671         if (tp->vlgrp)
8672                 tp->vlgrp->vlan_devices[vid] = NULL;
8673         tg3_full_unlock(tp);
8674 }
8675 #endif
8676
8677 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8678 {
8679         struct tg3 *tp = netdev_priv(dev);
8680
8681         memcpy(ec, &tp->coal, sizeof(*ec));
8682         return 0;
8683 }
8684
8685 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8686 {
8687         struct tg3 *tp = netdev_priv(dev);
8688         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8689         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8690
8691         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8692                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8693                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8694                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8695                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8696         }
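             /* On 5705 and newer chips the *_irq and stats-block limits above
              * stay zero, so any non-zero request for those parameters is
              * rejected by the range checks below.
              */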
8697
8698         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8699             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8700             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8701             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8702             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8703             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8704             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8705             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8706             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8707             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8708                 return -EINVAL;
8709
8710         /* No rx interrupts will be generated if both are zero */
8711         if ((ec->rx_coalesce_usecs == 0) &&
8712             (ec->rx_max_coalesced_frames == 0))
8713                 return -EINVAL;
8714
8715         /* No tx interrupts will be generated if both are zero */
8716         if ((ec->tx_coalesce_usecs == 0) &&
8717             (ec->tx_max_coalesced_frames == 0))
8718                 return -EINVAL;
8719
8720         /* Only copy relevant parameters, ignore all others. */
8721         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8722         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8723         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8724         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8725         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8726         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8727         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8728         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8729         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8730
8731         if (netif_running(dev)) {
8732                 tg3_full_lock(tp, 0);
8733                 __tg3_set_coalesce(tp, &tp->coal);
8734                 tg3_full_unlock(tp);
8735         }
8736         return 0;
8737 }
8738
8739 static struct ethtool_ops tg3_ethtool_ops = {
8740         .get_settings           = tg3_get_settings,
8741         .set_settings           = tg3_set_settings,
8742         .get_drvinfo            = tg3_get_drvinfo,
8743         .get_regs_len           = tg3_get_regs_len,
8744         .get_regs               = tg3_get_regs,
8745         .get_wol                = tg3_get_wol,
8746         .set_wol                = tg3_set_wol,
8747         .get_msglevel           = tg3_get_msglevel,
8748         .set_msglevel           = tg3_set_msglevel,
8749         .nway_reset             = tg3_nway_reset,
8750         .get_link               = ethtool_op_get_link,
8751         .get_eeprom_len         = tg3_get_eeprom_len,
8752         .get_eeprom             = tg3_get_eeprom,
8753         .set_eeprom             = tg3_set_eeprom,
8754         .get_ringparam          = tg3_get_ringparam,
8755         .set_ringparam          = tg3_set_ringparam,
8756         .get_pauseparam         = tg3_get_pauseparam,
8757         .set_pauseparam         = tg3_set_pauseparam,
8758         .get_rx_csum            = tg3_get_rx_csum,
8759         .set_rx_csum            = tg3_set_rx_csum,
8760         .get_tx_csum            = ethtool_op_get_tx_csum,
8761         .set_tx_csum            = tg3_set_tx_csum,
8762         .get_sg                 = ethtool_op_get_sg,
8763         .set_sg                 = ethtool_op_set_sg,
8764 #if TG3_TSO_SUPPORT != 0
8765         .get_tso                = ethtool_op_get_tso,
8766         .set_tso                = tg3_set_tso,
8767 #endif
8768         .self_test_count        = tg3_get_test_count,
8769         .self_test              = tg3_self_test,
8770         .get_strings            = tg3_get_strings,
8771         .phys_id                = tg3_phys_id,
8772         .get_stats_count        = tg3_get_stats_count,
8773         .get_ethtool_stats      = tg3_get_ethtool_stats,
8774         .get_coalesce           = tg3_get_coalesce,
8775         .set_coalesce           = tg3_set_coalesce,
8776         .get_perm_addr          = ethtool_op_get_perm_addr,
8777 };
8778
8779 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8780 {
8781         u32 cursize, val, magic;
8782
8783         tp->nvram_size = EEPROM_CHIP_SIZE;
8784
8785         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8786                 return;
8787
8788         if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
8789                 return;
8790
8791         /*
8792          * Size the chip by reading offsets at increasing powers of two.
8793          * When we encounter our validation signature, we know the addressing
8794          * has wrapped around, and thus have our chip size.
8795          */
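             /* For example (assuming the part simply wraps its address): a
              * 16 KB EEPROM returns the magic word again when offset 0x4000
              * is read, so the probe stops with cursize == 0x4000.
              */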
8796         cursize = 0x10;
8797
8798         while (cursize < tp->nvram_size) {
8799                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
8800                         return;
8801
8802                 if (val == magic)
8803                         break;
8804
8805                 cursize <<= 1;
8806         }
8807
8808         tp->nvram_size = cursize;
8809 }
8810                 
8811 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8812 {
8813         u32 val;
8814
8815         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
8816                 return;
8817
8818         /* Selfboot format */
8819         if (val != TG3_EEPROM_MAGIC) {
8820                 tg3_get_eeprom_size(tp);
8821                 return;
8822         }
8823
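             /* For a standard image the upper 16 bits of the word at offset
              * 0xf0 give the NVRAM size in kilobytes; fall back to 128 KB
              * (0x20000) if that word is zero or unreadable.
              */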
8824         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8825                 if (val != 0) {
8826                         tp->nvram_size = (val >> 16) * 1024;
8827                         return;
8828                 }
8829         }
8830         tp->nvram_size = 0x20000;
8831 }
8832
8833 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8834 {
8835         u32 nvcfg1;
8836
8837         nvcfg1 = tr32(NVRAM_CFG1);
8838         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8839                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8840         } else {
8842                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8843                 tw32(NVRAM_CFG1, nvcfg1);
8844         }
8845
8846         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8847             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8848                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8849                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8850                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8851                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8852                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8853                                 break;
8854                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8855                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8856                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8857                                 break;
8858                         case FLASH_VENDOR_ATMEL_EEPROM:
8859                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8860                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8861                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8862                                 break;
8863                         case FLASH_VENDOR_ST:
8864                                 tp->nvram_jedecnum = JEDEC_ST;
8865                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8866                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8867                                 break;
8868                         case FLASH_VENDOR_SAIFUN:
8869                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8870                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8871                                 break;
8872                         case FLASH_VENDOR_SST_SMALL:
8873                         case FLASH_VENDOR_SST_LARGE:
8874                                 tp->nvram_jedecnum = JEDEC_SST;
8875                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8876                                 break;
8877                 }
8878         } else {
8880                 tp->nvram_jedecnum = JEDEC_ATMEL;
8881                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8882                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8883         }
8884 }
8885
8886 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8887 {
8888         u32 nvcfg1;
8889
8890         nvcfg1 = tr32(NVRAM_CFG1);
8891
8892         /* NVRAM protection for TPM */
8893         if (nvcfg1 & (1 << 27))
8894                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8895
8896         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8897                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8898                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8899                         tp->nvram_jedecnum = JEDEC_ATMEL;
8900                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8901                         break;
8902                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8903                         tp->nvram_jedecnum = JEDEC_ATMEL;
8904                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8905                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8906                         break;
8907                 case FLASH_5752VENDOR_ST_M45PE10:
8908                 case FLASH_5752VENDOR_ST_M45PE20:
8909                 case FLASH_5752VENDOR_ST_M45PE40:
8910                         tp->nvram_jedecnum = JEDEC_ST;
8911                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8912                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8913                         break;
8914         }
8915
8916         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8917                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8918                         case FLASH_5752PAGE_SIZE_256:
8919                                 tp->nvram_pagesize = 256;
8920                                 break;
8921                         case FLASH_5752PAGE_SIZE_512:
8922                                 tp->nvram_pagesize = 512;
8923                                 break;
8924                         case FLASH_5752PAGE_SIZE_1K:
8925                                 tp->nvram_pagesize = 1024;
8926                                 break;
8927                         case FLASH_5752PAGE_SIZE_2K:
8928                                 tp->nvram_pagesize = 2048;
8929                                 break;
8930                         case FLASH_5752PAGE_SIZE_4K:
8931                                 tp->nvram_pagesize = 4096;
8932                                 break;
8933                         case FLASH_5752PAGE_SIZE_264:
8934                                 tp->nvram_pagesize = 264;
8935                                 break;
8936                 }
8937         } else {
8939                 /* For eeprom, set pagesize to maximum eeprom size */
8940                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8941
8942                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8943                 tw32(NVRAM_CFG1, nvcfg1);
8944         }
8945 }
8946
8947 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
8948 {
8949         u32 nvcfg1;
8950
8951         nvcfg1 = tr32(NVRAM_CFG1);
8952
8953         /* NVRAM protection for TPM */
8954         if (nvcfg1 & (1 << 27))
8955                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8956
8957         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8958                 case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
8959                 case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
8960                         tp->nvram_jedecnum = JEDEC_ATMEL;
8961                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8962                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8963
8964                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8965                         tw32(NVRAM_CFG1, nvcfg1);
8966                         break;
8967                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8968                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
8969                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
8970                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
8971                 case FLASH_5755VENDOR_ATMEL_FLASH_4:
8972                         tp->nvram_jedecnum = JEDEC_ATMEL;
8973                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8974                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8975                         tp->nvram_pagesize = 264;
8976                         break;
8977                 case FLASH_5752VENDOR_ST_M45PE10:
8978                 case FLASH_5752VENDOR_ST_M45PE20:
8979                 case FLASH_5752VENDOR_ST_M45PE40:
8980                         tp->nvram_jedecnum = JEDEC_ST;
8981                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8982                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8983                         tp->nvram_pagesize = 256;
8984                         break;
8985         }
8986 }
8987
8988 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
8989 {
8990         u32 nvcfg1;
8991
8992         nvcfg1 = tr32(NVRAM_CFG1);
8993
8994         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8995                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
8996                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
8997                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
8998                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
8999                         tp->nvram_jedecnum = JEDEC_ATMEL;
9000                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9001                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9002
9003                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9004                         tw32(NVRAM_CFG1, nvcfg1);
9005                         break;
9006                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9007                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9008                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9009                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9010                         tp->nvram_jedecnum = JEDEC_ATMEL;
9011                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9012                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9013                         tp->nvram_pagesize = 264;
9014                         break;
9015                 case FLASH_5752VENDOR_ST_M45PE10:
9016                 case FLASH_5752VENDOR_ST_M45PE20:
9017                 case FLASH_5752VENDOR_ST_M45PE40:
9018                         tp->nvram_jedecnum = JEDEC_ST;
9019                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9020                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9021                         tp->nvram_pagesize = 256;
9022                         break;
9023         }
9024 }
9025
9026 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9027 static void __devinit tg3_nvram_init(struct tg3 *tp)
9028 {
9029         int j;
9030
9031         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
9032                 return;
9033
9034         tw32_f(GRC_EEPROM_ADDR,
9035              (EEPROM_ADDR_FSM_RESET |
9036               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9037                EEPROM_ADDR_CLKPERD_SHIFT)));
9038
9039         /* XXX schedule_timeout() ... */
9040         for (j = 0; j < 100; j++)
9041                 udelay(10);
9042
9043         /* Enable seeprom accesses. */
9044         tw32_f(GRC_LOCAL_CTRL,
9045              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9046         udelay(100);
9047
9048         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9049             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9050                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9051
9052                 if (tg3_nvram_lock(tp)) {
9053                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
9054                                "tg3_nvram_init failed.\n", tp->dev->name);
9055                         return;
9056                 }
9057                 tg3_enable_nvram_access(tp);
9058
9059                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9060                         tg3_get_5752_nvram_info(tp);
9061                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9062                         tg3_get_5755_nvram_info(tp);
9063                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9064                         tg3_get_5787_nvram_info(tp);
9065                 else
9066                         tg3_get_nvram_info(tp);
9067
9068                 tg3_get_nvram_size(tp);
9069
9070                 tg3_disable_nvram_access(tp);
9071                 tg3_nvram_unlock(tp);
9072
9073         } else {
9074                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9075
9076                 tg3_get_eeprom_size(tp);
9077         }
9078 }
9079
9080 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9081                                         u32 offset, u32 *val)
9082 {
9083         u32 tmp;
9084         int i;
9085
9086         if (offset > EEPROM_ADDR_ADDR_MASK ||
9087             (offset % 4) != 0)
9088                 return -EINVAL;
9089
9090         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9091                                         EEPROM_ADDR_DEVID_MASK |
9092                                         EEPROM_ADDR_READ);
9093         tw32(GRC_EEPROM_ADDR,
9094              tmp |
9095              (0 << EEPROM_ADDR_DEVID_SHIFT) |
9096              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9097               EEPROM_ADDR_ADDR_MASK) |
9098              EEPROM_ADDR_READ | EEPROM_ADDR_START);
9099
9100         for (i = 0; i < 10000; i++) {
9101                 tmp = tr32(GRC_EEPROM_ADDR);
9102
9103                 if (tmp & EEPROM_ADDR_COMPLETE)
9104                         break;
9105                 udelay(100);
9106         }
9107         if (!(tmp & EEPROM_ADDR_COMPLETE))
9108                 return -EBUSY;
9109
9110         *val = tr32(GRC_EEPROM_DATA);
9111         return 0;
9112 }
9113
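/* Poll budget for tg3_nvram_exec_cmd() below: NVRAM_CMD_TIMEOUT passes of
 * udelay(10), i.e. roughly 100 ms before a command is declared stuck.
 */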
9114 #define NVRAM_CMD_TIMEOUT 10000
9115
9116 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9117 {
9118         int i;
9119
9120         tw32(NVRAM_CMD, nvram_cmd);
9121         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9122                 udelay(10);
9123                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9124                         udelay(10);
9125                         break;
9126                 }
9127         }
9128         if (i == NVRAM_CMD_TIMEOUT) {
9129                 return -EBUSY;
9130         }
9131         return 0;
9132 }
9133
9134 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9135 {
9136         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9137             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9138             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9139             (tp->nvram_jedecnum == JEDEC_ATMEL))
9140
9141                 addr = ((addr / tp->nvram_pagesize) <<
9142                         ATMEL_AT45DB0X1B_PAGE_POS) +
9143                        (addr % tp->nvram_pagesize);
9144
9145         return addr;
9146 }
9147
9148 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9149 {
9150         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9151             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9152             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9153             (tp->nvram_jedecnum == JEDEC_ATMEL))
9154
9155                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9156                         tp->nvram_pagesize) +
9157                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9158
9159         return addr;
9160 }
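
/* Worked example for the two translations above (illustrative only): assuming
 * a buffered Atmel flash with the 264-byte AT45DB0X1B page size
 * (nvram_pagesize == 264, ATMEL_AT45DB0X1B_PAGE_POS == 9), a linear offset of
 * 600 lies in page 2 at byte 72, so tg3_nvram_phys_addr() returns
 * (2 << 9) + 72 == 1096, and tg3_nvram_logical_addr(tp, 1096) maps it back to
 * (2 * 264) + 72 == 600.  For parts that do not set all of the NVRAM,
 * NVRAM_BUFFERED and FLASH flags with JEDEC_ATMEL, both helpers return the
 * address unchanged.
 */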
9161
9162 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9163 {
9164         int ret;
9165
9166         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9167                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
9168                 return -EINVAL;
9169         }
9170
9171         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9172                 return tg3_nvram_read_using_eeprom(tp, offset, val);
9173
9174         offset = tg3_nvram_phys_addr(tp, offset);
9175
9176         if (offset > NVRAM_ADDR_MSK)
9177                 return -EINVAL;
9178
9179         ret = tg3_nvram_lock(tp);
9180         if (ret)
9181                 return ret;
9182
9183         tg3_enable_nvram_access(tp);
9184
9185         tw32(NVRAM_ADDR, offset);
9186         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9187                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9188
9189         if (ret == 0)
9190                 *val = swab32(tr32(NVRAM_RDDATA));
9191
9192         tg3_disable_nvram_access(tp);
9193
9194         tg3_nvram_unlock(tp);
9195
9196         return ret;
9197 }
9198
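/* tg3_nvram_read_swab() simply byte-swaps the dword returned by
 * tg3_nvram_read() once more; the part-number and firmware-version readers
 * below use this variant when testing for TG3_EEPROM_MAGIC.
 */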
9199 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9200 {
9201         int err;
9202         u32 tmp;
9203
9204         err = tg3_nvram_read(tp, offset, &tmp);
9205         *val = swab32(tmp);
9206         return err;
9207 }
9208
9209 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9210                                     u32 offset, u32 len, u8 *buf)
9211 {
9212         int i, j, rc = 0;
9213         u32 val;
9214
9215         for (i = 0; i < len; i += 4) {
9216                 u32 addr, data;
9217
9218                 addr = offset + i;
9219
9220                 memcpy(&data, buf + i, 4);
9221
9222                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9223
9224                 val = tr32(GRC_EEPROM_ADDR);
9225                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9226
9227                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9228                         EEPROM_ADDR_READ);
9229                 tw32(GRC_EEPROM_ADDR, val |
9230                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
9231                         (addr & EEPROM_ADDR_ADDR_MASK) |
9232                         EEPROM_ADDR_START |
9233                         EEPROM_ADDR_WRITE);
9234
9235                 for (j = 0; j < 10000; j++) {
9236                         val = tr32(GRC_EEPROM_ADDR);
9237
9238                         if (val & EEPROM_ADDR_COMPLETE)
9239                                 break;
9240                         udelay(100);
9241                 }
9242                 if (!(val & EEPROM_ADDR_COMPLETE)) {
9243                         rc = -EBUSY;
9244                         break;
9245                 }
9246         }
9247
9248         return rc;
9249 }
9250
9251 /* offset and length are dword aligned */
9252 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9253                 u8 *buf)
9254 {
9255         int ret = 0;
9256         u32 pagesize = tp->nvram_pagesize;
9257         u32 pagemask = pagesize - 1;
9258         u32 nvram_cmd;
9259         u8 *tmp;
9260
9261         tmp = kmalloc(pagesize, GFP_KERNEL);
9262         if (tmp == NULL)
9263                 return -ENOMEM;
9264
9265         while (len) {
9266                 int j;
9267                 u32 phy_addr, page_off, size;
9268
9269                 phy_addr = offset & ~pagemask;
9270
9271                 for (j = 0; j < pagesize; j += 4) {
9272                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
9273                                                 (u32 *) (tmp + j))))
9274                                 break;
9275                 }
9276                 if (ret)
9277                         break;
9278
9279                 page_off = offset & pagemask;
9280                 size = pagesize;
9281                 if (len < size)
9282                         size = len;
9283
9284                 len -= size;
9285
9286                 memcpy(tmp + page_off, buf, size);
9287
9288                 offset = offset + (pagesize - page_off);
9289
9290                 tg3_enable_nvram_access(tp);
9291
9292                 /*
9293                  * Before we can erase the flash page, we need
9294                  * to issue a special "write enable" command.
9295                  */
9296                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9297
9298                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9299                         break;
9300
9301                 /* Erase the target page */
9302                 tw32(NVRAM_ADDR, phy_addr);
9303
9304                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9305                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9306
9307                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9308                         break;
9309
9310                 /* Issue another write enable to start the write. */
9311                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9312
9313                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9314                         break;
9315
9316                 for (j = 0; j < pagesize; j += 4) {
9317                         u32 data;
9318
9319                         data = *((u32 *) (tmp + j));
9320                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
9321
9322                         tw32(NVRAM_ADDR, phy_addr + j);
9323
9324                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9325                                 NVRAM_CMD_WR;
9326
9327                         if (j == 0)
9328                                 nvram_cmd |= NVRAM_CMD_FIRST;
9329                         else if (j == (pagesize - 4))
9330                                 nvram_cmd |= NVRAM_CMD_LAST;
9331
9332                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9333                                 break;
9334                 }
9335                 if (ret)
9336                         break;
9337         }
9338
9339         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9340         tg3_nvram_exec_cmd(tp, nvram_cmd);
9341
9342         kfree(tmp);
9343
9344         return ret;
9345 }
9346
9347 /* offset and length are dword aligned */
9348 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9349                 u8 *buf)
9350 {
9351         int i, ret = 0;
9352
9353         for (i = 0; i < len; i += 4, offset += 4) {
9354                 u32 data, page_off, phy_addr, nvram_cmd;
9355
9356                 memcpy(&data, buf + i, 4);
9357                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9358
9359                 page_off = offset % tp->nvram_pagesize;
9360
9361                 phy_addr = tg3_nvram_phys_addr(tp, offset);
9362
9363                 tw32(NVRAM_ADDR, phy_addr);
9364
9365                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9366
9367                 if ((page_off == 0) || (i == 0))
9368                         nvram_cmd |= NVRAM_CMD_FIRST;
9369                 else if (page_off == (tp->nvram_pagesize - 4))
9370                         nvram_cmd |= NVRAM_CMD_LAST;
9371
9372                 if (i == (len - 4))
9373                         nvram_cmd |= NVRAM_CMD_LAST;
9374
9375                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9376                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
9377                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
9378                     (tp->nvram_jedecnum == JEDEC_ST) &&
9379                     (nvram_cmd & NVRAM_CMD_FIRST)) {
9380
9381                         if ((ret = tg3_nvram_exec_cmd(tp,
9382                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9383                                 NVRAM_CMD_DONE)))
9384
9385                                 break;
9386                 }
9387                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9388                         /* We always do complete word writes to eeprom. */
9389                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9390                 }
9391
9392                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9393                         break;
9394         }
9395         return ret;
9396 }
9397
9398 /* offset and length are dword aligned */
9399 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9400 {
9401         int ret;
9402
9403         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9404                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
9405                 return -EINVAL;
9406         }
9407
9408         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9409                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9410                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9411                 udelay(40);
9412         }
9413
9414         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9415                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9416         }
9417         else {
9418                 u32 grc_mode;
9419
9420                 ret = tg3_nvram_lock(tp);
9421                 if (ret)
9422                         return ret;
9423
9424                 tg3_enable_nvram_access(tp);
9425                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9426                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9427                         tw32(NVRAM_WRITE1, 0x406);
9428
9429                 grc_mode = tr32(GRC_MODE);
9430                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9431
9432                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9433                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9434
9435                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9436                                 buf);
9437                 }
9438                 else {
9439                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9440                                 buf);
9441                 }
9442
9443                 grc_mode = tr32(GRC_MODE);
9444                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9445
9446                 tg3_disable_nvram_access(tp);
9447                 tg3_nvram_unlock(tp);
9448         }
9449
9450         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9451                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9452                 udelay(40);
9453         }
9454
9455         return ret;
9456 }
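
/* Minimal caller sketch for tg3_nvram_write_block() (illustrative only, kept
 * out of the build): offset and len must be dword aligned and buf holds the
 * bytes exactly as they should land in NVRAM.  The function takes care of
 * lifting the eeprom write protect (when TG3_FLAG_EEPROM_WRITE_PROT is set),
 * choosing the eeprom, buffered or unbuffered flash path, and restoring the
 * protection afterwards.
 */
#if 0
static int tg3_nvram_write_example(struct tg3 *tp)
{
	u8 buf[8] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };

	/* Write two dwords starting at byte offset 0x80. */
	return tg3_nvram_write_block(tp, 0x80, sizeof(buf), buf);
}
#endif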
9457
9458 struct subsys_tbl_ent {
9459         u16 subsys_vendor, subsys_devid;
9460         u32 phy_id;
9461 };
9462
9463 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9464         /* Broadcom boards. */
9465         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9466         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9467         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9468         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
9469         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9470         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9471         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
9472         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9473         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9474         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9475         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9476
9477         /* 3com boards. */
9478         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9479         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9480         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
9481         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9482         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9483
9484         /* DELL boards. */
9485         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9486         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9487         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9488         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9489
9490         /* Compaq boards. */
9491         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9492         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9493         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
9494         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9495         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9496
9497         /* IBM boards. */
9498         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9499 };
9500
9501 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9502 {
9503         int i;
9504
9505         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9506                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9507                      tp->pdev->subsystem_vendor) &&
9508                     (subsys_id_to_phy_id[i].subsys_devid ==
9509                      tp->pdev->subsystem_device))
9510                         return &subsys_id_to_phy_id[i];
9511         }
9512         return NULL;
9513 }
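
/* Example use of lookup_by_subsys() (illustrative only): a 3C996T reports PCI
 * subsystem IDs PCI_VENDOR_ID_3COM/0x1000, so the lookup returns the table
 * entry carrying PHY_ID_BCM5401; a board that is not listed yields NULL and
 * tg3_phy_probe() below gives up with -ENODEV.
 */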
9514
9515 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9516 {
9517         u32 val;
9518         u16 pmcsr;
9519
9520         /* On some early chips the SRAM cannot be accessed in D3hot state,
9521          * so we need to make sure we're in D0.
9522          */
9523         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9524         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9525         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9526         msleep(1);
9527
9528         /* Make sure register accesses (indirect or otherwise)
9529          * will function correctly.
9530          */
9531         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9532                                tp->misc_host_ctrl);
9533
9534         tp->phy_id = PHY_ID_INVALID;
9535         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9536
9537         /* Do not even try poking around in here on Sun parts.  */
9538         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
9539                 return;
9540
9541         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9542         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9543                 u32 nic_cfg, led_cfg;
9544                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9545                 int eeprom_phy_serdes = 0;
9546
9547                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9548                 tp->nic_sram_data_cfg = nic_cfg;
9549
9550                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9551                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9552                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9553                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9554                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9555                     (ver > 0) && (ver < 0x100))
9556                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9557
9558                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9559                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9560                         eeprom_phy_serdes = 1;
9561
9562                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9563                 if (nic_phy_id != 0) {
9564                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9565                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9566
9567                         eeprom_phy_id  = (id1 >> 16) << 10;
9568                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9569                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9570                 } else
9571                         eeprom_phy_id = 0;
9572
9573                 tp->phy_id = eeprom_phy_id;
9574                 if (eeprom_phy_serdes) {
9575                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9576                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9577                         else
9578                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9579                 }
9580
9581                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9582                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9583                                     SHASTA_EXT_LED_MODE_MASK);
9584                 else
9585                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9586
9587                 switch (led_cfg) {
9588                 default:
9589                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9590                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9591                         break;
9592
9593                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9594                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9595                         break;
9596
9597                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9598                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9599
9600                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9601                          * read from some older 5700/5701 bootcode.
9602                          */
9603                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9604                             ASIC_REV_5700 ||
9605                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9606                             ASIC_REV_5701)
9607                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9608
9609                         break;
9610
9611                 case SHASTA_EXT_LED_SHARED:
9612                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9613                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9614                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9615                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9616                                                  LED_CTRL_MODE_PHY_2);
9617                         break;
9618
9619                 case SHASTA_EXT_LED_MAC:
9620                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9621                         break;
9622
9623                 case SHASTA_EXT_LED_COMBO:
9624                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9625                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9626                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9627                                                  LED_CTRL_MODE_PHY_2);
9628                         break;
9629
9630                 }
9631
9632                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9633                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9634                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9635                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9636
9637                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9638                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9639                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
9640                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9641
9642                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9643                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9644                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9645                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9646                 }
9647                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9648                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9649
9650                 if (cfg2 & (1 << 17))
9651                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9652
9653                 /* SerDes signal pre-emphasis in register 0x590 is set by
9654                  * the bootcode if bit 18 is set. */
9655                 if (cfg2 & (1 << 18))
9656                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9657         }
9658 }
9659
9660 static int __devinit tg3_phy_probe(struct tg3 *tp)
9661 {
9662         u32 hw_phy_id_1, hw_phy_id_2;
9663         u32 hw_phy_id, hw_phy_id_masked;
9664         int err;
9665
9666         /* Reading the PHY ID register can conflict with ASF
9667          * firmware access to the PHY hardware.
9668          */
9669         err = 0;
9670         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9671                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9672         } else {
9673                 /* Now read the physical PHY_ID from the chip and verify
9674                  * that it is sane.  If it doesn't look good, we fall back
9675                  * to the PHY_ID found in the eeprom area, and failing
9676                  * that, the hard-coded subsystem-ID table.
9677                  */
9678                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9679                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9680
9681                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9682                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9683                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9684
9685                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9686         }
9687
9688         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9689                 tp->phy_id = hw_phy_id;
9690                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9691                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9692                 else
9693                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9694         } else {
9695                 if (tp->phy_id != PHY_ID_INVALID) {
9696                         /* Do nothing, phy ID already set up in
9697                          * tg3_get_eeprom_hw_cfg().
9698                          */
9699                 } else {
9700                         struct subsys_tbl_ent *p;
9701
9702                         /* No eeprom signature?  Try the hardcoded
9703                          * subsys device table.
9704                          */
9705                         p = lookup_by_subsys(tp);
9706                         if (!p)
9707                                 return -ENODEV;
9708
9709                         tp->phy_id = p->phy_id;
9710                         if (!tp->phy_id ||
9711                             tp->phy_id == PHY_ID_BCM8002)
9712                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9713                 }
9714         }
9715
9716         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9717             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9718                 u32 bmsr, adv_reg, tg3_ctrl;
9719
9720                 tg3_readphy(tp, MII_BMSR, &bmsr);
9721                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9722                     (bmsr & BMSR_LSTATUS))
9723                         goto skip_phy_reset;
9724
9725                 err = tg3_phy_reset(tp);
9726                 if (err)
9727                         return err;
9728
9729                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9730                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9731                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9732                 tg3_ctrl = 0;
9733                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9734                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9735                                     MII_TG3_CTRL_ADV_1000_FULL);
9736                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9737                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9738                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9739                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9740                 }
9741
9742                 if (!tg3_copper_is_advertising_all(tp)) {
9743                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9744
9745                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9746                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9747
9748                         tg3_writephy(tp, MII_BMCR,
9749                                      BMCR_ANENABLE | BMCR_ANRESTART);
9750                 }
9751                 tg3_phy_set_wirespeed(tp);
9752
9753                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9754                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9755                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9756         }
9757
9758 skip_phy_reset:
9759         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9760                 err = tg3_init_5401phy_dsp(tp);
9761                 if (err)
9762                         return err;
9763         }
9764
9765         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9766                 err = tg3_init_5401phy_dsp(tp);
9767         }
9768
9769         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9770                 tp->link_config.advertising =
9771                         (ADVERTISED_1000baseT_Half |
9772                          ADVERTISED_1000baseT_Full |
9773                          ADVERTISED_Autoneg |
9774                          ADVERTISED_FIBRE);
9775         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9776                 tp->link_config.advertising &=
9777                         ~(ADVERTISED_1000baseT_Half |
9778                           ADVERTISED_1000baseT_Full);
9779
9780         return err;
9781 }
9782
9783 static void __devinit tg3_read_partno(struct tg3 *tp)
9784 {
9785         unsigned char vpd_data[256];
9786         int i;
9787         u32 magic;
9788
9789         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9790                 /* Sun decided not to put the necessary bits in the
9791                  * NVRAM of their onboard tg3 parts :(
9792                  */
9793                 strcpy(tp->board_part_number, "Sun 570X");
9794                 return;
9795         }
9796
9797         if (tg3_nvram_read_swab(tp, 0x0, &magic))
9798                 return;
9799
9800         if (magic == TG3_EEPROM_MAGIC) {
9801                 for (i = 0; i < 256; i += 4) {
9802                         u32 tmp;
9803
9804                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9805                                 goto out_not_found;
9806
9807                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9808                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9809                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9810                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9811                 }
9812         } else {
9813                 int vpd_cap;
9814
9815                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
9816                 for (i = 0; i < 256; i += 4) {
9817                         u32 tmp, j = 0;
9818                         u16 tmp16;
9819
9820                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
9821                                               i);
9822                         while (j++ < 100) {
9823                                 pci_read_config_word(tp->pdev, vpd_cap +
9824                                                      PCI_VPD_ADDR, &tmp16);
9825                                 if (tmp16 & 0x8000)
9826                                         break;
9827                                 msleep(1);
9828                         }
9829                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
9830                                               &tmp);
9831                         tmp = cpu_to_le32(tmp);
9832                         memcpy(&vpd_data[i], &tmp, 4);
9833                 }
9834         }
9835
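/* The loop below walks standard PCI VPD "large resource" tags (a sketch of
 * the layout, per the PCI 2.2 spec): 0x82 is the Identifier String tag and
 * 0x91 the VPD-W tag, both skipped; 0x90 is the VPD-R tag we want.  Each tag
 * byte is followed by a two-byte little-endian length, and every field inside
 * VPD-R is a two-character keyword, a one-byte length and the data, so the
 * part number lives in the field keyed "PN".
 */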
9836         /* Now parse and find the part number. */
9837         for (i = 0; i < 256; ) {
9838                 unsigned char val = vpd_data[i];
9839                 int block_end;
9840
9841                 if (val == 0x82 || val == 0x91) {
9842                         i = (i + 3 +
9843                              (vpd_data[i + 1] +
9844                               (vpd_data[i + 2] << 8)));
9845                         continue;
9846                 }
9847
9848                 if (val != 0x90)
9849                         goto out_not_found;
9850
9851                 block_end = (i + 3 +
9852                              (vpd_data[i + 1] +
9853                               (vpd_data[i + 2] << 8)));
9854                 i += 3;
9855                 while (i < block_end) {
9856                         if (vpd_data[i + 0] == 'P' &&
9857                             vpd_data[i + 1] == 'N') {
9858                                 int partno_len = vpd_data[i + 2];
9859
9860                                 if (partno_len > 24)
9861                                         goto out_not_found;
9862
9863                                 memcpy(tp->board_part_number,
9864                                        &vpd_data[i + 3],
9865                                        partno_len);
9866
9867                                 /* Success. */
9868                                 return;
9869                         }
                              /* Skip this VPD-R field (2-byte keyword, 1-byte
                               * length, then data) so a missing "PN" keyword
                               * cannot spin forever.
                               */
                              i += 3 + vpd_data[i + 2];
9870                 }
9871
9872                 /* Part number not found. */
9873                 goto out_not_found;
9874         }
9875
9876 out_not_found:
9877         strcpy(tp->board_part_number, "none");
9878 }
9879
9880 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
9881 {
9882         u32 val, offset, start;
9883
9884         if (tg3_nvram_read_swab(tp, 0, &val))
9885                 return;
9886
9887         if (val != TG3_EEPROM_MAGIC)
9888                 return;
9889
9890         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
9891             tg3_nvram_read_swab(tp, 0x4, &start))
9892                 return;
9893
9894         offset = tg3_nvram_logical_addr(tp, offset);
9895         if (tg3_nvram_read_swab(tp, offset, &val))
9896                 return;
9897
9898         if ((val & 0xfc000000) == 0x0c000000) {
9899                 u32 ver_offset, addr;
9900                 int i;
9901
9902                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
9903                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
9904                         return;
9905
9906                 if (val != 0)
9907                         return;
9908
9909                 addr = offset + ver_offset - start;
9910                 for (i = 0; i < 16; i += 4) {
9911                         if (tg3_nvram_read(tp, addr + i, &val))
9912                                 return;
9913
9914                         val = cpu_to_le32(val);
9915                         memcpy(tp->fw_ver + i, &val, 4);
9916                 }
9917         }
9918 }
9919
9920 #ifdef CONFIG_SPARC64
9921 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9922 {
9923         struct pci_dev *pdev = tp->pdev;
9924         struct pcidev_cookie *pcp = pdev->sysdata;
9925
9926         if (pcp != NULL) {
9927                 int node = pcp->prom_node;
9928                 u32 venid;
9929                 int err;
9930
9931                 err = prom_getproperty(node, "subsystem-vendor-id",
9932                                        (char *) &venid, sizeof(venid));
9933                 if (err == 0 || err == -1)
9934                         return 0;
9935                 if (venid == PCI_VENDOR_ID_SUN)
9936                         return 1;
9937
9938                 /* TG3 chips onboard the SunBlade-2500 don't have the
9939                  * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
9940                  * are distinguishable from non-Sun variants by being
9941                  * named "network" by the firmware.  Non-Sun cards will
9942                  * show up as being named "ethernet".
9943                  */
9944                 if (!strcmp(pcp->prom_name, "network"))
9945                         return 1;
9946         }
9947         return 0;
9948 }
9949 #endif
9950
9951 static int __devinit tg3_get_invariants(struct tg3 *tp)
9952 {
9953         static struct pci_device_id write_reorder_chipsets[] = {
9954                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9955                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9956                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9957                              PCI_DEVICE_ID_VIA_8385_0) },
9958                 { },
9959         };
9960         u32 misc_ctrl_reg;
9961         u32 cacheline_sz_reg;
9962         u32 pci_state_reg, grc_misc_cfg;
9963         u32 val;
9964         u16 pci_cmd;
9965         int err;
9966
9967 #ifdef CONFIG_SPARC64
9968         if (tg3_is_sun_570X(tp))
9969                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9970 #endif
9971
9972         /* Force memory write invalidate off.  If we leave it on,
9973          * then on 5700_BX chips we have to enable a workaround.
9974          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9975          * to match the cacheline size.  The Broadcom driver has this
9976          * workaround but turns MWI off at all times, so it never uses
9977          * it.  This seems to suggest that the workaround is insufficient.
9978          */
9979         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9980         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9981         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9982
9983         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9984          * has the register indirect write enable bit set before
9985          * we try to access any of the MMIO registers.  It is also
9986          * critical that the PCI-X hw workaround situation is decided
9987          * before that as well.
9988          */
9989         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9990                               &misc_ctrl_reg);
9991
9992         tp->pci_chip_rev_id = (misc_ctrl_reg >>
9993                                MISC_HOST_CTRL_CHIPREV_SHIFT);
9994
9995         /* Wrong chip ID in 5752 A0. This code can be removed later
9996          * as A0 is not in production.
9997          */
9998         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9999                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10000
10001         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10002          * we need to disable memory and use config. cycles
10003          * only to access all registers. The 5702/03 chips
10004          * can mistakenly decode the special cycles from the
10005          * ICH chipsets as memory write cycles, causing corruption
10006          * of register and memory space. Only certain ICH bridges
10007          * will drive special cycles with non-zero data during the
10008          * address phase which can fall within the 5703's address
10009          * range. This is not an ICH bug as the PCI spec allows
10010          * non-zero address during special cycles. However, only
10011          * these ICH bridges are known to drive non-zero addresses
10012          * during special cycles.
10013          *
10014          * Since special cycles do not cross PCI bridges, we only
10015          * enable this workaround if the 5703 is on the secondary
10016          * bus of these ICH bridges.
10017          */
10018         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10019             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10020                 static struct tg3_dev_id {
10021                         u32     vendor;
10022                         u32     device;
10023                         u32     rev;
10024                 } ich_chipsets[] = {
10025                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10026                           PCI_ANY_ID },
10027                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10028                           PCI_ANY_ID },
10029                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10030                           0xa },
10031                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10032                           PCI_ANY_ID },
10033                         { },
10034                 };
10035                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10036                 struct pci_dev *bridge = NULL;
10037
10038                 while (pci_id->vendor != 0) {
10039                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10040                                                 bridge);
10041                         if (!bridge) {
10042                                 pci_id++;
10043                                 continue;
10044                         }
10045                         if (pci_id->rev != PCI_ANY_ID) {
10046                                 u8 rev;
10047
10048                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
10049                                                      &rev);
10050                                 if (rev > pci_id->rev)
10051                                         continue;
10052                         }
10053                         if (bridge->subordinate &&
10054                             (bridge->subordinate->number ==
10055                              tp->pdev->bus->number)) {
10056
10057                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10058                                 pci_dev_put(bridge);
10059                                 break;
10060                         }
10061                 }
10062         }
10063
10064         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10065          * DMA addresses > 40-bit.  This bridge may have additional
10066          * 57xx devices behind it in some 4-port NIC designs, for example.
10067          * Any tg3 device found behind the bridge will also need the 40-bit
10068          * DMA workaround.
10069          */
10070         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10071             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10072                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10073                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10074                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10075         }
10076         else {
10077                 struct pci_dev *bridge = NULL;
10078
10079                 do {
10080                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10081                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10082                                                 bridge);
10083                         if (bridge && bridge->subordinate &&
10084                             (bridge->subordinate->number <=
10085                              tp->pdev->bus->number) &&
10086                             (bridge->subordinate->subordinate >=
10087                              tp->pdev->bus->number)) {
10088                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10089                                 pci_dev_put(bridge);
10090                                 break;
10091                         }
10092                 } while (bridge);
10093         }
10094
10095         /* Initialize misc host control in PCI block. */
10096         tp->misc_host_ctrl |= (misc_ctrl_reg &
10097                                MISC_HOST_CTRL_CHIPREV);
10098         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10099                                tp->misc_host_ctrl);
10100
10101         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10102                               &cacheline_sz_reg);
10103
10104         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10105         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10106         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10107         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10108
10109         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10110             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10111             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10112             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10113             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10114                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10115
10116         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10117             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10118                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10119
10120         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10121                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10122                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10123                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10124                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10125                 } else
10126                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1;
10127         }
10128
10129         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10130             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10131             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10132             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10133             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10134                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10135
10136         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10137                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10138
10139         /* If we have an AMD 762 or VIA K8T800 chipset, write
10140          * reordering to the mailbox registers done by the host
10141          * controller can cause major trouble.  We read back from
10142          * every mailbox register write to force the writes to be
10143          * posted to the chip in order.
10144          */
10145         if (pci_dev_present(write_reorder_chipsets) &&
10146             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10147                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10148
10149         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10150             tp->pci_lat_timer < 64) {
10151                 tp->pci_lat_timer = 64;
10152
10153                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10154                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10155                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10156                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10157
10158                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10159                                        cacheline_sz_reg);
10160         }
10161
10162         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10163                               &pci_state_reg);
10164
10165         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10166                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10167
10168                 /* If this is a 5700 BX chipset, and we are in PCI-X
10169                  * mode, enable register write workaround.
10170                  *
10171                  * The workaround is to use indirect register accesses
10172                  * for all chip writes not to mailbox registers.
10173                  */
10174                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10175                         u32 pm_reg;
10176                         u16 pci_cmd;
10177
10178                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10179
10180                         /* The chip can have its power management PCI config
10181                          * space registers clobbered due to this bug.
10182                          * So explicitly force the chip into D0 here.
10183                          */
10184                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10185                                               &pm_reg);
10186                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10187                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10188                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10189                                                pm_reg);
10190
10191                         /* Also, force SERR#/PERR# in PCI command. */
10192                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10193                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10194                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10195                 }
10196         }
10197
10198         /* 5700 BX chips need to have their TX producer index mailboxes
10199          * written twice to workaround a bug.
10200          */
10201         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10202                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10203
10204         /* Back to back register writes can cause problems on this chip,
10205          * the workaround is to read back all reg writes except those to
10206          * mailbox regs.  See tg3_write_indirect_reg32().
10207          *
10208          * PCI Express 5750_A0 rev chips need this workaround too.
10209          */
10210         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10211             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10212              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10213                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10214
10215         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10216                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10217         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10218                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10219
10220         /* Chip-specific fixup from Broadcom driver */
10221         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10222             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10223                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10224                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10225         }
10226
10227         /* Default fast path register access methods */
10228         tp->read32 = tg3_read32;
10229         tp->write32 = tg3_write32;
10230         tp->read32_mbox = tg3_read32;
10231         tp->write32_mbox = tg3_write32;
10232         tp->write32_tx_mbox = tg3_write32;
10233         tp->write32_rx_mbox = tg3_write32;
10234
10235         /* Various workaround register access methods */
10236         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10237                 tp->write32 = tg3_write_indirect_reg32;
10238         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10239                 tp->write32 = tg3_write_flush_reg32;
10240
10241         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10242             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10243                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10244                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10245                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10246         }
10247
10248         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10249                 tp->read32 = tg3_read_indirect_reg32;
10250                 tp->write32 = tg3_write_indirect_reg32;
10251                 tp->read32_mbox = tg3_read_indirect_mbox;
10252                 tp->write32_mbox = tg3_write_indirect_mbox;
10253                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10254                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10255
10256                 iounmap(tp->regs);
10257                 tp->regs = NULL;
10258
10259                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10260                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10261                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10262         }
10263
10264         /* Get eeprom hw config before calling tg3_set_power_state().
10265          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10266          * determined before calling tg3_set_power_state() so that
10267          * we know whether or not to switch out of Vaux power.
10268          * When the flag is set, it means that GPIO1 is used for eeprom
10269          * write protect and also implies that it is a LOM where GPIOs
10270          * are not used to switch power.
10271          */
10272         tg3_get_eeprom_hw_cfg(tp);
10273
10274         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10275          * GPIO1 driven high will bring 5700's external PHY out of reset.
10276          * It is also used as eeprom write protect on LOMs.
10277          */
10278         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10279         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10280             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10281                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10282                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10283         /* Unused GPIO3 must be driven as output on 5752 because there
10284          * are no pull-up resistors on unused GPIO pins.
10285          */
10286         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10287                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10288
10289         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10290                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10291
10292         /* Force the chip into D0. */
10293         err = tg3_set_power_state(tp, PCI_D0);
10294         if (err) {
10295                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10296                        pci_name(tp->pdev));
10297                 return err;
10298         }
10299
10300         /* 5700 B0 chips do not support checksumming correctly due
10301          * to hardware bugs.
10302          */
10303         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10304                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10305
10306         /* Pseudo-header checksum is done by hardware logic and not
10307          * the offload processors, so make the chip do the pseudo-
10308          * header checksums on receive.  For transmit it is more
10309          * convenient to do the pseudo-header checksum in software
10310          * as Linux does that on transmit for us in all cases.
10311          */
10312         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
10313         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
10314
10315         /* Derive initial jumbo mode from MTU assigned in
10316          * ether_setup() via the alloc_etherdev() call
10317          */
10318         if (tp->dev->mtu > ETH_DATA_LEN &&
10319             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10320                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10321
10322         /* Determine WakeOnLan speed to use. */
10323         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10324             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10325             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10326             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10327                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10328         } else {
10329                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10330         }
10331
10332         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
10333         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10334             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10335              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10336              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10337             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10338                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10339
10340         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10341             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10342                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10343         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10344                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10345
10346         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
10347             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
10348             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787))
10349                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10350
10351         tp->coalesce_mode = 0;
10352         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10353             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10354                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10355
10356         /* Initialize MAC MI mode, polling disabled. */
10357         tw32_f(MAC_MI_MODE, tp->mi_mode);
10358         udelay(80);
10359
10360         /* Initialize data/descriptor byte/word swapping. */
10361         val = tr32(GRC_MODE);
10362         val &= GRC_MODE_HOST_STACKUP;
10363         tw32(GRC_MODE, val | tp->grc_mode);
10364
10365         tg3_switch_clocks(tp);
10366
10367         /* Clear this out for sanity. */
10368         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10369
10370         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10371                               &pci_state_reg);
10372         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10373             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10374                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10375
10376                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10377                     chiprevid == CHIPREV_ID_5701_B0 ||
10378                     chiprevid == CHIPREV_ID_5701_B2 ||
10379                     chiprevid == CHIPREV_ID_5701_B5) {
10380                         void __iomem *sram_base;
10381
10382                         /* Write some dummy words into the SRAM status block
10383                          * area and see if they read back correctly.  If the
10384                          * readback is bad, force-enable the PCIX workaround.
10385                          */
10386                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10387
10388                         writel(0x00000000, sram_base);
10389                         writel(0x00000000, sram_base + 4);
10390                         writel(0xffffffff, sram_base + 4);
10391                         if (readl(sram_base) != 0x00000000)
10392                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10393                 }
10394         }
10395
10396         udelay(50);
10397         tg3_nvram_init(tp);
10398
10399         grc_misc_cfg = tr32(GRC_MISC_CFG);
10400         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10401
10402         /* Broadcom's driver says that CIOBE multisplit has a bug */
10403 #if 0
10404         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10405             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10406                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10407                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10408         }
10409 #endif
10410         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10411             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10412              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10413                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10414
10415         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10416             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10417                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10418         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10419                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10420                                       HOSTCC_MODE_CLRTICK_TXBD);
10421
10422                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10423                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10424                                        tp->misc_host_ctrl);
10425         }
10426
10427         /* These boards are limited to 10/100 operation */
10428         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10429              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10430             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10431              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10432              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10433               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10434               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10435             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10436              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10437               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10438                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10439
10440         err = tg3_phy_probe(tp);
10441         if (err) {
10442                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10443                        pci_name(tp->pdev), err);
10444                 /* ... but do not return immediately ... */
10445         }
10446
10447         tg3_read_partno(tp);
10448         tg3_read_fw_ver(tp);
10449
10450         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10451                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10452         } else {
10453                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10454                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10455                 else
10456                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10457         }
10458
10459         /* 5700 {AX,BX} chips have a broken status block link
10460          * change bit implementation, so we must use the
10461          * status register in those cases.
10462          */
10463         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10464                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10465         else
10466                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10467
10468         /* The led_ctrl is set during tg3_phy_probe, here we might
10469          * have to force the link status polling mechanism based
10470          * upon subsystem IDs.
10471          */
10472         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10473             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10474                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10475                                   TG3_FLAG_USE_LINKCHG_REG);
10476         }
10477
10478         /* For all SERDES we poll the MAC status register. */
10479         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10480                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10481         else
10482                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10483
10484         /* All chips other than the 5755 and 5787 can get confused if
10485          * TX buffers straddle the 4GB address boundary in some cases.
10486          */
10487         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10488             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10489                 tp->dev->hard_start_xmit = tg3_start_xmit;
10490         else
10491                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10492
10493         tp->rx_offset = 2;
10494         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10495             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10496                 tp->rx_offset = 0;
10497
10498         /* By default, disable wake-on-lan.  User can change this
10499          * using ETHTOOL_SWOL.
10500          */
10501         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10502
10503         return err;
10504 }
10505
10506 #ifdef CONFIG_SPARC64
10507 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10508 {
10509         struct net_device *dev = tp->dev;
10510         struct pci_dev *pdev = tp->pdev;
10511         struct pcidev_cookie *pcp = pdev->sysdata;
10512
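              /* On sparc64, prefer the OpenFirmware "local-mac-address"
               * property when it is present.
               */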
10513         if (pcp != NULL) {
10514                 int node = pcp->prom_node;
10515
10516                 if (prom_getproplen(node, "local-mac-address") == 6) {
10517                         prom_getproperty(node, "local-mac-address",
10518                                          dev->dev_addr, 6);
10519                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10520                         return 0;
10521                 }
10522         }
10523         return -ENODEV;
10524 }
10525
10526 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10527 {
10528         struct net_device *dev = tp->dev;
10529
10530         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10531         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10532         return 0;
10533 }
10534 #endif
10535
10536 static int __devinit tg3_get_device_address(struct tg3 *tp)
10537 {
10538         struct net_device *dev = tp->dev;
10539         u32 hi, lo, mac_offset;
10540         int addr_ok = 0;
10541
10542 #ifdef CONFIG_SPARC64
10543         if (!tg3_get_macaddr_sparc(tp))
10544                 return 0;
10545 #endif
10546
10547         mac_offset = 0x7c;
10548         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10549              !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) ||
10550             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10551                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10552                         mac_offset = 0xcc;
10553                 if (tg3_nvram_lock(tp))
10554                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10555                 else
10556                         tg3_nvram_unlock(tp);
10557         }
10558
10559         /* First try to get it from MAC address mailbox. */
10560         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
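              /* A valid mailbox entry carries an ASCII "HK" (0x484b)
               * signature in the upper 16 bits of the high word.
               */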
10561         if ((hi >> 16) == 0x484b) {
10562                 dev->dev_addr[0] = (hi >>  8) & 0xff;
10563                 dev->dev_addr[1] = (hi >>  0) & 0xff;
10564
10565                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10566                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10567                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10568                 dev->dev_addr[4] = (lo >>  8) & 0xff;
10569                 dev->dev_addr[5] = (lo >>  0) & 0xff;
10570
10571                 /* Some old bootcode may report a 0 MAC address in SRAM */
10572                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
10573         }
10574         if (!addr_ok) {
10575                 /* Next, try NVRAM. */
10576                 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
10577                     !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10578                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10579                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
10580                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
10581                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
10582                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
10583                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
10584                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
10585                 }
10586                 /* Finally just fetch it out of the MAC control regs. */
10587                 else {
10588                         hi = tr32(MAC_ADDR_0_HIGH);
10589                         lo = tr32(MAC_ADDR_0_LOW);
10590
10591                         dev->dev_addr[5] = lo & 0xff;
10592                         dev->dev_addr[4] = (lo >> 8) & 0xff;
10593                         dev->dev_addr[3] = (lo >> 16) & 0xff;
10594                         dev->dev_addr[2] = (lo >> 24) & 0xff;
10595                         dev->dev_addr[1] = hi & 0xff;
10596                         dev->dev_addr[0] = (hi >> 8) & 0xff;
10597                 }
10598         }
10599
10600         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10601 #ifdef CONFIG_SPARC64
10602                 if (!tg3_get_default_macaddr_sparc(tp))
10603                         return 0;
10604 #endif
10605                 return -EINVAL;
10606         }
10607         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10608         return 0;
10609 }
10610
10611 #define BOUNDARY_SINGLE_CACHELINE       1
10612 #define BOUNDARY_MULTI_CACHELINE        2
10613
10614 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10615 {
10616         int cacheline_size;
10617         u8 byte;
10618         int goal;
10619
10620         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
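              /* PCI_CACHE_LINE_SIZE is in units of 32-bit words; a value
               * of zero is treated as unknown and falls back to 1024 bytes.
               */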
10621         if (byte == 0)
10622                 cacheline_size = 1024;
10623         else
10624                 cacheline_size = (int) byte * 4;
10625
10626         /* On 5703 and later chips the boundary bits have no effect,
10627          * except on PCI Express devices, where the write boundary bits still apply.
10628          */
10629         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10630             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10631             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10632                 goto out;
10633
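              /* Choose a per-architecture DMA boundary goal; a goal of 0
               * leaves the chip's default boundary settings untouched.
               */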
10634 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10635         goal = BOUNDARY_MULTI_CACHELINE;
10636 #else
10637 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10638         goal = BOUNDARY_SINGLE_CACHELINE;
10639 #else
10640         goal = 0;
10641 #endif
10642 #endif
10643
10644         if (!goal)
10645                 goto out;
10646
10647         /* PCI controllers on most RISC systems tend to disconnect
10648          * when a device tries to burst across a cache-line boundary.
10649          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10650          *
10651          * Unfortunately, for PCI-E there are only limited
10652          * write-side controls for this, and thus for reads
10653          * we will still get the disconnects.  We'll also waste
10654          * these PCI cycles for both read and write for chips
10655          * other than 5700 and 5701 which do not implement the
10656          * boundary bits.
10657          */
10658         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10659             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10660                 switch (cacheline_size) {
10661                 case 16:
10662                 case 32:
10663                 case 64:
10664                 case 128:
10665                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10666                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10667                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10668                         } else {
10669                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10670                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10671                         }
10672                         break;
10673
10674                 case 256:
10675                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10676                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10677                         break;
10678
10679                 default:
10680                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10681                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10682                         break;
10683                 }
10684         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10685                 switch (cacheline_size) {
10686                 case 16:
10687                 case 32:
10688                 case 64:
10689                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10690                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10691                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10692                                 break;
10693                         }
10694                         /* fallthrough */
10695                 case 128:
10696                 default:
10697                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10698                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10699                         break;
10700                 }
10701         } else {
10702                 switch (cacheline_size) {
10703                 case 16:
10704                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10705                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10706                                         DMA_RWCTRL_WRITE_BNDRY_16);
10707                                 break;
10708                         }
10709                         /* fallthrough */
10710                 case 32:
10711                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10712                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10713                                         DMA_RWCTRL_WRITE_BNDRY_32);
10714                                 break;
10715                         }
10716                         /* fallthrough */
10717                 case 64:
10718                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10719                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10720                                         DMA_RWCTRL_WRITE_BNDRY_64);
10721                                 break;
10722                         }
10723                         /* fallthrough */
10724                 case 128:
10725                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10726                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10727                                         DMA_RWCTRL_WRITE_BNDRY_128);
10728                                 break;
10729                         }
10730                         /* fallthrough */
10731                 case 256:
10732                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10733                                 DMA_RWCTRL_WRITE_BNDRY_256);
10734                         break;
10735                 case 512:
10736                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10737                                 DMA_RWCTRL_WRITE_BNDRY_512);
10738                         break;
10739                 case 1024:
10740                 default:
10741                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10742                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10743                         break;
10744                 }
10745         }
10746
10747 out:
10748         return val;
10749 }
10750
10751 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10752 {
10753         struct tg3_internal_buffer_desc test_desc;
10754         u32 sram_dma_descs;
10755         int i, ret;
10756
10757         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10758
10759         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10760         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10761         tw32(RDMAC_STATUS, 0);
10762         tw32(WDMAC_STATUS, 0);
10763
10764         tw32(BUFMGR_MODE, 0);
10765         tw32(FTQ_RESET, 0);
10766
10767         test_desc.addr_hi = ((u64) buf_dma) >> 32;
10768         test_desc.addr_lo = buf_dma & 0xffffffff;
10769         test_desc.nic_mbuf = 0x00002100;
10770         test_desc.len = size;
10771
10772         /*
10773          * HP ZX1 systems saw test failures with 5701 cards running at 33MHz
10774          * the *second* time the tg3 driver was loaded after an
10775          * initial scan.
10776          *
10777          * Broadcom tells me:
10778          *   ...the DMA engine is connected to the GRC block and a DMA
10779          *   reset may affect the GRC block in some unpredictable way...
10780          *   The behavior of resets to individual blocks has not been tested.
10781          *
10782          * Broadcom noted the GRC reset will also reset all sub-components.
10783          */
10784         if (to_device) {
10785                 test_desc.cqid_sqid = (13 << 8) | 2;
10786
10787                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10788                 udelay(40);
10789         } else {
10790                 test_desc.cqid_sqid = (16 << 8) | 7;
10791
10792                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10793                 udelay(40);
10794         }
10795         test_desc.flags = 0x00000005;
10796
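              /* Copy the test descriptor into NIC SRAM one 32-bit word at
               * a time through the PCI memory window registers.
               */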
10797         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10798                 u32 val;
10799
10800                 val = *(((u32 *)&test_desc) + i);
10801                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10802                                        sram_dma_descs + (i * sizeof(u32)));
10803                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10804         }
10805         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10806
10807         if (to_device) {
10808                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10809         } else {
10810                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10811         }
10812
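              /* Poll the completion FIFO for up to 40 * 100us (~4 ms) for
               * the descriptor address to show up.
               */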
10813         ret = -ENODEV;
10814         for (i = 0; i < 40; i++) {
10815                 u32 val;
10816
10817                 if (to_device)
10818                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10819                 else
10820                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10821                 if ((val & 0xffff) == sram_dma_descs) {
10822                         ret = 0;
10823                         break;
10824                 }
10825
10826                 udelay(100);
10827         }
10828
10829         return ret;
10830 }
10831
10832 #define TEST_BUFFER_SIZE        0x2000
10833
10834 static int __devinit tg3_test_dma(struct tg3 *tp)
10835 {
10836         dma_addr_t buf_dma;
10837         u32 *buf, saved_dma_rwctrl;
10838         int ret;
10839
10840         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10841         if (!buf) {
10842                 ret = -ENOMEM;
10843                 goto out_nofree;
10844         }
10845
10846         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10847                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10848
10849         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
10850
10851         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10852                 /* DMA read watermark not used on PCIE */
10853                 tp->dma_rwctrl |= 0x00180000;
10854         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10855                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10856                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10857                         tp->dma_rwctrl |= 0x003f0000;
10858                 else
10859                         tp->dma_rwctrl |= 0x003f000f;
10860         } else {
10861                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10862                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10863                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10864
10865                         /* If the 5704 is behind the EPB bridge, we can
10866                          * do the less restrictive ONE_DMA workaround for
10867                          * better performance.
10868                          */
10869                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
10870                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10871                                 tp->dma_rwctrl |= 0x8000;
10872                         else if (ccval == 0x6 || ccval == 0x7)
10873                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10874
10875                         /* Set bit 23 to enable PCIX hw bug fix */
10876                         tp->dma_rwctrl |= 0x009f0000;
10877                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10878                         /* 5780 always in PCIX mode */
10879                         tp->dma_rwctrl |= 0x00144000;
10880                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10881                         /* 5714 always in PCIX mode */
10882                         tp->dma_rwctrl |= 0x00148000;
10883                 } else {
10884                         tp->dma_rwctrl |= 0x001b000f;
10885                 }
10886         }
10887
10888         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10889             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10890                 tp->dma_rwctrl &= 0xfffffff0;
10891
10892         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10893             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10894                 /* Remove this if it causes problems for some boards. */
10895                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10896
10897                 /* On 5700/5701 chips, we need to set this bit.
10898                  * Otherwise the chip will issue cacheline transactions
10899                  * to streamable DMA memory with not all the byte
10900                  * enables turned on.  This is an error on several
10901                  * RISC PCI controllers, in particular sparc64.
10902                  *
10903                  * On 5703/5704 chips, this bit has been reassigned
10904                  * a different meaning.  In particular, it is used
10905                  * on those chips to enable a PCI-X workaround.
10906                  */
10907                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10908         }
10909
10910         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10911
10912 #if 0
10913         /* Unneeded, already done by tg3_get_invariants.  */
10914         tg3_switch_clocks(tp);
10915 #endif
10916
10917         ret = 0;
10918         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10919             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10920                 goto out;
10921
10922         /* It is best to perform DMA test with maximum write burst size
10923          * to expose the 5700/5701 write DMA bug.
10924          */
10925         saved_dma_rwctrl = tp->dma_rwctrl;
10926         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10927         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10928
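              /* DMA a known pattern to the chip and back, and verify it.
               * If corruption is seen, retry with a 16-byte write boundary.
               */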
10929         while (1) {
10930                 u32 *p = buf, i;
10931
10932                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10933                         p[i] = i;
10934
10935                 /* Send the buffer to the chip. */
10936                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10937                 if (ret) {
10938                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
10939                         break;
10940                 }
10941
10942 #if 0
10943                 /* validate data reached card RAM correctly. */
10944                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10945                         u32 val;
10946                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
10947                         if (le32_to_cpu(val) != p[i]) {
10948                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
10949                                 /* ret = -ENODEV here? */
10950                         }
10951                         p[i] = 0;
10952                 }
10953 #endif
10954                 /* Now read it back. */
10955                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10956                 if (ret) {
10957                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
10958
10959                         break;
10960                 }
10961
10962                 /* Verify it. */
10963                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10964                         if (p[i] == i)
10965                                 continue;
10966
10967                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10968                             DMA_RWCTRL_WRITE_BNDRY_16) {
10969                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10970                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10971                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10972                                 break;
10973                         } else {
10974                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10975                                 ret = -ENODEV;
10976                                 goto out;
10977                         }
10978                 }
10979
10980                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10981                         /* Success. */
10982                         ret = 0;
10983                         break;
10984                 }
10985         }
10986         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10987             DMA_RWCTRL_WRITE_BNDRY_16) {
10988                 static struct pci_device_id dma_wait_state_chipsets[] = {
10989                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10990                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10991                         { },
10992                 };
10993
10994                 /* DMA test passed without adjusting DMA boundary,
10995                  * now look for chipsets that are known to expose the
10996                  * DMA bug without failing the test.
10997                  */
10998                 if (pci_dev_present(dma_wait_state_chipsets)) {
10999                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11000                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11001                 }
11002                 else
11003                         /* Safe to use the calculated DMA boundary. */
11004                         tp->dma_rwctrl = saved_dma_rwctrl;
11005
11006                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11007         }
11008
11009 out:
11010         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
11011 out_nofree:
11012         return ret;
11013 }
11014
11015 static void __devinit tg3_init_link_config(struct tg3 *tp)
11016 {
11017         tp->link_config.advertising =
11018                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11019                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11020                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11021                  ADVERTISED_Autoneg | ADVERTISED_MII);
11022         tp->link_config.speed = SPEED_INVALID;
11023         tp->link_config.duplex = DUPLEX_INVALID;
11024         tp->link_config.autoneg = AUTONEG_ENABLE;
11025         tp->link_config.active_speed = SPEED_INVALID;
11026         tp->link_config.active_duplex = DUPLEX_INVALID;
11027         tp->link_config.phy_is_low_power = 0;
11028         tp->link_config.orig_speed = SPEED_INVALID;
11029         tp->link_config.orig_duplex = DUPLEX_INVALID;
11030         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11031 }
11032
11033 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11034 {
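              /* Select mbuf and DMA watermarks appropriate for 5705-class
               * chips vs. older parts, for standard and jumbo frames.
               */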
11035         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11036                 tp->bufmgr_config.mbuf_read_dma_low_water =
11037                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11038                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11039                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11040                 tp->bufmgr_config.mbuf_high_water =
11041                         DEFAULT_MB_HIGH_WATER_5705;
11042
11043                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11044                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11045                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11046                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11047                 tp->bufmgr_config.mbuf_high_water_jumbo =
11048                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11049         } else {
11050                 tp->bufmgr_config.mbuf_read_dma_low_water =
11051                         DEFAULT_MB_RDMA_LOW_WATER;
11052                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11053                         DEFAULT_MB_MACRX_LOW_WATER;
11054                 tp->bufmgr_config.mbuf_high_water =
11055                         DEFAULT_MB_HIGH_WATER;
11056
11057                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11058                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11059                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11060                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11061                 tp->bufmgr_config.mbuf_high_water_jumbo =
11062                         DEFAULT_MB_HIGH_WATER_JUMBO;
11063         }
11064
11065         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11066         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11067 }
11068
11069 static char * __devinit tg3_phy_string(struct tg3 *tp)
11070 {
11071         switch (tp->phy_id & PHY_ID_MASK) {
11072         case PHY_ID_BCM5400:    return "5400";
11073         case PHY_ID_BCM5401:    return "5401";
11074         case PHY_ID_BCM5411:    return "5411";
11075         case PHY_ID_BCM5701:    return "5701";
11076         case PHY_ID_BCM5703:    return "5703";
11077         case PHY_ID_BCM5704:    return "5704";
11078         case PHY_ID_BCM5705:    return "5705";
11079         case PHY_ID_BCM5750:    return "5750";
11080         case PHY_ID_BCM5752:    return "5752";
11081         case PHY_ID_BCM5714:    return "5714";
11082         case PHY_ID_BCM5780:    return "5780";
11083         case PHY_ID_BCM5755:    return "5755";
11084         case PHY_ID_BCM5787:    return "5787";
11085         case PHY_ID_BCM8002:    return "8002/serdes";
11086         case 0:                 return "serdes";
11087         default:                return "unknown";
11088         }
11089 }
11090
11091 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11092 {
11093         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11094                 strcpy(str, "PCI Express");
11095                 return str;
11096         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11097                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11098
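                      /* Decode the PCI-X bus speed from the low 5 bits of
                       * CLOCK_CTRL.
                       */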
11099                 strcpy(str, "PCIX:");
11100
11101                 if ((clock_ctrl == 7) ||
11102                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11103                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11104                         strcat(str, "133MHz");
11105                 else if (clock_ctrl == 0)
11106                         strcat(str, "33MHz");
11107                 else if (clock_ctrl == 2)
11108                         strcat(str, "50MHz");
11109                 else if (clock_ctrl == 4)
11110                         strcat(str, "66MHz");
11111                 else if (clock_ctrl == 6)
11112                         strcat(str, "100MHz");
11113         } else {
11114                 strcpy(str, "PCI:");
11115                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11116                         strcat(str, "66MHz");
11117                 else
11118                         strcat(str, "33MHz");
11119         }
11120         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11121                 strcat(str, ":32-bit");
11122         else
11123                 strcat(str, ":64-bit");
11124         return str;
11125 }
11126
11127 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11128 {
11129         struct pci_dev *peer;
11130         unsigned int func, devnr = tp->pdev->devfn & ~7;
11131
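              /* Scan the other PCI functions in this slot looking for the
               * second port of a dual-port device.
               */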
11132         for (func = 0; func < 8; func++) {
11133                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11134                 if (peer && peer != tp->pdev)
11135                         break;
11136                 pci_dev_put(peer);
11137         }
11138         /* The 5704 can be configured in single-port mode; set peer to
11139          * tp->pdev in that case.
11140          */
11141         if (!peer) {
11142                 peer = tp->pdev;
11143                 return peer;
11144         }
11145
11146         /*
11147          * We don't need to keep the refcount elevated; there's no way
11148          * to remove one half of this device without removing the other.
11149          */
11150         pci_dev_put(peer);
11151
11152         return peer;
11153 }
11154
11155 static void __devinit tg3_init_coal(struct tg3 *tp)
11156 {
11157         struct ethtool_coalesce *ec = &tp->coal;
11158
11159         memset(ec, 0, sizeof(*ec));
11160         ec->cmd = ETHTOOL_GCOALESCE;
11161         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11162         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11163         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11164         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11165         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11166         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11167         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11168         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11169         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11170
11171         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11172                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11173                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11174                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11175                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11176                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11177         }
11178
11179         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11180                 ec->rx_coalesce_usecs_irq = 0;
11181                 ec->tx_coalesce_usecs_irq = 0;
11182                 ec->stats_block_coalesce_usecs = 0;
11183         }
11184 }
11185
11186 static int __devinit tg3_init_one(struct pci_dev *pdev,
11187                                   const struct pci_device_id *ent)
11188 {
11189         static int tg3_version_printed;
11190         unsigned long tg3reg_base, tg3reg_len;
11191         struct net_device *dev;
11192         struct tg3 *tp;
11193         int i, err, pm_cap;
11194         char str[40];
11195         u64 dma_mask, persist_dma_mask;
11196
11197         if (tg3_version_printed++ == 0)
11198                 printk(KERN_INFO "%s", version);
11199
11200         err = pci_enable_device(pdev);
11201         if (err) {
11202                 printk(KERN_ERR PFX "Cannot enable PCI device, "
11203                        "aborting.\n");
11204                 return err;
11205         }
11206
11207         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11208                 printk(KERN_ERR PFX "Cannot find proper PCI device "
11209                        "base address, aborting.\n");
11210                 err = -ENODEV;
11211                 goto err_out_disable_pdev;
11212         }
11213
11214         err = pci_request_regions(pdev, DRV_MODULE_NAME);
11215         if (err) {
11216                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11217                        "aborting.\n");
11218                 goto err_out_disable_pdev;
11219         }
11220
11221         pci_set_master(pdev);
11222
11223         /* Find power-management capability. */
11224         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11225         if (pm_cap == 0) {
11226                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11227                        "aborting.\n");
11228                 err = -EIO;
11229                 goto err_out_free_res;
11230         }
11231
11232         tg3reg_base = pci_resource_start(pdev, 0);
11233         tg3reg_len = pci_resource_len(pdev, 0);
11234
11235         dev = alloc_etherdev(sizeof(*tp));
11236         if (!dev) {
11237                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11238                 err = -ENOMEM;
11239                 goto err_out_free_res;
11240         }
11241
11242         SET_MODULE_OWNER(dev);
11243         SET_NETDEV_DEV(dev, &pdev->dev);
11244
11245         dev->features |= NETIF_F_LLTX;
11246 #if TG3_VLAN_TAG_USED
11247         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11248         dev->vlan_rx_register = tg3_vlan_rx_register;
11249         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
11250 #endif
11251
11252         tp = netdev_priv(dev);
11253         tp->pdev = pdev;
11254         tp->dev = dev;
11255         tp->pm_cap = pm_cap;
11256         tp->mac_mode = TG3_DEF_MAC_MODE;
11257         tp->rx_mode = TG3_DEF_RX_MODE;
11258         tp->tx_mode = TG3_DEF_TX_MODE;
11259         tp->mi_mode = MAC_MI_MODE_BASE;
11260         if (tg3_debug > 0)
11261                 tp->msg_enable = tg3_debug;
11262         else
11263                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11264
11265         /* The word/byte swap controls here control register access byte
11266          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
11267          * setting below.
11268          */
11269         tp->misc_host_ctrl =
11270                 MISC_HOST_CTRL_MASK_PCI_INT |
11271                 MISC_HOST_CTRL_WORD_SWAP |
11272                 MISC_HOST_CTRL_INDIR_ACCESS |
11273                 MISC_HOST_CTRL_PCISTATE_RW;
11274
11275         /* The NONFRM (non-frame) byte/word swap controls take effect
11276          * on descriptor entries, i.e. anything which isn't packet data.
11277          *
11278          * The StrongARM chips on the board (one for tx, one for rx)
11279          * are running in big-endian mode.
11280          */
11281         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11282                         GRC_MODE_WSWAP_NONFRM_DATA);
11283 #ifdef __BIG_ENDIAN
11284         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11285 #endif
11286         spin_lock_init(&tp->lock);
11287         spin_lock_init(&tp->tx_lock);
11288         spin_lock_init(&tp->indirect_lock);
11289         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
11290
11291         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11292         if (!tp->regs) {
11293                 printk(KERN_ERR PFX "Cannot map device registers, "
11294                        "aborting.\n");
11295                 err = -ENOMEM;
11296                 goto err_out_free_dev;
11297         }
11298
11299         tg3_init_link_config(tp);
11300
11301         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11302         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11303         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11304
11305         dev->open = tg3_open;
11306         dev->stop = tg3_close;
11307         dev->get_stats = tg3_get_stats;
11308         dev->set_multicast_list = tg3_set_rx_mode;
11309         dev->set_mac_address = tg3_set_mac_addr;
11310         dev->do_ioctl = tg3_ioctl;
11311         dev->tx_timeout = tg3_tx_timeout;
11312         dev->poll = tg3_poll;
11313         dev->ethtool_ops = &tg3_ethtool_ops;
11314         dev->weight = 64;
11315         dev->watchdog_timeo = TG3_TX_TIMEOUT;
11316         dev->change_mtu = tg3_change_mtu;
11317         dev->irq = pdev->irq;
11318 #ifdef CONFIG_NET_POLL_CONTROLLER
11319         dev->poll_controller = tg3_poll_controller;
11320 #endif
11321
11322         err = tg3_get_invariants(tp);
11323         if (err) {
11324                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11325                        "aborting.\n");
11326                 goto err_out_iounmap;
11327         }
11328
11329         /* The EPB bridge inside 5714, 5715, and 5780 and any
11330          * device behind the EPB cannot support DMA addresses > 40-bit.
11331          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11332          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11333          * do DMA address check in tg3_start_xmit().
11334          */
11335         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11336                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11337         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11338                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11339 #ifdef CONFIG_HIGHMEM
11340                 dma_mask = DMA_64BIT_MASK;
11341 #endif
11342         } else
11343                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11344
11345         /* Configure DMA attributes. */
11346         if (dma_mask > DMA_32BIT_MASK) {
11347                 err = pci_set_dma_mask(pdev, dma_mask);
11348                 if (!err) {
11349                         dev->features |= NETIF_F_HIGHDMA;
11350                         err = pci_set_consistent_dma_mask(pdev,
11351                                                           persist_dma_mask);
11352                         if (err < 0) {
11353                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11354                                        "DMA for consistent allocations\n");
11355                                 goto err_out_iounmap;
11356                         }
11357                 }
11358         }
11359         if (err || dma_mask == DMA_32BIT_MASK) {
11360                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11361                 if (err) {
11362                         printk(KERN_ERR PFX "No usable DMA configuration, "
11363                                "aborting.\n");
11364                         goto err_out_iounmap;
11365                 }
11366         }
11367
11368         tg3_init_bufmgr_config(tp);
11369
11370 #if TG3_TSO_SUPPORT != 0
11371         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11372                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11373         }
11374         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11375             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11376             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11377             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11378                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11379         } else {
11380                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11381         }
11382
11383         /* TSO is on by default on chips that support hardware TSO.
11384          * Firmware TSO on older chips gives lower performance, so it
11385          * is off by default, but can be enabled using ethtool.
11386          */
11387         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
11388                 dev->features |= NETIF_F_TSO;
11389
11390 #endif
11391
11392         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11393             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11394             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11395                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11396                 tp->rx_pending = 63;
11397         }
11398
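              /* The 5704 and 5714 are dual-port parts; record the peer
               * function for later use.
               */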
11399         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11400             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11401                 tp->pdev_peer = tg3_find_peer(tp);
11402
11403         err = tg3_get_device_address(tp);
11404         if (err) {
11405                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11406                        "aborting.\n");
11407                 goto err_out_iounmap;
11408         }
11409
11410         /*
11411          * Reset the chip in case the UNDI or EFI driver did not shut
11412          * down DMA.  Otherwise the DMA self test will enable WDMAC and
11413          * we'll see (spurious) pending DMA on the PCI bus at that point.
11414          */
11415         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11416             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11417                 pci_save_state(tp->pdev);
11418                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11419                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11420         }
11421
11422         err = tg3_test_dma(tp);
11423         if (err) {
11424                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11425                 goto err_out_iounmap;
11426         }
11427
11428         /* Tigon3 can do IPv4 only... and some chips have buggy
11429          * checksumming.
11430          */
11431         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11432                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11433                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11434                         dev->features |= NETIF_F_HW_CSUM;
11435                 else
11436                         dev->features |= NETIF_F_IP_CSUM;
11437                 dev->features |= NETIF_F_SG;
11438                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11439         } else
11440                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11441
11442         /* flow control autonegotiation is default behavior */
11443         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11444
11445         tg3_init_coal(tp);
11446
11447         /* Now that we have fully setup the chip, save away a snapshot
11448          * of the PCI config space.  We need to restore this after
11449          * GRC_MISC_CFG core clock resets and some resume events.
11450          */
11451         pci_save_state(tp->pdev);
11452
11453         err = register_netdev(dev);
11454         if (err) {
11455                 printk(KERN_ERR PFX "Cannot register net device, "
11456                        "aborting.\n");
11457                 goto err_out_iounmap;
11458         }
11459
11460         pci_set_drvdata(pdev, dev);
11461
11462         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
11463                dev->name,
11464                tp->board_part_number,
11465                tp->pci_chip_rev_id,
11466                tg3_phy_string(tp),
11467                tg3_bus_string(tp, str),
11468                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
11469
11470         for (i = 0; i < 6; i++)
11471                 printk("%2.2x%c", dev->dev_addr[i],
11472                        i == 5 ? '\n' : ':');
11473
11474         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11475                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11476                "TSOcap[%d] \n",
11477                dev->name,
11478                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11479                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11480                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11481                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11482                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11483                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11484                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11485         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11486                dev->name, tp->dma_rwctrl,
11487                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11488                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
11489
11490         netif_carrier_off(tp->dev);
11491
11492         return 0;
11493
11494 err_out_iounmap:
11495         if (tp->regs) {
11496                 iounmap(tp->regs);
11497                 tp->regs = NULL;
11498         }
11499
11500 err_out_free_dev:
11501         free_netdev(dev);
11502
11503 err_out_free_res:
11504         pci_release_regions(pdev);
11505
11506 err_out_disable_pdev:
11507         pci_disable_device(pdev);
11508         pci_set_drvdata(pdev, NULL);
11509         return err;
11510 }
11511
11512 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11513 {
11514         struct net_device *dev = pci_get_drvdata(pdev);
11515
11516         if (dev) {
11517                 struct tg3 *tp = netdev_priv(dev);
11518
11519                 flush_scheduled_work();
11520                 unregister_netdev(dev);
11521                 if (tp->regs) {
11522                         iounmap(tp->regs);
11523                         tp->regs = NULL;
11524                 }
11525                 free_netdev(dev);
11526                 pci_release_regions(pdev);
11527                 pci_disable_device(pdev);
11528                 pci_set_drvdata(pdev, NULL);
11529         }
11530 }
11531
11532 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11533 {
11534         struct net_device *dev = pci_get_drvdata(pdev);
11535         struct tg3 *tp = netdev_priv(dev);
11536         int err;
11537
11538         if (!netif_running(dev))
11539                 return 0;
11540
11541         flush_scheduled_work();
11542         tg3_netif_stop(tp);
11543
11544         del_timer_sync(&tp->timer);
11545
11546         tg3_full_lock(tp, 1);
11547         tg3_disable_ints(tp);
11548         tg3_full_unlock(tp);
11549
11550         netif_device_detach(dev);
11551
11552         tg3_full_lock(tp, 0);
11553         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11554         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
11555         tg3_full_unlock(tp);
11556
11557         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
11558         if (err) {
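                      /* Could not enter the requested power state; bring the
                       * hardware back up so the device remains usable.
                       */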
11559                 tg3_full_lock(tp, 0);
11560
11561                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11562                 tg3_init_hw(tp);
11563
11564                 tp->timer.expires = jiffies + tp->timer_offset;
11565                 add_timer(&tp->timer);
11566
11567                 netif_device_attach(dev);
11568                 tg3_netif_start(tp);
11569
11570                 tg3_full_unlock(tp);
11571         }
11572
11573         return err;
11574 }
11575
11576 static int tg3_resume(struct pci_dev *pdev)
11577 {
11578         struct net_device *dev = pci_get_drvdata(pdev);
11579         struct tg3 *tp = netdev_priv(dev);
11580         int err;
11581
11582         if (!netif_running(dev))
11583                 return 0;
11584
11585         pci_restore_state(tp->pdev);
11586
11587         err = tg3_set_power_state(tp, PCI_D0);
11588         if (err)
11589                 return err;
11590
11591         netif_device_attach(dev);
11592
11593         tg3_full_lock(tp, 0);
11594
11595         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11596         tg3_init_hw(tp);
11597
11598         tp->timer.expires = jiffies + tp->timer_offset;
11599         add_timer(&tp->timer);
11600
11601         tg3_netif_start(tp);
11602
11603         tg3_full_unlock(tp);
11604
11605         return 0;
11606 }
11607
11608 static struct pci_driver tg3_driver = {
11609         .name           = DRV_MODULE_NAME,
11610         .id_table       = tg3_pci_tbl,
11611         .probe          = tg3_init_one,
11612         .remove         = __devexit_p(tg3_remove_one),
11613         .suspend        = tg3_suspend,
11614         .resume         = tg3_resume
11615 };
11616
11617 static int __init tg3_init(void)
11618 {
11619         return pci_module_init(&tg3_driver);
11620 }
11621
11622 static void __exit tg3_cleanup(void)
11623 {
11624         pci_unregister_driver(&tg3_driver);
11625 }
11626
11627 module_init(tg3_init);
11628 module_exit(tg3_cleanup);