/*
 * Source: ~andy/linux, drivers/net/tg3.c
 * Commit context: "[TG3]: Kill some less useful flags"
 */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mii.h>
36 #include <linux/if_vlan.h>
37 #include <linux/ip.h>
38 #include <linux/tcp.h>
39 #include <linux/workqueue.h>
40 #include <linux/prefetch.h>
41 #include <linux/dma-mapping.h>
42
43 #include <net/checksum.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC64
51 #include <asm/idprom.h>
52 #include <asm/oplib.h>
53 #include <asm/pbm.h>
54 #endif
55
56 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
57 #define TG3_VLAN_TAG_USED 1
58 #else
59 #define TG3_VLAN_TAG_USED 0
60 #endif
61
62 #ifdef NETIF_F_TSO
63 #define TG3_TSO_SUPPORT 1
64 #else
65 #define TG3_TSO_SUPPORT 0
66 #endif
67
68 #include "tg3.h"
69
70 #define DRV_MODULE_NAME         "tg3"
71 #define PFX DRV_MODULE_NAME     ": "
72 #define DRV_MODULE_VERSION      "3.56"
73 #define DRV_MODULE_RELDATE      "Apr 1, 2006"
74
75 #define TG3_DEF_MAC_MODE        0
76 #define TG3_DEF_RX_MODE         0
77 #define TG3_DEF_TX_MODE         0
78 #define TG3_DEF_MSG_ENABLE        \
79         (NETIF_MSG_DRV          | \
80          NETIF_MSG_PROBE        | \
81          NETIF_MSG_LINK         | \
82          NETIF_MSG_TIMER        | \
83          NETIF_MSG_IFDOWN       | \
84          NETIF_MSG_IFUP         | \
85          NETIF_MSG_RX_ERR       | \
86          NETIF_MSG_TX_ERR)
87
88 /* length of time before we decide the hardware is borked,
89  * and dev->tx_timeout() should be called to fix the problem
90  */
91 #define TG3_TX_TIMEOUT                  (5 * HZ)
92
93 /* hardware minimum and maximum for a single frame's data payload */
94 #define TG3_MIN_MTU                     60
95 #define TG3_MAX_MTU(tp) \
96         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97
98 /* These numbers seem to be hard coded in the NIC firmware somehow.
99  * You can't change the ring sizes, but you can change where you place
100  * them in the NIC onboard memory.
101  */
102 #define TG3_RX_RING_SIZE                512
103 #define TG3_DEF_RX_RING_PENDING         200
104 #define TG3_RX_JUMBO_RING_SIZE          256
105 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
106
107 /* Do not place this n-ring entries value into the tp struct itself,
108  * we really want to expose these constants to GCC so that modulo et
109  * al.  operations are done with shifts and masks instead of with
110  * hw multiply/modulo instructions.  Another solution would be to
111  * replace things like '% foo' with '& (foo - 1)'.
112  */
113 #define TG3_RX_RCB_RING_SIZE(tp)        \
114         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
115
116 #define TG3_TX_RING_SIZE                512
117 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
118
119 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
120                                  TG3_RX_RING_SIZE)
121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122                                  TG3_RX_JUMBO_RING_SIZE)
123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124                                    TG3_RX_RCB_RING_SIZE(tp))
125 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
126                                  TG3_TX_RING_SIZE)
127 #define TX_BUFFS_AVAIL(TP)                                              \
128         ((TP)->tx_pending -                                             \
129          (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
130 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
131
132 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
134
135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
137
138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
141 #define TG3_NUM_TEST            6
142
143 static char version[] __devinitdata =
144         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION);
150
151 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug, int, 0);
153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
154
155 static struct pci_device_id tg3_pci_tbl[] = {
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
231           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
239           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
241           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
243           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
245           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
246         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
247           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
248         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
249           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
250         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
251           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
252         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
253           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
254         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
255           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
256         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
257           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
258         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
259           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
260         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
261           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
262         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
263           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
264         { 0, }
265 };
266
267 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
268
269 static struct {
270         const char string[ETH_GSTRING_LEN];
271 } ethtool_stats_keys[TG3_NUM_STATS] = {
272         { "rx_octets" },
273         { "rx_fragments" },
274         { "rx_ucast_packets" },
275         { "rx_mcast_packets" },
276         { "rx_bcast_packets" },
277         { "rx_fcs_errors" },
278         { "rx_align_errors" },
279         { "rx_xon_pause_rcvd" },
280         { "rx_xoff_pause_rcvd" },
281         { "rx_mac_ctrl_rcvd" },
282         { "rx_xoff_entered" },
283         { "rx_frame_too_long_errors" },
284         { "rx_jabbers" },
285         { "rx_undersize_packets" },
286         { "rx_in_length_errors" },
287         { "rx_out_length_errors" },
288         { "rx_64_or_less_octet_packets" },
289         { "rx_65_to_127_octet_packets" },
290         { "rx_128_to_255_octet_packets" },
291         { "rx_256_to_511_octet_packets" },
292         { "rx_512_to_1023_octet_packets" },
293         { "rx_1024_to_1522_octet_packets" },
294         { "rx_1523_to_2047_octet_packets" },
295         { "rx_2048_to_4095_octet_packets" },
296         { "rx_4096_to_8191_octet_packets" },
297         { "rx_8192_to_9022_octet_packets" },
298
299         { "tx_octets" },
300         { "tx_collisions" },
301
302         { "tx_xon_sent" },
303         { "tx_xoff_sent" },
304         { "tx_flow_control" },
305         { "tx_mac_errors" },
306         { "tx_single_collisions" },
307         { "tx_mult_collisions" },
308         { "tx_deferred" },
309         { "tx_excessive_collisions" },
310         { "tx_late_collisions" },
311         { "tx_collide_2times" },
312         { "tx_collide_3times" },
313         { "tx_collide_4times" },
314         { "tx_collide_5times" },
315         { "tx_collide_6times" },
316         { "tx_collide_7times" },
317         { "tx_collide_8times" },
318         { "tx_collide_9times" },
319         { "tx_collide_10times" },
320         { "tx_collide_11times" },
321         { "tx_collide_12times" },
322         { "tx_collide_13times" },
323         { "tx_collide_14times" },
324         { "tx_collide_15times" },
325         { "tx_ucast_packets" },
326         { "tx_mcast_packets" },
327         { "tx_bcast_packets" },
328         { "tx_carrier_sense_errors" },
329         { "tx_discards" },
330         { "tx_errors" },
331
332         { "dma_writeq_full" },
333         { "dma_write_prioq_full" },
334         { "rxbds_empty" },
335         { "rx_discards" },
336         { "rx_errors" },
337         { "rx_threshold_hit" },
338
339         { "dma_readq_full" },
340         { "dma_read_prioq_full" },
341         { "tx_comp_queue_full" },
342
343         { "ring_set_send_prod_index" },
344         { "ring_status_update" },
345         { "nic_irqs" },
346         { "nic_avoided_irqs" },
347         { "nic_tx_threshold_hit" }
348 };
349
350 static struct {
351         const char string[ETH_GSTRING_LEN];
352 } ethtool_test_keys[TG3_NUM_TEST] = {
353         { "nvram test     (online) " },
354         { "link test      (online) " },
355         { "register test  (offline)" },
356         { "memory test    (offline)" },
357         { "loopback test  (offline)" },
358         { "interrupt test (offline)" },
359 };
360
/* Posted MMIO write of a 32-bit value to register @off.
 * No read-back flush is performed here; callers needing a flush use
 * tg3_write_flush_reg32()/_tw32_flush() instead.
 */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}
365
366 static u32 tg3_read32(struct tg3 *tp, u32 off)
367 {
368         return (readl(tp->regs + off)); 
369 }
370
/* Indirect register write: program the register address into the PCI
 * config-space window, then write the data through it.  The lock keeps
 * the BASE_ADDR/DATA pair atomic against other indirect accesses.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
380
/* MMIO write followed by a read-back of the same register, forcing the
 * posted write to reach the chip before this function returns.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}
386
/* Indirect register read via the PCI config-space window; the lock keeps
 * the BASE_ADDR/DATA sequence atomic.  Returns the register value.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
398
/* Indirect mailbox write.  Two mailboxes have dedicated config-space
 * aliases and are written directly; all others go through the indirect
 * register window at mailbox offset + 0x5600.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        /* RX return ring consumer index has its own config-space alias. */
        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        /* Standard RX producer index likewise. */
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
428
/* Indirect mailbox read through the register window at offset + 0x5600,
 * mirroring tg3_write_indirect_mbox()'s non-aliased path.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
440
441 /* usec_wait specifies the wait time in usec when writing to certain registers
442  * where it is unsafe to read back the register without some delay.
443  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
444  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
445  */
446 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
447 {
448         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
449             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
450                 /* Non-posted methods */
451                 tp->write32(tp, off, val);
452         else {
453                 /* Posted method */
454                 tg3_write32(tp, off, val);
455                 if (usec_wait)
456                         udelay(usec_wait);
457                 tp->read32(tp, off);
458         }
459         /* Wait again after the read for the posted method to guarantee that
460          * the wait time is met.
461          */
462         if (usec_wait)
463                 udelay(usec_wait);
464 }
465
/* Mailbox write with an optional read-back flush.  The read is skipped
 * on chips where mailbox reads are unsafe (write-reorder or ICH
 * workarounds active).
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}
473
/* TX mailbox write.  Chips with the TXD mailbox hardware bug need the
 * value written twice; chips prone to write reordering need a read-back
 * to force ordering.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}
483
/* Register/mailbox access shorthands.  These dispatch through the
 * method pointers installed at probe time, so the same call sites work
 * for both direct MMIO and the indirect (config-space window) paths.
 * All of them expect a local "tp" in scope.
 */
#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)               tp->read32(tp, reg)
494
/* Write a word into NIC on-board SRAM through the config-space memory
 * window, restoring the window base to zero afterwards.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

        /* Always leave this as zero. */
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
507
/* Fast-path SRAM write: go straight through the MMIO window when no
 * indirect-access workaround is installed, otherwise fall back to the
 * locked config-space path.
 */
static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
{
        /* If no workaround is needed, write to mem space directly */
        if (tp->write32 != tg3_write_indirect_reg32)
                tw32(NIC_SRAM_WIN_BASE + off, val);
        else
                tg3_write_mem(tp, off, val);
}
516
/* Read a word from NIC on-board SRAM through the config-space memory
 * window into *@val, restoring the window base to zero afterwards.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

        /* Always leave this as zero. */
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
529
/* Mask the chip's PCI interrupt output and write 1 to the interrupt
 * mailbox, which disables further status-block interrupt delivery.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
536
/* Force an interrupt via GRC local control if the status block shows an
 * un-serviced update.  Skipped in tagged-status mode, where the tag
 * mechanism already tracks outstanding work.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
}
543
/* Re-enable chip interrupts: clear irq_sync (with a write barrier so
 * other CPUs observe it before the hardware can fire), unmask the PCI
 * interrupt, and acknowledge up to the last seen status tag.  1-shot
 * MSI chips need the mailbox written a second time.  Finally force an
 * interrupt if work was already pending.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               (tp->last_tag << 24));
        tg3_cond_int(tp);
}
558
559 static inline unsigned int tg3_has_work(struct tg3 *tp)
560 {
561         struct tg3_hw_status *sblk = tp->hw_status;
562         unsigned int work_exists = 0;
563
564         /* check for phy events */
565         if (!(tp->tg3_flags &
566               (TG3_FLAG_USE_LINKCHG_REG |
567                TG3_FLAG_POLL_SERDES))) {
568                 if (sblk->status & SD_STATUS_LINK_CHG)
569                         work_exists = 1;
570         }
571         /* check for RX/TX work to do */
572         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
573             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
574                 work_exists = 1;
575
576         return work_exists;
577 }
578
579 /* tg3_restart_ints
580  *  similar to tg3_enable_ints, but it accurately determines whether there
581  *  is new work pending and can return without flushing the PIO write
582  *  which reenables interrupts 
583  */
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        mmiowb();       /* order the mailbox write before any later MMIO */

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
599
/* Quiesce the network interface: stop the poll (NAPI) handler and the
 * TX queue.  trans_start is refreshed first so the watchdog does not
 * declare a TX timeout while we are deliberately stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        netif_poll_disable(tp->dev);
        netif_tx_disable(tp->dev);
}
606
/* Undo tg3_netif_stop(): restart the TX queue and poll handler, then
 * mark the status block updated and re-enable interrupts so any work
 * that arrived while stopped is picked up.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        netif_poll_enable(tp->dev);
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}
618
/* Switch the chip back to its normal core clock.  Preserves only the
 * CLKRUN bits and the low 5 bits of CLOCK_CTRL; 5705+ chips may need the
 * 625 MHz core clock re-asserted first, while older chips stepping down
 * from the 44 MHz ALTCLK need the documented two-stage sequence.  Each
 * write uses tw32_wait_f() because the clock registers cannot be safely
 * read back immediately (see comment at _tw32_flush).  No-op on 5780-
 * class chips.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
                return;

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
649
650 #define PHY_BUSY_LOOPS  5000
651
/* Read PHY register @reg over MDIO into *@val.
 * Auto-polling is temporarily disabled so the manual MI_COM transaction
 * does not race the MAC's own polling, and restored on exit.  Returns 0
 * on success, -EBUSY if the MI interface stays busy for PHY_BUSY_LOOPS
 * iterations.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        /* Build the MI_COM frame: PHY address, register, read command. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        /* Re-read after a short settle to get stable data. */
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        /* Restore auto-polling if it was enabled on entry. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
700
/* Write @val to PHY register @reg over MDIO.
 * Mirrors tg3_readphy(): auto-polling is paused around the manual
 * MI_COM transaction and restored afterwards.  Returns 0 on success,
 * -EBUSY on MI-interface timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        /* Build the MI_COM frame: PHY address, register, data, write cmd. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        /* Restore auto-polling if it was enabled on entry. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
745
/* Enable the PHY's "wire speed" (automatic speed downshift) feature via
 * the AUX_CTRL shadow register, unless the chip is flagged as not
 * supporting it.  Bits 15 and 4 appear to be the enable bits for this
 * shadow page -- NOTE(review): magic values, presumed from Broadcom
 * PHY documentation; confirm before changing.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
                return;

        /* Read-modify-write: select shadow 0x7007, then OR in the bits. */
        if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
            !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
                tg3_writephy(tp, MII_TG3_AUX_CTRL,
                             (val | (1 << 15) | (1 << 4)));
}
758
759 static int tg3_bmcr_reset(struct tg3 *tp)
760 {
761         u32 phy_control;
762         int limit, err;
763
764         /* OK, reset it, and poll the BMCR_RESET bit until it
765          * clears or we time out.
766          */
767         phy_control = BMCR_RESET;
768         err = tg3_writephy(tp, MII_BMCR, phy_control);
769         if (err != 0)
770                 return -EBUSY;
771
772         limit = 5000;
773         while (limit--) {
774                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
775                 if (err != 0)
776                         return -EBUSY;
777
778                 if ((phy_control & BMCR_RESET) == 0) {
779                         udelay(40);
780                         break;
781                 }
782                 udelay(10);
783         }
784         if (limit <= 0)
785                 return -EBUSY;
786
787         return 0;
788 }
789
790 static int tg3_wait_macro_done(struct tg3 *tp)
791 {
792         int limit = 100;
793
794         while (limit--) {
795                 u32 tmp32;
796
797                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
798                         if ((tmp32 & 0x1000) == 0)
799                                 break;
800                 }
801         }
802         if (limit <= 0)
803                 return -EBUSY;
804
805         return 0;
806 }
807
/* Write a known test pattern into the DSP test registers of each of the
 * four PHY channels, trigger the macro write/read operations, and read
 * the pattern back to verify the DSP datapath.
 *
 * On any macro timeout, *resetp is set to 1 so the caller knows the PHY
 * must be reset before retrying.  Returns 0 if all four channels verify,
 * -EBUSY on timeout or pattern mismatch.
 *
 * NOTE(review): PHY register 0x16 appears to act as the DSP macro
 * control/status register here — confirm against the Broadcom PHY docs.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                /* Select this channel's test register block. */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);

                /* Load the six-word test pattern. */
                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                /* Kick the macro write and wait for completion. */
                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Re-select the block and start the macro read-back. */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, 0x16, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Read back three low/high word pairs and compare against
                 * the pattern (only 15 low and 4 high bits are significant).
                 */
                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                            tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                            tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                /* Mismatch: poke the DSP back to a sane
                                 * state before reporting failure.
                                 */
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}
873
874 static int tg3_phy_reset_chanpat(struct tg3 *tp)
875 {
876         int chan;
877
878         for (chan = 0; chan < 4; chan++) {
879                 int i;
880
881                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
882                              (chan * 0x2000) | 0x0200);
883                 tg3_writephy(tp, 0x16, 0x0002);
884                 for (i = 0; i < 6; i++)
885                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
886                 tg3_writephy(tp, 0x16, 0x0202);
887                 if (tg3_wait_macro_done(tp))
888                         return -EBUSY;
889         }
890
891         return 0;
892 }
893
/* PHY reset workaround for 5703/5704/5705 ASICs: after a BMCR reset,
 * force 1000/full master mode, run the DSP test-pattern check, and
 * retry (with another reset) up to 10 times until the pattern verifies.
 * On the way out, the channel patterns are cleared and the transmitter,
 * interrupt and master-mode settings are restored.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
        u32 reg32, phy9_orig;
        int retries, do_phy_reset, err;

        retries = 10;
        do_phy_reset = 1;
        do {
                if (do_phy_reset) {
                        err = tg3_bmcr_reset(tp);
                        if (err)
                                return err;
                        do_phy_reset = 0;
                }

                /* Disable transmitter and interrupt.  */
                if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
                        continue;

                reg32 |= 0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

                /* Set full-duplex, 1000 mbps.  */
                tg3_writephy(tp, MII_BMCR,
                             BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

                /* Set to master mode.  */
                if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
                        continue;

                tg3_writephy(tp, MII_TG3_CTRL,
                             (MII_TG3_CTRL_AS_MASTER |
                              MII_TG3_CTRL_ENABLE_AS_MASTER));

                /* Enable SM_DSP_CLOCK and 6dB.  */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

                /* Block the PHY control access.  */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

                err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
                if (!err)
                        break;
        } while (--retries);

        /* NOTE(review): if every MII_TG3_CTRL read above failed, phy9_orig
         * is used uninitialized below; and if retries are exhausted, we fall
         * through here even though the test pattern never verified — both
         * look like latent issues worth confirming.
         */
        err = tg3_phy_reset_chanpat(tp);
        if (err)
                return err;

        /* Unblock PHY control access. */
        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
        tg3_writephy(tp, 0x16, 0x0000);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                /* Set Extended packet length bit for jumbo frames */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
        }
        else {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }

        /* Restore the original master-mode setting. */
        tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

        /* Re-enable transmitter and interrupt. */
        if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
                reg32 &= ~0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
        } else if (!err)
                err = -EBUSY;

        return err;
}
969
/* Reset the tigon3 PHY and re-apply the chip-specific workarounds
 * (ADC, 5704 A0 and BER bug fixups, jumbo-frame bits, wirespeed).
 * 5703/5704/5705 go through the extended test-pattern reset path.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 phy_status;
        int err;

        /* Read BMSR twice: the link-status bit is latched-low, so the
         * first read clears any stale latched state.
         */
        err  = tg3_readphy(tp, MII_BMSR, &phy_status);
        err |= tg3_readphy(tp, MII_BMSR, &phy_status);
        if (err != 0)
                return -EBUSY;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

out:
        /* DSP fixup for PHYs with the ADC bug. */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        /* 5704 A0 silicon workaround (the double write is intentional). */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
                tg3_writephy(tp, 0x1c, 0x8d68);
                tg3_writephy(tp, 0x1c, 0x8d68);
        }
        /* DSP fixup for PHYs with the bit-error-rate bug. */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        /* Set Extended packet length bit (bit 14) on all chips that */
        /* support jumbo frames */
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
        } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
                u32 phy_reg;

                /* Set bit 14 with read-modify-write to preserve other bits */
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
        }

        /* Set phy register 0x10 bit 0 to high fifo elasticity to support
         * jumbo frames transmission.
         */
        if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
                u32 phy_reg;

                if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
                    tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
        }

        tg3_phy_set_wirespeed(tp);
        return 0;
}
1047
/* Configure the GRC local-control GPIOs that switch the NIC between
 * main and auxiliary (Vaux) power.  On dual-port 5704/5714 boards the
 * peer function's WOL/ASF state is consulted too, since both functions
 * share the aux supply.  No-op on write-protected (LOM) configurations.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
        struct tg3 *tp_peer = tp;

        if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
                return;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
                struct net_device *dev_peer;

                dev_peer = pci_get_drvdata(tp->pdev_peer);
                /* remove_one() may have been run on the peer. */
                if (!dev_peer)
                        tp_peer = tp;
                else
                        tp_peer = netdev_priv(dev_peer);
        }

        /* If either function needs WOL or ASF, keep aux power driven. */
        if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE0 |
                                     GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OE2 |
                                     GRC_LCLCTRL_GPIO_OUTPUT0 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1),
                                    100);
                } else {
                        u32 no_gpio2;
                        u32 grc_local_ctrl = 0;

                        /* The peer already set the GPIOs up; don't redo it. */
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        /* Workaround to prevent overdrawing Amps. */
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5714) {
                                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }

                        /* On 5753 and variants, GPIO2 cannot be used. */
                        no_gpio2 = tp->nic_sram_data_cfg &
                                    NIC_SRAM_DATA_CFG_NO_GPIO2;

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                         GRC_LCLCTRL_GPIO_OE1 |
                                         GRC_LCLCTRL_GPIO_OE2 |
                                         GRC_LCLCTRL_GPIO_OUTPUT1 |
                                         GRC_LCLCTRL_GPIO_OUTPUT2;
                        if (no_gpio2) {
                                grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
                                                    GRC_LCLCTRL_GPIO_OUTPUT2);
                        }
                        /* Three-step GPIO sequence; each write settles 100us. */
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                    grc_local_ctrl, 100);

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                    grc_local_ctrl, 100);

                        if (!no_gpio2) {
                                grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }
                }
        } else {
                /* Nobody needs aux power; park the GPIOs. */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    GRC_LCLCTRL_GPIO_OE1, 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
                }
        }
}
1143
1144 static int tg3_setup_phy(struct tg3 *, int);
1145
1146 #define RESET_KIND_SHUTDOWN     0
1147 #define RESET_KIND_INIT         1
1148 #define RESET_KIND_SUSPEND      2
1149
1150 static void tg3_write_sig_post_reset(struct tg3 *, int);
1151 static int tg3_halt_cpu(struct tg3 *, u32);
1152 static int tg3_nvram_lock(struct tg3 *);
1153 static void tg3_nvram_unlock(struct tg3 *);
1154
1155 static void tg3_power_down_phy(struct tg3 *tp)
1156 {
1157         /* The PHY should not be powered down on some chips because
1158          * of bugs.
1159          */
1160         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1161             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1162             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1163              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1164                 return;
1165         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1166 }
1167
/* Transition the device into the requested PCI power state, setting up
 * wake-on-LAN, gating clocks and powering down the PHY as appropriate.
 * Going to D0 just restores full power and returns early.
 *
 * Returns 0 on success, -EINVAL for an unsupported target state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
        u32 misc_host_ctrl;
        u16 power_control, power_caps;
        int pm = tp->pm_cap;

        /* Make sure register accesses (indirect or otherwise)
         * will function correctly.
         */
        pci_write_config_dword(tp->pdev,
                               TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        pci_read_config_word(tp->pdev,
                             pm + PCI_PM_CTRL,
                             &power_control);
        /* Clear PME status (write-one-to-clear) and old state bits. */
        power_control |= PCI_PM_CTRL_PME_STATUS;
        power_control &= ~(PCI_PM_CTRL_STATE_MASK);
        switch (state) {
        case PCI_D0:
                power_control |= 0;
                pci_write_config_word(tp->pdev,
                                      pm + PCI_PM_CTRL,
                                      power_control);
                udelay(100);    /* Delay after power state change */

                /* Switch out of Vaux if it is not a LOM */
                if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

                return 0;

        case PCI_D1:
                power_control |= 1;
                break;

        case PCI_D2:
                power_control |= 2;
                break;

        case PCI_D3hot:
                power_control |= 3;
                break;

        default:
                printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
                       "requested.\n",
                       tp->dev->name, state);
                return -EINVAL;
        };

        power_control |= PCI_PM_CTRL_PME_ENABLE;

        /* Mask PCI interrupts while the link and clocks go down. */
        misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
        tw32(TG3PCI_MISC_HOST_CTRL,
             misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

        /* Save the current link settings so resume can restore them. */
        if (tp->link_config.phy_is_low_power == 0) {
                tp->link_config.phy_is_low_power = 1;
                tp->link_config.orig_speed = tp->link_config.speed;
                tp->link_config.orig_duplex = tp->link_config.duplex;
                tp->link_config.orig_autoneg = tp->link_config.autoneg;
        }

        /* Drop copper links to 10/half autoneg to save power. */
        if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
                tp->link_config.speed = SPEED_10;
                tp->link_config.duplex = DUPLEX_HALF;
                tp->link_config.autoneg = AUTONEG_ENABLE;
                tg3_setup_phy(tp, 0);
        }

        /* Without ASF, give the boot firmware up to ~200ms to signal
         * readiness in its status mailbox before announcing shutdown.
         */
        if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
                int i;
                u32 val;

                for (i = 0; i < 200; i++) {
                        tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
                        if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                                break;
                        msleep(1);
                }
        }
        tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
                                             WOL_DRV_STATE_SHUTDOWN |
                                             WOL_DRV_WOL | WOL_SET_MAGIC_PKT);

        pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

        /* Configure the MAC for wake-on-LAN reception. */
        if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
                u32 mac_mode;

                if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
                        udelay(40);

                        mac_mode = MAC_MODE_PORT_MODE_MII;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
                            !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
                                mac_mode |= MAC_MODE_LINK_POLARITY;
                } else {
                        mac_mode = MAC_MODE_PORT_MODE_TBI;
                }

                if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
                        tw32(MAC_LED_CTRL, tp->led_ctrl);

                if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
                     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
                        mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

                tw32_f(MAC_MODE, mac_mode);
                udelay(100);

                tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
                udelay(10);
        }

        /* Gate or slow the core clocks depending on chip family and on
         * whether 100Mb WOL or ASF still needs them running.
         */
        if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 base_val;

                base_val = tp->pci_clock_ctrl;
                base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
                             CLOCK_CTRL_TXCLK_DISABLE);

                tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
                            CLOCK_CTRL_PWRDOWN_PLL133, 40);
        } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
                /* do nothing */
        } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
                     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
                u32 newbits1, newbits2;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
                                    CLOCK_CTRL_TXCLK_DISABLE |
                                    CLOCK_CTRL_ALTCLK);
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                        newbits1 = CLOCK_CTRL_625_CORE;
                        newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
                } else {
                        newbits1 = CLOCK_CTRL_ALTCLK;
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                }

                /* Apply the clock changes in two steps, 40us settle each. */
                tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
                            40);

                tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
                            40);

                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                        u32 newbits3;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                                newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
                                            CLOCK_CTRL_TXCLK_DISABLE |
                                            CLOCK_CTRL_44MHZ_CORE);
                        } else {
                                newbits3 = CLOCK_CTRL_44MHZ_CORE;
                        }

                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    tp->pci_clock_ctrl | newbits3, 40);
                }
        }

        /* Neither WOL nor ASF needs the PHY; shut it off completely. */
        if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
                /* Turn off the PHY */
                if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
                        tg3_power_down_phy(tp);
                }
        }

        tg3_frob_aux_power(tp);

        /* Workaround for unstable PLL clock */
        if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
            (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
                u32 val = tr32(0x7d00);

                val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
                tw32(0x7d00, val);
                if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
                        int err;

                        err = tg3_nvram_lock(tp);
                        tg3_halt_cpu(tp, RX_CPU_BASE);
                        if (!err)
                                tg3_nvram_unlock(tp);
                }
        }

        /* Finally, set the new power state. */
        pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
        udelay(100);    /* Delay after power state change */

        tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

        return 0;
}
1378
1379 static void tg3_link_report(struct tg3 *tp)
1380 {
1381         if (!netif_carrier_ok(tp->dev)) {
1382                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1383         } else {
1384                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1385                        tp->dev->name,
1386                        (tp->link_config.active_speed == SPEED_1000 ?
1387                         1000 :
1388                         (tp->link_config.active_speed == SPEED_100 ?
1389                          100 : 10)),
1390                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1391                         "full" : "half"));
1392
1393                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1394                        "%s for RX.\n",
1395                        tp->dev->name,
1396                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1397                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1398         }
1399 }
1400
/* Resolve 802.3x pause (flow control) from the local and link-partner
 * advertisements and program the MAC RX/TX flow-control enables.  When
 * pause autonegotiation is off, the previously configured flags are
 * applied unchanged.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
        u32 new_tg3_flags = 0;
        u32 old_rx_mode = tp->rx_mode;
        u32 old_tx_mode = tp->tx_mode;

        if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {

                /* Convert 1000BaseX flow control bits to 1000BaseT
                 * bits before resolving flow control.
                 */
                if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
                        local_adv &= ~(ADVERTISE_PAUSE_CAP |
                                       ADVERTISE_PAUSE_ASYM);
                        remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

                        if (local_adv & ADVERTISE_1000XPAUSE)
                                local_adv |= ADVERTISE_PAUSE_CAP;
                        if (local_adv & ADVERTISE_1000XPSE_ASYM)
                                local_adv |= ADVERTISE_PAUSE_ASYM;
                        if (remote_adv & LPA_1000XPAUSE)
                                remote_adv |= LPA_PAUSE_CAP;
                        if (remote_adv & LPA_1000XPAUSE_ASYM)
                                remote_adv |= LPA_PAUSE_ASYM;
                }

                /* Standard 802.3 Annex 28B pause resolution matrix. */
                if (local_adv & ADVERTISE_PAUSE_CAP) {
                        if (local_adv & ADVERTISE_PAUSE_ASYM) {
                                if (remote_adv & LPA_PAUSE_CAP)
                                        new_tg3_flags |=
                                                (TG3_FLAG_RX_PAUSE |
                                                TG3_FLAG_TX_PAUSE);
                                else if (remote_adv & LPA_PAUSE_ASYM)
                                        new_tg3_flags |=
                                                (TG3_FLAG_RX_PAUSE);
                        } else {
                                if (remote_adv & LPA_PAUSE_CAP)
                                        new_tg3_flags |=
                                                (TG3_FLAG_RX_PAUSE |
                                                TG3_FLAG_TX_PAUSE);
                        }
                } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                        if ((remote_adv & LPA_PAUSE_CAP) &&
                        (remote_adv & LPA_PAUSE_ASYM))
                                new_tg3_flags |= TG3_FLAG_TX_PAUSE;
                }

                tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
                tp->tg3_flags |= new_tg3_flags;
        } else {
                /* Autoneg of pause disabled: keep the configured flags. */
                new_tg3_flags = tp->tg3_flags;
        }

        if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
                tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

        /* Only touch the MAC registers when the mode actually changed. */
        if (old_rx_mode != tp->rx_mode) {
                tw32_f(MAC_RX_MODE, tp->rx_mode);
        }

        if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
                tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

        if (old_tx_mode != tp->tx_mode) {
                tw32_f(MAC_TX_MODE, tp->tx_mode);
        }
}
1472
1473 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1474 {
1475         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1476         case MII_TG3_AUX_STAT_10HALF:
1477                 *speed = SPEED_10;
1478                 *duplex = DUPLEX_HALF;
1479                 break;
1480
1481         case MII_TG3_AUX_STAT_10FULL:
1482                 *speed = SPEED_10;
1483                 *duplex = DUPLEX_FULL;
1484                 break;
1485
1486         case MII_TG3_AUX_STAT_100HALF:
1487                 *speed = SPEED_100;
1488                 *duplex = DUPLEX_HALF;
1489                 break;
1490
1491         case MII_TG3_AUX_STAT_100FULL:
1492                 *speed = SPEED_100;
1493                 *duplex = DUPLEX_FULL;
1494                 break;
1495
1496         case MII_TG3_AUX_STAT_1000HALF:
1497                 *speed = SPEED_1000;
1498                 *duplex = DUPLEX_HALF;
1499                 break;
1500
1501         case MII_TG3_AUX_STAT_1000FULL:
1502                 *speed = SPEED_1000;
1503                 *duplex = DUPLEX_FULL;
1504                 break;
1505
1506         default:
1507                 *speed = SPEED_INVALID;
1508                 *duplex = DUPLEX_INVALID;
1509                 break;
1510         };
1511 }
1512
1513 static void tg3_phy_copper_begin(struct tg3 *tp)
1514 {
1515         u32 new_adv;
1516         int i;
1517
1518         if (tp->link_config.phy_is_low_power) {
1519                 /* Entering low power mode.  Disable gigabit and
1520                  * 100baseT advertisements.
1521                  */
1522                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1523
1524                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1525                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1526                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1527                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1528
1529                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1530         } else if (tp->link_config.speed == SPEED_INVALID) {
1531                 tp->link_config.advertising =
1532                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1533                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1534                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1535                          ADVERTISED_Autoneg | ADVERTISED_MII);
1536
1537                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1538                         tp->link_config.advertising &=
1539                                 ~(ADVERTISED_1000baseT_Half |
1540                                   ADVERTISED_1000baseT_Full);
1541
1542                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1543                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1544                         new_adv |= ADVERTISE_10HALF;
1545                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1546                         new_adv |= ADVERTISE_10FULL;
1547                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1548                         new_adv |= ADVERTISE_100HALF;
1549                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1550                         new_adv |= ADVERTISE_100FULL;
1551                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1552
1553                 if (tp->link_config.advertising &
1554                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1555                         new_adv = 0;
1556                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1557                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1558                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1559                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1560                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1561                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1562                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1563                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1564                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1565                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1566                 } else {
1567                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1568                 }
1569         } else {
1570                 /* Asking for a specific link mode. */
1571                 if (tp->link_config.speed == SPEED_1000) {
1572                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1573                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1574
1575                         if (tp->link_config.duplex == DUPLEX_FULL)
1576                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1577                         else
1578                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1579                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1580                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1581                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1582                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1583                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1584                 } else {
1585                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1586
1587                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1588                         if (tp->link_config.speed == SPEED_100) {
1589                                 if (tp->link_config.duplex == DUPLEX_FULL)
1590                                         new_adv |= ADVERTISE_100FULL;
1591                                 else
1592                                         new_adv |= ADVERTISE_100HALF;
1593                         } else {
1594                                 if (tp->link_config.duplex == DUPLEX_FULL)
1595                                         new_adv |= ADVERTISE_10FULL;
1596                                 else
1597                                         new_adv |= ADVERTISE_10HALF;
1598                         }
1599                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1600                 }
1601         }
1602
1603         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1604             tp->link_config.speed != SPEED_INVALID) {
1605                 u32 bmcr, orig_bmcr;
1606
1607                 tp->link_config.active_speed = tp->link_config.speed;
1608                 tp->link_config.active_duplex = tp->link_config.duplex;
1609
1610                 bmcr = 0;
1611                 switch (tp->link_config.speed) {
1612                 default:
1613                 case SPEED_10:
1614                         break;
1615
1616                 case SPEED_100:
1617                         bmcr |= BMCR_SPEED100;
1618                         break;
1619
1620                 case SPEED_1000:
1621                         bmcr |= TG3_BMCR_SPEED1000;
1622                         break;
1623                 };
1624
1625                 if (tp->link_config.duplex == DUPLEX_FULL)
1626                         bmcr |= BMCR_FULLDPLX;
1627
1628                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1629                     (bmcr != orig_bmcr)) {
1630                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1631                         for (i = 0; i < 1500; i++) {
1632                                 u32 tmp;
1633
1634                                 udelay(10);
1635                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1636                                     tg3_readphy(tp, MII_BMSR, &tmp))
1637                                         continue;
1638                                 if (!(tmp & BMSR_LSTATUS)) {
1639                                         udelay(40);
1640                                         break;
1641                                 }
1642                         }
1643                         tg3_writephy(tp, MII_BMCR, bmcr);
1644                         udelay(40);
1645                 }
1646         } else {
1647                 tg3_writephy(tp, MII_BMCR,
1648                              BMCR_ANENABLE | BMCR_ANRESTART);
1649         }
1650 }
1651
1652 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1653 {
1654         int err;
1655
1656         /* Turn off tap power management. */
1657         /* Set Extended packet length bit */
1658         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1659
1660         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1661         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1662
1663         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1664         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1665
1666         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1667         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1668
1669         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1670         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1671
1672         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1673         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1674
1675         udelay(40);
1676
1677         return err;
1678 }
1679
1680 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1681 {
1682         u32 adv_reg, all_mask;
1683
1684         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1685                 return 0;
1686
1687         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1688                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1689         if ((adv_reg & all_mask) != all_mask)
1690                 return 0;
1691         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1692                 u32 tg3_ctrl;
1693
1694                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1695                         return 0;
1696
1697                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1698                             MII_TG3_CTRL_ADV_1000_FULL);
1699                 if ((tg3_ctrl & all_mask) != all_mask)
1700                         return 0;
1701         }
1702         return 1;
1703 }
1704
/* Bring up / re-evaluate the link on a copper PHY.  Clears latched MAC
 * and PHY status, optionally resets the PHY, determines the negotiated
 * speed/duplex from the aux status register, programs the MAC port
 * mode and duplex to match, and reports carrier transitions via
 * netif_carrier_on/off + tg3_link_report().  Returns 0, or a PHY
 * setup error from the BCM5401 DSP init path.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Ack any latched link/config change status bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR link bit is latched-low; read twice for the
		 * current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link down: (re)load the 5401 DSP coefficients
			 * and wait up to ~10ms for link to return.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0: if still no link at gigabit speed,
			 * reset the PHY and reload the DSP once more.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* If bit 10 of the aux control shadow is clear, set it
		 * and skip straight to the relink path.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll for link up (up to ~4ms). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero aux status value. */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a stable, valid BMCR value (0x7fff looks
		 * like a failed read).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: only count the link as up if the
			 * PHY matches the requested speed/duplex.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	/* Full-duplex autoneg link: validate pause advertisement and
	 * configure flow control.
	 */
	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	/* Link down (or low power): reprogram the PHY and re-check. */
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program MAC port mode (MII for 10/100, GMII otherwise) and
	 * duplex to match what we found.
	 */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
		    (current_link_up == 1 &&
		     tp->link_config.active_speed == SPEED_10))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	} else {
		if (current_link_up == 1)
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / fast PCI: notify firmware via the
	 * mailbox after the link settles.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
1983
/* Software state for the 1000BASE-X (fiber) autonegotiation state
 * machine (tg3_fiber_aneg_smachine).  The MR_* flag names follow the
 * management variables of IEEE 802.3 clause 37 autonegotiation.
 */
struct tg3_fiber_aneginfo {
	int state;			/* one of ANEG_STATE_* below */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;			/* MR_* control/status bits below */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
/* MR_LP_ADV_*: abilities decoded from the link partner's config word. */
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Timestamps in state-machine ticks (cur_time advances by one
	 * on every call to tg3_fiber_aneg_smachine()).
	 */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* last config word seen from partner */
	int ability_match_count;	/* consecutive repeats of that word */

	char ability_match, idle_match, ack_match;	/* 0/1 match flags */

	u32 txconfig, rxconfig;		/* raw config words sent/received */
#define ANEG_CFG_NP             0x00000080	/* next page */
#define ANEG_CFG_ACK            0x00000040	/* acknowledge */
#define ANEG_CFG_RF2            0x00000020	/* remote fault 2 */
#define ANEG_CFG_RF1            0x00000010	/* remote fault 1 */
#define ANEG_CFG_PS2            0x00000001	/* pause (asymmetric) */
#define ANEG_CFG_PS1            0x00008000	/* pause (symmetric) */
#define ANEG_CFG_HD             0x00004000	/* half duplex */
#define ANEG_CFG_FD             0x00002000	/* full duplex */
#define ANEG_CFG_INVAL          0x00001f06	/* bits that must be zero */

};
/* Return values of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0	/* nothing notable; keep polling */
#define ANEG_DONE       1	/* negotiation finished */
#define ANEG_TIMER_ENAB 2	/* timing a settle interval; keep ticking */
#define ANEG_FAILED     -1	/* negotiation failed */

/* Settle interval, in state-machine ticks. */
#define ANEG_STATE_SETTLE_TIME  10000
2047
2048 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2049                                    struct tg3_fiber_aneginfo *ap)
2050 {
2051         unsigned long delta;
2052         u32 rx_cfg_reg;
2053         int ret;
2054
2055         if (ap->state == ANEG_STATE_UNKNOWN) {
2056                 ap->rxconfig = 0;
2057                 ap->link_time = 0;
2058                 ap->cur_time = 0;
2059                 ap->ability_match_cfg = 0;
2060                 ap->ability_match_count = 0;
2061                 ap->ability_match = 0;
2062                 ap->idle_match = 0;
2063                 ap->ack_match = 0;
2064         }
2065         ap->cur_time++;
2066
2067         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2068                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2069
2070                 if (rx_cfg_reg != ap->ability_match_cfg) {
2071                         ap->ability_match_cfg = rx_cfg_reg;
2072                         ap->ability_match = 0;
2073                         ap->ability_match_count = 0;
2074                 } else {
2075                         if (++ap->ability_match_count > 1) {
2076                                 ap->ability_match = 1;
2077                                 ap->ability_match_cfg = rx_cfg_reg;
2078                         }
2079                 }
2080                 if (rx_cfg_reg & ANEG_CFG_ACK)
2081                         ap->ack_match = 1;
2082                 else
2083                         ap->ack_match = 0;
2084
2085                 ap->idle_match = 0;
2086         } else {
2087                 ap->idle_match = 1;
2088                 ap->ability_match_cfg = 0;
2089                 ap->ability_match_count = 0;
2090                 ap->ability_match = 0;
2091                 ap->ack_match = 0;
2092
2093                 rx_cfg_reg = 0;
2094         }
2095
2096         ap->rxconfig = rx_cfg_reg;
2097         ret = ANEG_OK;
2098
2099         switch(ap->state) {
2100         case ANEG_STATE_UNKNOWN:
2101                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2102                         ap->state = ANEG_STATE_AN_ENABLE;
2103
2104                 /* fallthru */
2105         case ANEG_STATE_AN_ENABLE:
2106                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2107                 if (ap->flags & MR_AN_ENABLE) {
2108                         ap->link_time = 0;
2109                         ap->cur_time = 0;
2110                         ap->ability_match_cfg = 0;
2111                         ap->ability_match_count = 0;
2112                         ap->ability_match = 0;
2113                         ap->idle_match = 0;
2114                         ap->ack_match = 0;
2115
2116                         ap->state = ANEG_STATE_RESTART_INIT;
2117                 } else {
2118                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2119                 }
2120                 break;
2121
2122         case ANEG_STATE_RESTART_INIT:
2123                 ap->link_time = ap->cur_time;
2124                 ap->flags &= ~(MR_NP_LOADED);
2125                 ap->txconfig = 0;
2126                 tw32(MAC_TX_AUTO_NEG, 0);
2127                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2128                 tw32_f(MAC_MODE, tp->mac_mode);
2129                 udelay(40);
2130
2131                 ret = ANEG_TIMER_ENAB;
2132                 ap->state = ANEG_STATE_RESTART;
2133
2134                 /* fallthru */
2135         case ANEG_STATE_RESTART:
2136                 delta = ap->cur_time - ap->link_time;
2137                 if (delta > ANEG_STATE_SETTLE_TIME) {
2138                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2139                 } else {
2140                         ret = ANEG_TIMER_ENAB;
2141                 }
2142                 break;
2143
2144         case ANEG_STATE_DISABLE_LINK_OK:
2145                 ret = ANEG_DONE;
2146                 break;
2147
2148         case ANEG_STATE_ABILITY_DETECT_INIT:
2149                 ap->flags &= ~(MR_TOGGLE_TX);
2150                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2151                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2152                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2153                 tw32_f(MAC_MODE, tp->mac_mode);
2154                 udelay(40);
2155
2156                 ap->state = ANEG_STATE_ABILITY_DETECT;
2157                 break;
2158
2159         case ANEG_STATE_ABILITY_DETECT:
2160                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2161                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2162                 }
2163                 break;
2164
2165         case ANEG_STATE_ACK_DETECT_INIT:
2166                 ap->txconfig |= ANEG_CFG_ACK;
2167                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2168                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2169                 tw32_f(MAC_MODE, tp->mac_mode);
2170                 udelay(40);
2171
2172                 ap->state = ANEG_STATE_ACK_DETECT;
2173
2174                 /* fallthru */
2175         case ANEG_STATE_ACK_DETECT:
2176                 if (ap->ack_match != 0) {
2177                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2178                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2179                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2180                         } else {
2181                                 ap->state = ANEG_STATE_AN_ENABLE;
2182                         }
2183                 } else if (ap->ability_match != 0 &&
2184                            ap->rxconfig == 0) {
2185                         ap->state = ANEG_STATE_AN_ENABLE;
2186                 }
2187                 break;
2188
2189         case ANEG_STATE_COMPLETE_ACK_INIT:
2190                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2191                         ret = ANEG_FAILED;
2192                         break;
2193                 }
2194                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2195                                MR_LP_ADV_HALF_DUPLEX |
2196                                MR_LP_ADV_SYM_PAUSE |
2197                                MR_LP_ADV_ASYM_PAUSE |
2198                                MR_LP_ADV_REMOTE_FAULT1 |
2199                                MR_LP_ADV_REMOTE_FAULT2 |
2200                                MR_LP_ADV_NEXT_PAGE |
2201                                MR_TOGGLE_RX |
2202                                MR_NP_RX);
2203                 if (ap->rxconfig & ANEG_CFG_FD)
2204                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2205                 if (ap->rxconfig & ANEG_CFG_HD)
2206                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2207                 if (ap->rxconfig & ANEG_CFG_PS1)
2208                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2209                 if (ap->rxconfig & ANEG_CFG_PS2)
2210                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2211                 if (ap->rxconfig & ANEG_CFG_RF1)
2212                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2213                 if (ap->rxconfig & ANEG_CFG_RF2)
2214                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2215                 if (ap->rxconfig & ANEG_CFG_NP)
2216                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2217
2218                 ap->link_time = ap->cur_time;
2219
2220                 ap->flags ^= (MR_TOGGLE_TX);
2221                 if (ap->rxconfig & 0x0008)
2222                         ap->flags |= MR_TOGGLE_RX;
2223                 if (ap->rxconfig & ANEG_CFG_NP)
2224                         ap->flags |= MR_NP_RX;
2225                 ap->flags |= MR_PAGE_RX;
2226
2227                 ap->state = ANEG_STATE_COMPLETE_ACK;
2228                 ret = ANEG_TIMER_ENAB;
2229                 break;
2230
2231         case ANEG_STATE_COMPLETE_ACK:
2232                 if (ap->ability_match != 0 &&
2233                     ap->rxconfig == 0) {
2234                         ap->state = ANEG_STATE_AN_ENABLE;
2235                         break;
2236                 }
2237                 delta = ap->cur_time - ap->link_time;
2238                 if (delta > ANEG_STATE_SETTLE_TIME) {
2239                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2240                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2241                         } else {
2242                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2243                                     !(ap->flags & MR_NP_RX)) {
2244                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2245                                 } else {
2246                                         ret = ANEG_FAILED;
2247                                 }
2248                         }
2249                 }
2250                 break;
2251
2252         case ANEG_STATE_IDLE_DETECT_INIT:
2253                 ap->link_time = ap->cur_time;
2254                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2255                 tw32_f(MAC_MODE, tp->mac_mode);
2256                 udelay(40);
2257
2258                 ap->state = ANEG_STATE_IDLE_DETECT;
2259                 ret = ANEG_TIMER_ENAB;
2260                 break;
2261
2262         case ANEG_STATE_IDLE_DETECT:
2263                 if (ap->ability_match != 0 &&
2264                     ap->rxconfig == 0) {
2265                         ap->state = ANEG_STATE_AN_ENABLE;
2266                         break;
2267                 }
2268                 delta = ap->cur_time - ap->link_time;
2269                 if (delta > ANEG_STATE_SETTLE_TIME) {
2270                         /* XXX another gem from the Broadcom driver :( */
2271                         ap->state = ANEG_STATE_LINK_OK;
2272                 }
2273                 break;
2274
2275         case ANEG_STATE_LINK_OK:
2276                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2277                 ret = ANEG_DONE;
2278                 break;
2279
2280         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2281                 /* ??? unimplemented */
2282                 break;
2283
2284         case ANEG_STATE_NEXT_PAGE_WAIT:
2285                 /* ??? unimplemented */
2286                 break;
2287
2288         default:
2289                 ret = ANEG_FAILED;
2290                 break;
2291         };
2292
2293         return ret;
2294 }
2295
/* Run the 1000BASE-X autonegotiation state machine in software.
 *
 * Forces the MAC into GMII port mode with SEND_CONFIGS on, then steps
 * tg3_fiber_aneg_smachine() about once per microsecond (up to ~195ms)
 * until it reports ANEG_DONE or ANEG_FAILED.  The final state-machine
 * flags are copied out through @flags for the caller to inspect.
 *
 * Returns 1 if autoneg finished (ANEG_DONE) with any of AN_COMPLETE,
 * LINK_OK or LP_ADV_FULL_DUPLEX set in the flags, 0 otherwise.
 */
static int fiber_autoneg(struct tg3 *tp, u32 *flags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Force GMII port mode while the software state machine runs. */
	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	/* Stop sending config code words now that autoneg is finished. */
	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*flags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
2339
/* Bring up the external BCM8002 SERDES PHY.
 *
 * Skipped when the device is already initialized (INIT_COMPLETE) and
 * PCS sync is absent, i.e. re-init only happens on first bring-up or
 * while a link is present.  The bare register numbers and values are
 * opaque Broadcom-supplied magic; only the commented steps are known.
 *
 * NOTE(review): the two busy-wait loops below total ~155ms of udelay;
 * the existing XXX comments already flag them for conversion to
 * schedule_timeout().
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2389
/* Link setup for fiber parts whose SG_DIG block performs 1000BASE-X
 * autonegotiation in hardware.
 *
 * @mac_status is the caller's snapshot of MAC_STATUS.  Returns 1 if
 * link is up, 0 otherwise.
 *
 * On every chip revision except 5704 A0/A1 a MAC_SERDES_CFG
 * workaround is applied around SG_DIG_CTRL reprogramming; which magic
 * value is OR'd in depends on whether this is port A or port B of a
 * dual-MAC device.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: if hardware autoneg (bit 31) is currently
		 * enabled, turn it off, then report link up whenever PCS
		 * sync is present.
		 */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymmetric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* (Re)start hardware autoneg: write the desired control
		 * value with bit 30 pulsed high then low.
		 */
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		int i;

		/* Give time to negotiate (~200ms) */
		for (i = 0; i < 40000; i++) {
			sg_dig_status = tr32(SG_DIG_STATUS);
			if (sg_dig_status & (0x3))
				break;
			udelay(5);
		}
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg completed: translate the partner's
			 * pause bits from SG_DIG_STATUS into MII LPA
			 * form and program flow control.
			 */
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Autoneg did not complete.  The first pass just
			 * clears PHY_JUST_INITTED; on later passes fall
			 * back to parallel detection.
			 */
			if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
				tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
				}
			}
		}
	}

out:
	return current_link_up;
}
2514
/* Link setup for fiber parts without the hardware SG_DIG autoneg
 * block: runs the software 1000BASE-X autoneg state machine, or
 * simply forces 1000FD when autoneg is disabled.
 *
 * @mac_status is the caller's snapshot of MAC_STATUS.  Returns 1 if
 * link is up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync means no link at all. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
		tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
		goto out;
	}

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			/* Translate the partner's pause bits into MII
			 * LPA form for tg3_setup_flow_control().
			 */
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
			current_link_up = 1;
		}
		/* Ack the SYNC/CFG changed bits until they stay clear
		 * (up to ~1.8ms).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Parallel detection fallback: PCS sync without config
		 * code words means the partner is not autonegotiating.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;
		tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);
	}

out:
	return current_link_up;
}
2572
/* Top-level link setup for TBI/fiber devices (called from
 * tg3_setup_phy).
 *
 * Snapshots the current pause/speed/duplex state, reprograms the MAC
 * into TBI port mode, delegates to the hardware (SG_DIG) or software
 * autoneg path, then updates carrier state and the link LED, and
 * reports any link change.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path for software-autoneg parts: with carrier present
	 * and a clean status (PCS sync + signal, no pending changes,
	 * no config code words) nothing needs doing beyond acking the
	 * changed bits.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Clear any stale link-change indication in the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack the SYNC/CFG changed bits until they stay clear (up to
	 * ~500us).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Nudge the link partner by briefly sending
			 * config code words.
			 */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report carrier transitions; also report when pause, speed or
	 * duplex changed without a carrier transition.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2686
2687 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2688 {
2689         int current_link_up, err = 0;
2690         u32 bmsr, bmcr;
2691         u16 current_speed;
2692         u8 current_duplex;
2693
2694         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2695         tw32_f(MAC_MODE, tp->mac_mode);
2696         udelay(40);
2697
2698         tw32(MAC_EVENT, 0);
2699
2700         tw32_f(MAC_STATUS,
2701              (MAC_STATUS_SYNC_CHANGED |
2702               MAC_STATUS_CFG_CHANGED |
2703               MAC_STATUS_MI_COMPLETION |
2704               MAC_STATUS_LNKSTATE_CHANGED));
2705         udelay(40);
2706
2707         if (force_reset)
2708                 tg3_phy_reset(tp);
2709
2710         current_link_up = 0;
2711         current_speed = SPEED_INVALID;
2712         current_duplex = DUPLEX_INVALID;
2713
2714         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2715         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2716         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2717                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2718                         bmsr |= BMSR_LSTATUS;
2719                 else
2720                         bmsr &= ~BMSR_LSTATUS;
2721         }
2722
2723         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2724
2725         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2726             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2727                 /* do nothing, just check for link up at the end */
2728         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2729                 u32 adv, new_adv;
2730
2731                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2732                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2733                                   ADVERTISE_1000XPAUSE |
2734                                   ADVERTISE_1000XPSE_ASYM |
2735                                   ADVERTISE_SLCT);
2736
2737                 /* Always advertise symmetric PAUSE just like copper */
2738                 new_adv |= ADVERTISE_1000XPAUSE;
2739
2740                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2741                         new_adv |= ADVERTISE_1000XHALF;
2742                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2743                         new_adv |= ADVERTISE_1000XFULL;
2744
2745                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2746                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2747                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2748                         tg3_writephy(tp, MII_BMCR, bmcr);
2749
2750                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2751                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2752                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2753
2754                         return err;
2755                 }
2756         } else {
2757                 u32 new_bmcr;
2758
2759                 bmcr &= ~BMCR_SPEED1000;
2760                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2761
2762                 if (tp->link_config.duplex == DUPLEX_FULL)
2763                         new_bmcr |= BMCR_FULLDPLX;
2764
2765                 if (new_bmcr != bmcr) {
2766                         /* BMCR_SPEED1000 is a reserved bit that needs
2767                          * to be set on write.
2768                          */
2769                         new_bmcr |= BMCR_SPEED1000;
2770
2771                         /* Force a linkdown */
2772                         if (netif_carrier_ok(tp->dev)) {
2773                                 u32 adv;
2774
2775                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2776                                 adv &= ~(ADVERTISE_1000XFULL |
2777                                          ADVERTISE_1000XHALF |
2778                                          ADVERTISE_SLCT);
2779                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2780                                 tg3_writephy(tp, MII_BMCR, bmcr |
2781                                                            BMCR_ANRESTART |
2782                                                            BMCR_ANENABLE);
2783                                 udelay(10);
2784                                 netif_carrier_off(tp->dev);
2785                         }
2786                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2787                         bmcr = new_bmcr;
2788                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2789                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2790                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2791                             ASIC_REV_5714) {
2792                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2793                                         bmsr |= BMSR_LSTATUS;
2794                                 else
2795                                         bmsr &= ~BMSR_LSTATUS;
2796                         }
2797                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2798                 }
2799         }
2800
2801         if (bmsr & BMSR_LSTATUS) {
2802                 current_speed = SPEED_1000;
2803                 current_link_up = 1;
2804                 if (bmcr & BMCR_FULLDPLX)
2805                         current_duplex = DUPLEX_FULL;
2806                 else
2807                         current_duplex = DUPLEX_HALF;
2808
2809                 if (bmcr & BMCR_ANENABLE) {
2810                         u32 local_adv, remote_adv, common;
2811
2812                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2813                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2814                         common = local_adv & remote_adv;
2815                         if (common & (ADVERTISE_1000XHALF |
2816                                       ADVERTISE_1000XFULL)) {
2817                                 if (common & ADVERTISE_1000XFULL)
2818                                         current_duplex = DUPLEX_FULL;
2819                                 else
2820                                         current_duplex = DUPLEX_HALF;
2821
2822                                 tg3_setup_flow_control(tp, local_adv,
2823                                                        remote_adv);
2824                         }
2825                         else
2826                                 current_link_up = 0;
2827                 }
2828         }
2829
2830         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2831         if (tp->link_config.active_duplex == DUPLEX_HALF)
2832                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2833
2834         tw32_f(MAC_MODE, tp->mac_mode);
2835         udelay(40);
2836
2837         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2838
2839         tp->link_config.active_speed = current_speed;
2840         tp->link_config.active_duplex = current_duplex;
2841
2842         if (current_link_up != netif_carrier_ok(tp->dev)) {
2843                 if (current_link_up)
2844                         netif_carrier_on(tp->dev);
2845                 else {
2846                         netif_carrier_off(tp->dev);
2847                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2848                 }
2849                 tg3_link_report(tp);
2850         }
2851         return err;
2852 }
2853
/* Periodic helper for MII-serdes parts implementing 1000BASE-X
 * parallel detection in software.
 *
 * With autoneg enabled but no carrier: if the PHY reports signal
 * detect (shadow register 0x1f, bit 4) while not receiving config
 * code words (expansion register 0x0f01, bit 5), force 1000FD with
 * autoneg disabled and set PARALLEL_DETECT.  Conversely, once config
 * code words appear on a parallel-detected link, re-enable autoneg.
 *
 * The first call after a PHY (re)init only clears PHY_JUST_INITTED,
 * giving autoneg one polling interval to finish.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
		/* Give autoneg time to complete. */
		tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
2911
2912 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2913 {
2914         int err;
2915
2916         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2917                 err = tg3_setup_fiber_phy(tp, force_reset);
2918         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2919                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2920         } else {
2921                 err = tg3_setup_copper_phy(tp, force_reset);
2922         }
2923
2924         if (tp->link_config.active_speed == SPEED_1000 &&
2925             tp->link_config.active_duplex == DUPLEX_HALF)
2926                 tw32(MAC_TX_LENGTHS,
2927                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2928                       (6 << TX_LENGTHS_IPG_SHIFT) |
2929                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2930         else
2931                 tw32(MAC_TX_LENGTHS,
2932                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2933                       (6 << TX_LENGTHS_IPG_SHIFT) |
2934                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2935
2936         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2937                 if (netif_carrier_ok(tp->dev)) {
2938                         tw32(HOSTCC_STAT_COAL_TICKS,
2939                              tp->coal.stats_block_coalesce_usecs);
2940                 } else {
2941                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2942                 }
2943         }
2944
2945         return err;
2946 }
2947
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	/* hw_idx is the chip's consumer index from the status block;
	 * sw_idx is where the previous reclaim pass left off.
	 */
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i;

		BUG_ON(skb == NULL);
		/* Unmap the linear head of the SKB... */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* ...then one ring entry per page fragment. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			BUG_ON(sw_idx == hw_idx);

			ri = &tp->tx_buffers[sw_idx];
			BUG_ON(ri->skb != NULL);

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);
	}

	tp->tx_cons = sw_idx;

	/* NOTE(review): there is no memory barrier between the
	 * tp->tx_cons update above and the netif_queue_stopped() test
	 * below -- confirm this cannot race with tg3_start_xmit()
	 * stopping the queue just after we looked.
	 */
	if (unlikely(netif_queue_stopped(tp->dev))) {
		spin_lock(&tp->tx_lock);
		/* Re-check under tx_lock before waking the queue. */
		if (netif_queue_stopped(tp->dev) &&
		    (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
			netif_wake_queue(tp->dev);
		spin_unlock(&tp->tx_lock);
	}
}
2999
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
			    int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	/* Select descriptor ring, bookkeeping array and buffer size
	 * according to the ring type encoded in opaque_key.
	 */
	src_map = NULL;
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tp->rx_std[dest_idx];
		map = &tp->rx_std_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_std_buffers[src_idx];
		skb_size = tp->rx_pkt_buf_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tp->rx_jumbo[dest_idx];
		map = &tp->rx_jumbo_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_jumbo_buffers[src_idx];
		skb_size = RX_JUMBO_PKT_BUF_SZ;
		break;

	default:
		return -EINVAL;
	};

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = dev_alloc_skb(skb_size);
	if (skb == NULL)
		return -ENOMEM;

	skb->dev = tp->dev;
	skb_reserve(skb, tp->rx_offset);

	/* NOTE(review): the DMA mapping result is not checked for
	 * failure before being posted to the chip -- confirm against
	 * the DMA-mapping API expectations for this kernel version.
	 */
	mapping = pci_map_single(tp->pdev, skb->data,
				 skb_size - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	/* Only the address words are written; the remaining descriptor
	 * fields are invariant (see tg3_init_rings).
	 */
	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
3072
3073 /* We only need to move over in the address because the other
3074  * members of the RX descriptor are invariant.  See notes above
3075  * tg3_alloc_rx_skb for full details.
3076  */
3077 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3078                            int src_idx, u32 dest_idx_unmasked)
3079 {
3080         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3081         struct ring_info *src_map, *dest_map;
3082         int dest_idx;
3083
3084         switch (opaque_key) {
3085         case RXD_OPAQUE_RING_STD:
3086                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3087                 dest_desc = &tp->rx_std[dest_idx];
3088                 dest_map = &tp->rx_std_buffers[dest_idx];
3089                 src_desc = &tp->rx_std[src_idx];
3090                 src_map = &tp->rx_std_buffers[src_idx];
3091                 break;
3092
3093         case RXD_OPAQUE_RING_JUMBO:
3094                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3095                 dest_desc = &tp->rx_jumbo[dest_idx];
3096                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3097                 src_desc = &tp->rx_jumbo[src_idx];
3098                 src_map = &tp->rx_jumbo_buffers[src_idx];
3099                 break;
3100
3101         default:
3102                 return;
3103         };
3104
3105         dest_map->skb = src_map->skb;
3106         pci_unmap_addr_set(dest_map, mapping,
3107                            pci_unmap_addr(src_map, mapping));
3108         dest_desc->addr_hi = src_desc->addr_hi;
3109         dest_desc->addr_lo = src_desc->addr_lo;
3110
3111         src_map->skb = NULL;
3112 }
3113
#if TG3_VLAN_TAG_USED
/* Deliver a received frame via the VLAN-accelerated receive path,
 * attaching the hardware-extracted 802.1Q tag.  Compiled in only when
 * VLAN support is configured (see TG3_VLAN_TAG_USED above).
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
        return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3120
3121 /* The RX ring scheme is composed of multiple rings which post fresh
3122  * buffers to the chip, and one special ring the chip uses to report
3123  * status back to the host.
3124  *
3125  * The special ring reports the status of received packets to the
3126  * host.  The chip does not write into the original descriptor the
3127  * RX buffer was obtained from.  The chip simply takes the original
3128  * descriptor as provided by the host, updates the status and length
3129  * field, then writes this into the next status ring entry.
3130  *
3131  * Each ring the host uses to post buffers to the chip is described
3132  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
3133  * it is first placed into the on-chip ram.  When the packet's length
3134  * is known, it walks down the TG3_BDINFO entries to select the ring.
3135  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3136  * which is within the range of the new packet's length is chosen.
3137  *
3138  * The "separate ring for rx status" scheme may sound queer, but it makes
3139  * sense from a cache coherency perspective.  If only the host writes
3140  * to the buffer post rings, and only the chip writes to the rx status
3141  * rings, then cache lines never move beyond shared-modified state.
3142  * If both the host and chip were to write into the same ring, cache line
3143  * eviction could occur since both entities want it in an exclusive state.
3144  */
/* Service the RX return ring: process up to 'budget' received frames,
 * replenish or recycle their buffers, and hand good frames to the
 * stack.  Returns the number of packets delivered.  Runs only from
 * tg3_poll() (NAPI), which provides the necessary serialization.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
        u32 work_mask;
        u32 sw_idx = tp->rx_rcb_ptr;
        u16 hw_idx;
        int received;

        hw_idx = tp->hw_status->idx[0].rx_producer;
        /*
         * We need to order the read of hw_idx and the read of
         * the opaque cookie.
         */
        rmb();
        work_mask = 0;
        received = 0;
        while (sw_idx != hw_idx && budget > 0) {
                struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
                unsigned int len;
                struct sk_buff *skb;
                dma_addr_t dma_addr;
                u32 opaque_key, desc_idx, *post_ptr;

                /* The opaque cookie tells us which producer ring
                 * (standard or jumbo) this buffer came from, and at
                 * which index.
                 */
                desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
                if (opaque_key == RXD_OPAQUE_RING_STD) {
                        dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
                                                  mapping);
                        skb = tp->rx_std_buffers[desc_idx].skb;
                        post_ptr = &tp->rx_std_ptr;
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
                        dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
                                                  mapping);
                        skb = tp->rx_jumbo_buffers[desc_idx].skb;
                        post_ptr = &tp->rx_jumbo_ptr;
                }
                else {
                        goto next_pkt_nopost;
                }

                /* Remember which ring(s) we consumed from so they get
                 * replenished after the loop.
                 */
                work_mask |= opaque_key;

                if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
                    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
                drop_it:
                        tg3_recycle_rx(tp, opaque_key,
                                       desc_idx, *post_ptr);
                drop_it_no_recycle:
                        /* Other statistics kept track of by card. */
                        tp->net_stats.rx_dropped++;
                        goto next_pkt;
                }

                len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

                if (len > RX_COPY_THRESHOLD 
                        && tp->rx_offset == 2
                        /* rx_offset != 2 iff this is a 5701 card running
                         * in PCI-X mode [see tg3_get_invariants()] */
                ) {
                        int skb_size;

                        /* Large frame: give the ring a fresh buffer and
                         * pass the original skb up without copying.
                         */
                        skb_size = tg3_alloc_rx_skb(tp, opaque_key,
                                                    desc_idx, *post_ptr);
                        if (skb_size < 0)
                                goto drop_it;

                        pci_unmap_single(tp->pdev, dma_addr,
                                         skb_size - tp->rx_offset,
                                         PCI_DMA_FROMDEVICE);

                        skb_put(skb, len);
                } else {
                        struct sk_buff *copy_skb;

                        /* Small frame (or 5701 PCI-X workaround): leave
                         * the DMA buffer in place and copy the payload
                         * into a fresh skb instead.
                         */
                        tg3_recycle_rx(tp, opaque_key,
                                       desc_idx, *post_ptr);

                        copy_skb = dev_alloc_skb(len + 2);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;

                        copy_skb->dev = tp->dev;
                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
                        memcpy(copy_skb->data, skb->data, len);
                        pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

                        /* We'll reuse the original ring buffer. */
                        skb = copy_skb;
                }

                /* Trust the hardware checksum only when the chip both
                 * computed it and it verified as correct (0xffff).
                 */
                if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
                    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
                    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
                      >> RXD_TCPCSUM_SHIFT) == 0xffff))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb->ip_summed = CHECKSUM_NONE;

                skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
                if (tp->vlgrp != NULL &&
                    desc->type_flags & RXD_FLAG_VLAN) {
                        tg3_vlan_rx(tp, skb,
                                    desc->err_vlan & RXD_VLAN_MASK);
                } else
#endif
                        netif_receive_skb(skb);

                tp->dev->last_rx = jiffies;
                received++;
                budget--;

next_pkt:
                (*post_ptr)++;
next_pkt_nopost:
                sw_idx++;
                sw_idx %= TG3_RX_RCB_RING_SIZE(tp);

                /* Refresh hw_idx to see if there is new work */
                if (sw_idx == hw_idx) {
                        hw_idx = tp->hw_status->idx[0].rx_producer;
                        rmb();
                }
        }

        /* ACK the status ring. */
        tp->rx_rcb_ptr = sw_idx;
        tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

        /* Refill RX ring(s). */
        if (work_mask & RXD_OPAQUE_RING_STD) {
                sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
                tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
                             sw_idx);
        }
        if (work_mask & RXD_OPAQUE_RING_JUMBO) {
                sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
                tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
                             sw_idx);
        }
        /* Force the mailbox writes out before anything else. */
        mmiowb();

        return received;
}
3291
/* NAPI poll callback.  Handles link-change events, runs TX completion,
 * and processes up to *budget RX packets.  Returns 0 once all work is
 * done (after re-enabling chip interrupts via tg3_restart_ints), or 1
 * to remain on the poll list.
 */
static int tg3_poll(struct net_device *netdev, int *budget)
{
        struct tg3 *tp = netdev_priv(netdev);
        struct tg3_hw_status *sblk = tp->hw_status;
        int done;

        /* handle link change and other phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
               TG3_FLAG_POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG) {
                        /* Clear the link-change bit before servicing it
                         * so a new event posted meanwhile is not lost.
                         */
                        sblk->status = SD_STATUS_UPDATED |
                                (sblk->status & ~SD_STATUS_LINK_CHG);
                        spin_lock(&tp->lock);
                        tg3_setup_phy(tp, 0);
                        spin_unlock(&tp->lock);
                }
        }

        /* run TX completion thread */
        if (sblk->idx[0].tx_consumer != tp->tx_cons) {
                tg3_tx(tp);
        }

        /* run RX thread, within the bounds set by NAPI.
         * All RX "locking" is done by ensuring outside
         * code synchronizes with dev->poll()
         */
        if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
                int orig_budget = *budget;
                int work_done;

                /* Honor the per-device quota as well as the caller's
                 * overall budget.
                 */
                if (orig_budget > netdev->quota)
                        orig_budget = netdev->quota;

                work_done = tg3_rx(tp, orig_budget);

                *budget -= work_done;
                netdev->quota -= work_done;
        }

        if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
                /* Record which status block update we have consumed;
                 * the rmb() orders the tag read against the work check
                 * below.
                 */
                tp->last_tag = sblk->status_tag;
                rmb();
        } else
                sblk->status &= ~SD_STATUS_UPDATED;

        /* if no more work, tell net stack and NIC we're done */
        done = !tg3_has_work(tp);
        if (done) {
                netif_rx_complete(netdev);
                tg3_restart_ints(tp);
        }

        return (done ? 0 : 1);
}
3348
/* Shut off interrupt-handler activity: set irq_sync (the handlers
 * check it via tg3_irq_sync() and bail out), then wait for a handler
 * already running on another CPU to finish.  Must not be called while
 * a previous quiesce is still in effect (the BUG_ON guards this).
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
        BUG_ON(tp->irq_sync);

        tp->irq_sync = 1;
        /* Make the irq_sync store visible before waiting on the IRQ. */
        smp_mb();

        synchronize_irq(tp->pdev->irq);
}
3358
/* Non-zero while tg3_irq_quiesce() has suspended interrupt handling;
 * the ISRs test this and return without scheduling NAPI.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
        return tp->irq_sync;
}
3363
3364 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3365  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3366  * with as well.  Most of the time, this is not necessary except when
3367  * shutting down the device.
3368  */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
        /* Optionally quiesce the IRQ handler first (see comment above). */
        if (irq_sync)
                tg3_irq_quiesce(tp);
        /* Lock ordering: tp->lock first, then tp->tx_lock. */
        spin_lock_bh(&tp->lock);
        spin_lock(&tp->tx_lock);
}
3376
/* Release the locks taken by tg3_full_lock(), in reverse order. */
static inline void tg3_full_unlock(struct tg3 *tp)
{
        spin_unlock(&tp->tx_lock);
        spin_unlock_bh(&tp->lock);
}
3382
3383 /* One-shot MSI handler - Chip automatically disables interrupt
3384  * after sending MSI so driver doesn't have to do it.
3385  */
3386 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3387 {
3388         struct net_device *dev = dev_id;
3389         struct tg3 *tp = netdev_priv(dev);
3390
3391         prefetch(tp->hw_status);
3392         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3393
3394         if (likely(!tg3_irq_sync(tp)))
3395                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3396
3397         return IRQ_HANDLED;
3398 }
3399
3400 /* MSI ISR - No need to check for interrupt sharing and no need to
3401  * flush status block and interrupt mailbox. PCI ordering rules
3402  * guarantee that MSI will arrive after the status block.
3403  */
3404 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3405 {
3406         struct net_device *dev = dev_id;
3407         struct tg3 *tp = netdev_priv(dev);
3408
3409         prefetch(tp->hw_status);
3410         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3411         /*
3412          * Writing any value to intr-mbox-0 clears PCI INTA# and
3413          * chip-internal interrupt pending events.
3414          * Writing non-zero to intr-mbox-0 additional tells the
3415          * NIC to stop sending us irqs, engaging "in-intr-handler"
3416          * event coalescing.
3417          */
3418         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3419         if (likely(!tg3_irq_sync(tp)))
3420                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3421
3422         return IRQ_RETVAL(1);
3423 }
3424
/* INTx interrupt handler for chips that do not use tagged status.
 * Returns IRQ_HANDLED only when the interrupt was actually ours, so
 * shared-line semantics work correctly.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if ((sblk->status & SD_STATUS_UPDATED) ||
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                /*
                 * Writing any value to intr-mbox-0 clears PCI INTA# and
                 * chip-internal interrupt pending events.
                 * Writing non-zero to intr-mbox-0 additional tells the
                 * NIC to stop sending us irqs, engaging "in-intr-handler"
                 * event coalescing.
                 */
                tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                             0x00000001);
                /* Bail out if the device is quiesced for a reset. */
                if (tg3_irq_sync(tp))
                        goto out;
                sblk->status &= ~SD_STATUS_UPDATED;
                if (likely(tg3_has_work(tp))) {
                        prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
                        netif_rx_schedule(dev);         /* schedule NAPI poll */
                } else {
                        /* No work, shared interrupt perhaps?  re-enable
                         * interrupts, and flush that PCI write
                         */
                        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                                0x00000000);
                }
        } else {        /* shared interrupt */
                handled = 0;
        }
out:
        return IRQ_RETVAL(handled);
}
3467
/* INTx interrupt handler for chips using tagged status blocks.  A
 * changed status tag (rather than SD_STATUS_UPDATED) indicates that
 * new events have been posted since we last looked.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if ((sblk->status_tag != tp->last_tag) ||
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                /*
                 * writing any value to intr-mbox-0 clears PCI INTA# and
                 * chip-internal interrupt pending events.
                 * writing non-zero to intr-mbox-0 additional tells the
                 * NIC to stop sending us irqs, engaging "in-intr-handler"
                 * event coalescing.
                 */
                tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                             0x00000001);
                /* Bail out if the device is quiesced for a reset. */
                if (tg3_irq_sync(tp))
                        goto out;
                if (netif_rx_schedule_prep(dev)) {
                        prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
                        /* Update last_tag to mark that this status has been
                         * seen. Because interrupt may be shared, we may be
                         * racing with tg3_poll(), so only update last_tag
                         * if tg3_poll() is not scheduled.
                         */
                        tp->last_tag = sblk->status_tag;
                        __netif_rx_schedule(dev);
                }
        } else {        /* shared interrupt */
                handled = 0;
        }
out:
        return IRQ_RETVAL(handled);
}
3509
3510 /* ISR for interrupt test */
3511 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3512                 struct pt_regs *regs)
3513 {
3514         struct net_device *dev = dev_id;
3515         struct tg3 *tp = netdev_priv(dev);
3516         struct tg3_hw_status *sblk = tp->hw_status;
3517
3518         if ((sblk->status & SD_STATUS_UPDATED) ||
3519             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3520                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3521                              0x00000001);
3522                 return IRQ_RETVAL(1);
3523         }
3524         return IRQ_RETVAL(0);
3525 }
3526
3527 static int tg3_init_hw(struct tg3 *);
3528 static int tg3_halt(struct tg3 *, int, int);
3529
3530 #ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point (netconsole etc.): invoke the INTx handler
 * directly when normal interrupt delivery is unavailable.
 */
static void tg3_poll_controller(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        tg3_interrupt(tp->pdev->irq, dev, NULL);
}
3537 #endif
3538
/* Process-context worker (tp->reset_task) that halts and fully
 * re-initializes the chip, e.g. after a TX timeout.
 */
static void tg3_reset_task(void *_data)
{
        struct tg3 *tp = _data;
        unsigned int restart_timer;

        tg3_full_lock(tp, 0);
        tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;

        if (!netif_running(tp->dev)) {
                /* Interface was brought down before we ran. */
                tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
                tg3_full_unlock(tp);
                return;
        }

        tg3_full_unlock(tp);

        /* Stop NAPI/queue activity before quiescing the IRQ below. */
        tg3_netif_stop(tp);

        /* irq_sync=1: also synchronize with the interrupt handler. */
        tg3_full_lock(tp, 1);

        /* Latch and clear the "restart timer" request under the lock. */
        restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
        tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        /* NOTE(review): tg3_init_hw() returns int but the result is
         * ignored here -- confirm whether a failed re-init should
         * really leave the interface started.
         */
        tg3_init_hw(tp);

        tg3_netif_start(tp);

        if (restart_timer)
                mod_timer(&tp->timer, jiffies + 1);

        tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;

        tg3_full_unlock(tp);
}
3574
/* TX watchdog callback from the networking core.  The actual reset is
 * deferred to process context via tp->reset_task.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
               dev->name);

        schedule_work(&tp->reset_task);
}
3584
3585 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3586 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3587 {
3588         u32 base = (u32) mapping & 0xffffffff;
3589
3590         return ((base > 0xffffdcc0) &&
3591                 (base + len + 8 < base));
3592 }
3593
3594 /* Test for DMA addresses > 40-bit */
3595 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3596                                           int len)
3597 {
3598 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3599         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3600                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3601         return 0;
3602 #else
3603         return 0;
3604 #endif
3605 }
3606
3607 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3608
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Linearize 'skb' into a freshly allocated copy that is safe to DMA,
 * install it at ring slot *start, and unmap/release the descriptors
 * the caller already filled for the original skb (from *start up to,
 * but not including, last_plus_one).  On success *start is advanced
 * past the new descriptor.  Returns 0 on success, -1 if the copy
 * could not be allocated or itself crosses a 4GB boundary (the packet
 * is dropped either way; the original skb is always freed).
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
                                       u32 last_plus_one, u32 *start,
                                       u32 base_flags, u32 mss)
{
        struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
        dma_addr_t new_addr = 0;
        u32 entry = *start;
        int i, ret = 0;

        if (!new_skb) {
                ret = -1;
        } else {
                /* New SKB is guaranteed to be linear. */
                entry = *start;
                new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
                                          PCI_DMA_TODEVICE);
                /* Make sure new skb does not cross any 4G boundaries.
                 * Drop the packet if it does.
                 */
                if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
                        ret = -1;
                        dev_kfree_skb(new_skb);
                        new_skb = NULL;
                } else {
                        tg3_set_txd(tp, entry, new_addr, new_skb->len,
                                    base_flags, 1 | (mss << 1));
                        *start = NEXT_TX(entry);
                }
        }

        /* Now clean up the sw ring entries. */
        i = 0;
        while (entry != last_plus_one) {
                int len;

                /* Slot 0 held the linear head; later slots held frags. */
                if (i == 0)
                        len = skb_headlen(skb);
                else
                        len = skb_shinfo(skb)->frags[i-1].size;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
                                 len, PCI_DMA_TODEVICE);
                if (i == 0) {
                        /* Slot 0 takes ownership of the linearized copy
                         * (NULL when allocation or the 4G test failed).
                         */
                        tp->tx_buffers[entry].skb = new_skb;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
                } else {
                        tp->tx_buffers[entry].skb = NULL;
                }
                entry = NEXT_TX(entry);
                i++;
        }

        dev_kfree_skb(skb);

        return ret;
}
3666
3667 static void tg3_set_txd(struct tg3 *tp, int entry,
3668                         dma_addr_t mapping, int len, u32 flags,
3669                         u32 mss_and_is_end)
3670 {
3671         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3672         int is_end = (mss_and_is_end & 0x1);
3673         u32 mss = (mss_and_is_end >> 1);
3674         u32 vlan_tag = 0;
3675
3676         if (is_end)
3677                 flags |= TXD_FLAG_END;
3678         if (flags & TXD_FLAG_VLAN) {
3679                 vlan_tag = flags >> 16;
3680                 flags &= 0xffff;
3681         }
3682         vlan_tag |= (mss << TXD_MSS_SHIFT);
3683
3684         txd->addr_hi = ((u64) mapping >> 32);
3685         txd->addr_lo = ((u64) mapping & 0xffffffff);
3686         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3687         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3688 }
3689
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 *
 * Maps the skb (linear head plus page frags) into consecutive TX
 * descriptors, sets up checksum/TSO/VLAN flags, then rings the TX
 * producer mailbox.  Returns NETDEV_TX_OK, or NETDEV_TX_LOCKED /
 * NETDEV_TX_BUSY when the lock or ring space is unavailable.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        dma_addr_t mapping;
        u32 len, entry, base_flags, mss;

        len = skb_headlen(skb);

        /* No BH disabling for tx_lock here.  We are running in BH disabled
         * context and TX reclaim runs via tp->poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (!spin_trylock(&tp->tx_lock))
                return NETDEV_TX_LOCKED;

        if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                spin_unlock(&tp->tx_lock);
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
#if TG3_TSO_SUPPORT != 0
        mss = 0;
        if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
            (mss = skb_shinfo(skb)->tso_size) != 0) {
                int tcp_opt_len, ip_tcp_len;

                /* TSO needs a private, writable copy of the headers. */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                tcp_opt_len = ((skb->h.th->doff - 5) * 4);
                ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                /* Hardware fills in the per-segment IP/TCP checksums. */
                skb->nh.iph->check = 0;
                skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);

                skb->h.th->check = 0;

                /* Pass the total header length in the upper mss bits. */
                mss |= (ip_tcp_len + tcp_opt_len) << 9;
        }
        else if (skb->ip_summed == CHECKSUM_HW)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
#else
        mss = 0;
        if (skb->ip_summed == CHECKSUM_HW)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
#endif
#if TG3_VLAN_TAG_USED
        /* VLAN tag goes in the upper 16 bits (see tg3_set_txd). */
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        /* Queue skb data, a.k.a. the main skb fragment. */
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = pci_map_page(tp->pdev,
                                               frag->page,
                                               frag->page_offset,
                                               len, PCI_DMA_TODEVICE);

                        /* Only descriptor 0 owns the skb pointer. */
                        tp->tx_buffers[entry].skb = NULL;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

                        tg3_set_txd(tp, entry, mapping, len,
                                    base_flags, (i == last) | (mss << 1));

                        entry = NEXT_TX(entry);
                }
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        /* Stop the queue when nearly full; re-wake immediately if a
         * concurrent completion already freed enough descriptors.
         */
        if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
                netif_stop_queue(dev);
                if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();
        spin_unlock(&tp->tx_lock);

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
3814
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 *
 * Queues one skb on the send ring, detecting mappings that would trip
 * the 4GB-boundary / 40-bit-address DMA hardware bugs and rerouting the
 * packet through tigon3_dma_hwbug_workaround() when needed.
 *
 * Returns NETDEV_TX_OK (also when the packet is silently dropped),
 * NETDEV_TX_BUSY when the ring is full, or NETDEV_TX_LOCKED when
 * tx_lock is contended.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;

	len = skb_headlen(skb);

	/* No BH disabling for tx_lock here.  We are running in BH disabled
	 * context and TX reclaim runs via tp->poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (!spin_trylock(&tp->tx_lock))
		return NETDEV_TX_LOCKED;

	/* Need one descriptor for the linear head plus one per fragment. */
	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		spin_unlock(&tp->tx_lock);
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_HW)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_TSO_SUPPORT != 0
	mss = 0;
	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
	    (mss = skb_shinfo(skb)->tso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* The IP/TCP headers are rewritten below, so take a
		 * private copy if they are still shared with a clone.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = ((skb->h.th->doff - 5) * 4);
		ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		/* Prime the header checksums for segmentation: with HW
		 * TSO the chip fills both in; otherwise seed the TCP
		 * pseudo-header checksum for the firmware path.
		 */
		skb->nh.iph->check = 0;
		skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			skb->h.th->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		}
		else {
			skb->h.th->check =
				~csum_tcpudp_magic(skb->nh.iph->saddr,
						   skb->nh.iph->daddr,
						   0, IPPROTO_TCP, 0);
		}

		/* Encode IP/TCP option lengths; HW-TSO parts and the
		 * 5705 carry them in the mss field, others in the
		 * descriptor base flags.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || skb->nh.iph->ihl > 5) {
				int tsflags;

				tsflags = ((skb->nh.iph->ihl - 5) +
					   (tcp_opt_len >> 2));
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || skb->nh.iph->ihl > 5) {
				int tsflags;

				tsflags = ((skb->nh.iph->ihl - 5) +
					   (tcp_opt_len >> 2));
				base_flags |= tsflags << 12;
			}
		}
	}
#else
	mss = 0;
#endif
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	/* A mapping that straddles a 4GB boundary trips the DMA bug. */
	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			/* Only the head descriptor owns the skb pointer. */
			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to this packet's first descriptor so the
		 * workaround can relink the whole packet.
		 */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
		/* Stop while a maximally-fragmented skb still fits, but
		 * re-wake immediately if reclaim already freed room.
		 */
		netif_stop_queue(dev);
		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();
	spin_unlock(&tp->tx_lock);

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
3995
3996 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3997                                int new_mtu)
3998 {
3999         dev->mtu = new_mtu;
4000
4001         if (new_mtu > ETH_DATA_LEN) {
4002                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4003                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4004                         ethtool_op_set_tso(dev, 0);
4005                 }
4006                 else
4007                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4008         } else {
4009                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4010                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4011                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4012         }
4013 }
4014
/* net_device->change_mtu handler.
 *
 * Validates the requested MTU against the chip's limits.  When the
 * interface is running, the chip is halted and re-initialized under
 * the full lock so the rx rings are rebuilt for the new buffer size.
 * Returns 0 on success or -EINVAL for an out-of-range MTU.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	/* Quiesce packet processing before taking the full lock. */
	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_init_hw(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return 0;
}
4046
4047 /* Free up pending packets in all rx/tx rings.
4048  *
4049  * The chip has been shut down and the driver detached from
4050  * the networking, so no interrupts or new tx packets will
4051  * end up in the driver.  tp->{tx,}lock is not held and we are not
4052  * in an interrupt context and thus may sleep.
4053  */
4054 static void tg3_free_rings(struct tg3 *tp)
4055 {
4056         struct ring_info *rxp;
4057         int i;
4058
4059         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4060                 rxp = &tp->rx_std_buffers[i];
4061
4062                 if (rxp->skb == NULL)
4063                         continue;
4064                 pci_unmap_single(tp->pdev,
4065                                  pci_unmap_addr(rxp, mapping),
4066                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4067                                  PCI_DMA_FROMDEVICE);
4068                 dev_kfree_skb_any(rxp->skb);
4069                 rxp->skb = NULL;
4070         }
4071
4072         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4073                 rxp = &tp->rx_jumbo_buffers[i];
4074
4075                 if (rxp->skb == NULL)
4076                         continue;
4077                 pci_unmap_single(tp->pdev,
4078                                  pci_unmap_addr(rxp, mapping),
4079                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4080                                  PCI_DMA_FROMDEVICE);
4081                 dev_kfree_skb_any(rxp->skb);
4082                 rxp->skb = NULL;
4083         }
4084
4085         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4086                 struct tx_ring_info *txp;
4087                 struct sk_buff *skb;
4088                 int j;
4089
4090                 txp = &tp->tx_buffers[i];
4091                 skb = txp->skb;
4092
4093                 if (skb == NULL) {
4094                         i++;
4095                         continue;
4096                 }
4097
4098                 pci_unmap_single(tp->pdev,
4099                                  pci_unmap_addr(txp, mapping),
4100                                  skb_headlen(skb),
4101                                  PCI_DMA_TODEVICE);
4102                 txp->skb = NULL;
4103
4104                 i++;
4105
4106                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4107                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4108                         pci_unmap_page(tp->pdev,
4109                                        pci_unmap_addr(txp, mapping),
4110                                        skb_shinfo(skb)->frags[j].size,
4111                                        PCI_DMA_TODEVICE);
4112                         i++;
4113                 }
4114
4115                 dev_kfree_skb_any(skb);
4116         }
4117 }
4118
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static void tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class parts have no jumbo ring; they use jumbo-sized
	 * buffers on the standard ring when the MTU demands it.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		/* NOTE(review): the 64 bytes subtracted here look like
		 * reserved tailroom — confirm against the rx allocation
		 * path before changing.
		 */
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		/* Allocation failure just leaves the ring short. */
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
				     -1, i) < 0)
			break;
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0)
				break;
		}
	}
}
4188
/*
 * Release the shadow-ring arrays and all DMA-coherent blocks allocated
 * by tg3_alloc_consistent().  Safe on a partially-allocated state: each
 * pointer is checked and NULLed, so a repeated call is a no-op.
 *
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	/* rx_jumbo_buffers and tx_buffers point into this allocation
	 * (see tg3_alloc_consistent), so one kfree covers all three.
	 */
	kfree(tp->rx_std_buffers);
	tp->rx_std_buffers = NULL;
	if (tp->rx_std) {
		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
				    tp->rx_std, tp->rx_std_mapping);
		tp->rx_std = NULL;
	}
	if (tp->rx_jumbo) {
		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
				    tp->rx_jumbo, tp->rx_jumbo_mapping);
		tp->rx_jumbo = NULL;
	}
	if (tp->rx_rcb) {
		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
				    tp->rx_rcb, tp->rx_rcb_mapping);
		tp->rx_rcb = NULL;
	}
	if (tp->tx_ring) {
		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
			tp->tx_ring, tp->tx_desc_mapping);
		tp->tx_ring = NULL;
	}
	if (tp->hw_status) {
		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
				    tp->hw_status, tp->status_mapping);
		tp->hw_status = NULL;
	}
	if (tp->hw_stats) {
		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
				    tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
4228
4229 /*
4230  * Must not be invoked with interrupt sources disabled and
4231  * the hardware shutdown down.  Can sleep.
4232  */
4233 static int tg3_alloc_consistent(struct tg3 *tp)
4234 {
4235         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4236                                       (TG3_RX_RING_SIZE +
4237                                        TG3_RX_JUMBO_RING_SIZE)) +
4238                                      (sizeof(struct tx_ring_info) *
4239                                       TG3_TX_RING_SIZE),
4240                                      GFP_KERNEL);
4241         if (!tp->rx_std_buffers)
4242                 return -ENOMEM;
4243
4244         memset(tp->rx_std_buffers, 0,
4245                (sizeof(struct ring_info) *
4246                 (TG3_RX_RING_SIZE +
4247                  TG3_RX_JUMBO_RING_SIZE)) +
4248                (sizeof(struct tx_ring_info) *
4249                 TG3_TX_RING_SIZE));
4250
4251         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4252         tp->tx_buffers = (struct tx_ring_info *)
4253                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4254
4255         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4256                                           &tp->rx_std_mapping);
4257         if (!tp->rx_std)
4258                 goto err_out;
4259
4260         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4261                                             &tp->rx_jumbo_mapping);
4262
4263         if (!tp->rx_jumbo)
4264                 goto err_out;
4265
4266         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4267                                           &tp->rx_rcb_mapping);
4268         if (!tp->rx_rcb)
4269                 goto err_out;
4270
4271         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4272                                            &tp->tx_desc_mapping);
4273         if (!tp->tx_ring)
4274                 goto err_out;
4275
4276         tp->hw_status = pci_alloc_consistent(tp->pdev,
4277                                              TG3_HW_STATUS_SIZE,
4278                                              &tp->status_mapping);
4279         if (!tp->hw_status)
4280                 goto err_out;
4281
4282         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4283                                             sizeof(struct tg3_hw_stats),
4284                                             &tp->stats_mapping);
4285         if (!tp->hw_stats)
4286                 goto err_out;
4287
4288         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4289         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4290
4291         return 0;
4292
4293 err_out:
4294         tg3_free_consistent(tp);
4295         return -ENOMEM;
4296 }
4297
4298 #define MAX_WAIT_CNT 1000
4299
/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 *
 * @ofs:        offset of the block's mode register
 * @enable_bit: enable bit to clear and poll on
 * @silent:     when non-zero, suppress the timeout message
 *
 * Returns 0 on success (including blocks the 5705+ cannot disable),
 * -ENODEV when the bit fails to clear within MAX_WAIT_CNT polls.
 * NOTE: with @silent set, a timeout is also reported as success —
 * callers passing silent treat the shutdown as best-effort.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		};
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	/* Poll up to MAX_WAIT_CNT * 100us for the block to go idle. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		printk(KERN_ERR PFX "tg3_stop_block timed out, "
		       "ofs=%lx enable_bit=%x\n",
		       ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
4345
/* tp->lock is held.
 *
 * Quiesce the MAC and DMA engines: disable interrupts and MAC rx,
 * stop the rx-side blocks, then the tx/DMA-side blocks, drain the MAC
 * transmitter, stop host coalescing and the buffer manager, and reset
 * the FTQs.  The per-block error codes are OR-ed into the return
 * value; with @silent the per-block timeouts are not logged.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting new frames at the MAC first. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Receive-path blocks. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Send-path and DMA blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the MAC transmitter and wait for it to drain. */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the flow-through queue reset. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Wipe stale status/statistics so restart sees a clean slate. */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4408
4409 /* tp->lock is held. */
4410 static int tg3_nvram_lock(struct tg3 *tp)
4411 {
4412         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4413                 int i;
4414
4415                 if (tp->nvram_lock_cnt == 0) {
4416                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4417                         for (i = 0; i < 8000; i++) {
4418                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4419                                         break;
4420                                 udelay(20);
4421                         }
4422                         if (i == 8000) {
4423                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4424                                 return -ENODEV;
4425                         }
4426                 }
4427                 tp->nvram_lock_cnt++;
4428         }
4429         return 0;
4430 }
4431
4432 /* tp->lock is held. */
4433 static void tg3_nvram_unlock(struct tg3 *tp)
4434 {
4435         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4436                 if (tp->nvram_lock_cnt > 0)
4437                         tp->nvram_lock_cnt--;
4438                 if (tp->nvram_lock_cnt == 0)
4439                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4440         }
4441 }
4442
4443 /* tp->lock is held. */
4444 static void tg3_enable_nvram_access(struct tg3 *tp)
4445 {
4446         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4447             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4448                 u32 nvaccess = tr32(NVRAM_ACCESS);
4449
4450                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4451         }
4452 }
4453
4454 /* tp->lock is held. */
4455 static void tg3_disable_nvram_access(struct tg3 *tp)
4456 {
4457         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4458             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4459                 u32 nvaccess = tr32(NVRAM_ACCESS);
4460
4461                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4462         }
4463 }
4464
4465 /* tp->lock is held. */
4466 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4467 {
4468         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4469                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4470                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4471
4472         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4473                 switch (kind) {
4474                 case RESET_KIND_INIT:
4475                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4476                                       DRV_STATE_START);
4477                         break;
4478
4479                 case RESET_KIND_SHUTDOWN:
4480                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4481                                       DRV_STATE_UNLOAD);
4482                         break;
4483
4484                 case RESET_KIND_SUSPEND:
4485                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4486                                       DRV_STATE_SUSPEND);
4487                         break;
4488
4489                 default:
4490                         break;
4491                 };
4492         }
4493 }
4494
4495 /* tp->lock is held. */
4496 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4497 {
4498         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4499                 switch (kind) {
4500                 case RESET_KIND_INIT:
4501                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4502                                       DRV_STATE_START_DONE);
4503                         break;
4504
4505                 case RESET_KIND_SHUTDOWN:
4506                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4507                                       DRV_STATE_UNLOAD_DONE);
4508                         break;
4509
4510                 default:
4511                         break;
4512                 };
4513         }
4514 }
4515
4516 /* tp->lock is held. */
4517 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4518 {
4519         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4520                 switch (kind) {
4521                 case RESET_KIND_INIT:
4522                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4523                                       DRV_STATE_START);
4524                         break;
4525
4526                 case RESET_KIND_SHUTDOWN:
4527                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4528                                       DRV_STATE_UNLOAD);
4529                         break;
4530
4531                 case RESET_KIND_SUSPEND:
4532                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4533                                       DRV_STATE_SUSPEND);
4534                         break;
4535
4536                 default:
4537                         break;
4538                 };
4539         }
4540 }
4541
4542 static void tg3_stop_fw(struct tg3 *);
4543
4544 /* tp->lock is held. */
4545 static int tg3_chip_reset(struct tg3 *tp)
4546 {
4547         u32 val;
4548         void (*write_op)(struct tg3 *, u32, u32);
4549         int i;
4550
4551         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4552                 tg3_nvram_lock(tp);
4553                 /* No matching tg3_nvram_unlock() after this because
4554                  * chip reset below will undo the nvram lock.
4555                  */
4556                 tp->nvram_lock_cnt = 0;
4557         }
4558
4559         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4560             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
4561             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4562                 tw32(GRC_FASTBOOT_PC, 0);
4563
4564         /*
4565          * We must avoid the readl() that normally takes place.
4566          * It locks machines, causes machine checks, and other
4567          * fun things.  So, temporarily disable the 5701
4568          * hardware workaround, while we do the reset.
4569          */
4570         write_op = tp->write32;
4571         if (write_op == tg3_write_flush_reg32)
4572                 tp->write32 = tg3_write32;
4573
4574         /* do the reset */
4575         val = GRC_MISC_CFG_CORECLK_RESET;
4576
4577         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4578                 if (tr32(0x7e2c) == 0x60) {
4579                         tw32(0x7e2c, 0x20);
4580                 }
4581                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4582                         tw32(GRC_MISC_CFG, (1 << 29));
4583                         val |= (1 << 29);
4584                 }
4585         }
4586
4587         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4588                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4589         tw32(GRC_MISC_CFG, val);
4590
4591         /* restore 5701 hardware bug workaround write method */
4592         tp->write32 = write_op;
4593
4594         /* Unfortunately, we have to delay before the PCI read back.
4595          * Some 575X chips even will not respond to a PCI cfg access
4596          * when the reset command is given to the chip.
4597          *
4598          * How do these hardware designers expect things to work
4599          * properly if the PCI write is posted for a long period
4600          * of time?  It is always necessary to have some method by
4601          * which a register read back can occur to push the write
4602          * out which does the reset.
4603          *
4604          * For most tg3 variants the trick below was working.
4605          * Ho hum...
4606          */
4607         udelay(120);
4608
4609         /* Flush PCI posted writes.  The normal MMIO registers
4610          * are inaccessible at this time so this is the only
4611          * way to make this reliably (actually, this is no longer
4612          * the case, see above).  I tried to use indirect
4613          * register read/write but this upset some 5701 variants.
4614          */
4615         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4616
4617         udelay(120);
4618
4619         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4620                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4621                         int i;
4622                         u32 cfg_val;
4623
4624                         /* Wait for link training to complete.  */
4625                         for (i = 0; i < 5000; i++)
4626                                 udelay(100);
4627
4628                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4629                         pci_write_config_dword(tp->pdev, 0xc4,
4630                                                cfg_val | (1 << 15));
4631                 }
4632                 /* Set PCIE max payload size and clear error status.  */
4633                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4634         }
4635
4636         /* Re-enable indirect register accesses. */
4637         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4638                                tp->misc_host_ctrl);
4639
4640         /* Set MAX PCI retry to zero. */
4641         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4642         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4643             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4644                 val |= PCISTATE_RETRY_SAME_DMA;
4645         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4646
4647         pci_restore_state(tp->pdev);
4648
4649         /* Make sure PCI-X relaxed ordering bit is clear. */
4650         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4651         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4652         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4653
4654         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4655                 u32 val;
4656
4657                 /* Chip reset on 5780 will reset MSI enable bit,
4658                  * so need to restore it.
4659                  */
4660                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4661                         u16 ctrl;
4662
4663                         pci_read_config_word(tp->pdev,
4664                                              tp->msi_cap + PCI_MSI_FLAGS,
4665                                              &ctrl);
4666                         pci_write_config_word(tp->pdev,
4667                                               tp->msi_cap + PCI_MSI_FLAGS,
4668                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4669                         val = tr32(MSGINT_MODE);
4670                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4671                 }
4672
4673                 val = tr32(MEMARB_MODE);
4674                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4675
4676         } else
4677                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4678
4679         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4680                 tg3_stop_fw(tp);
4681                 tw32(0x5000, 0x400);
4682         }
4683
4684         tw32(GRC_MODE, tp->grc_mode);
4685
4686         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4687                 u32 val = tr32(0xc4);
4688
4689                 tw32(0xc4, val | (1 << 15));
4690         }
4691
4692         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4693             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4694                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4695                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4696                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4697                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4698         }
4699
4700         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4701                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4702                 tw32_f(MAC_MODE, tp->mac_mode);
4703         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4704                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4705                 tw32_f(MAC_MODE, tp->mac_mode);
4706         } else
4707                 tw32_f(MAC_MODE, 0);
4708         udelay(40);
4709
4710         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4711                 /* Wait for firmware initialization to complete. */
4712                 for (i = 0; i < 100000; i++) {
4713                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4714                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4715                                 break;
4716                         udelay(10);
4717                 }
4718                 if (i >= 100000) {
4719                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4720                                "firmware will not restart magic=%08x\n",
4721                                tp->dev->name, val);
4722                         return -ENODEV;
4723                 }
4724         }
4725
4726         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4727             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4728                 u32 val = tr32(0x7c00);
4729
4730                 tw32(0x7c00, val | (1 << 25));
4731         }
4732
4733         /* Reprobe ASF enable state.  */
4734         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4735         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4736         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4737         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4738                 u32 nic_cfg;
4739
4740                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4741                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4742                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4743                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4744                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4745                 }
4746         }
4747
4748         return 0;
4749 }
4750
4751 /* tp->lock is held. */
4752 static void tg3_stop_fw(struct tg3 *tp)
4753 {
4754         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4755                 u32 val;
4756                 int i;
4757
4758                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4759                 val = tr32(GRC_RX_CPU_EVENT);
4760                 val |= (1 << 14);
4761                 tw32(GRC_RX_CPU_EVENT, val);
4762
4763                 /* Wait for RX cpu to ACK the event.  */
4764                 for (i = 0; i < 100; i++) {
4765                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4766                                 break;
4767                         udelay(1);
4768                 }
4769         }
4770 }
4771
/* Bring the controller to a full stop: quiesce the ASF firmware,
 * signal the impending reset to the bootcode, abort MAC/DMA activity,
 * reset the chip, then post the post-reset signatures.
 *
 * Returns 0 on success or the error from tg3_chip_reset().
 *
 * tp->lock is held.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
        int reset_err;

        tg3_stop_fw(tp);
        tg3_write_sig_pre_reset(tp, kind);
        tg3_abort_hw(tp, silent);

        reset_err = tg3_chip_reset(tp);

        /* Signatures are written even when the reset failed. */
        tg3_write_sig_legacy(tp, kind);
        tg3_write_sig_post_reset(tp, kind);

        return reset_err;
}
4792
/* Layout of the 5701 A0 workaround firmware image loaded by
 * tg3_load_5701_a0_firmware_fix().  Addresses are in the embedded
 * CPU's view of memory; lengths are in bytes.
 * NOTE(review): "RELASE" below looks like a typo for "RELEASE", but
 * the identifier is kept as-is in case it is referenced elsewhere.
 */
#define TG3_FW_RELEASE_MAJOR    0x0
#define TG3_FW_RELASE_MINOR     0x0
#define TG3_FW_RELEASE_FIX      0x0
#define TG3_FW_START_ADDR       0x08000000
#define TG3_FW_TEXT_ADDR        0x08000000
#define TG3_FW_TEXT_LEN         0x9c0
#define TG3_FW_RODATA_ADDR      0x080009c0
#define TG3_FW_RODATA_LEN       0x60
#define TG3_FW_DATA_ADDR        0x08000a40
#define TG3_FW_DATA_LEN         0x20
#define TG3_FW_SBSS_ADDR        0x08000a60
#define TG3_FW_SBSS_LEN         0xc
#define TG3_FW_BSS_ADDR         0x08000a70
#define TG3_FW_BSS_LEN          0x10
4807
/* .text section of the 5701 A0 workaround firmware (MIPS machine
 * code, opaque to the driver).  Written verbatim into the RX/TX CPU
 * scratch memory by tg3_load_firmware_cpu(); do not edit by hand.
 */
static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
        0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
        0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
        0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
        0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
        0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
        0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
        0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
        0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
        0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
        0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
        0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
        0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
        0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
        0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
        0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
        0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
        0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
        0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
        0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
        0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
        0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
        0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
        0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
        0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
        0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
        0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
        0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
        0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
        0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
        0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
        0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
        0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
        0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
        0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
        0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
        0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
        0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
        0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
        0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
        0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
        0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
        0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
        0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
        0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
        0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
        0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
        0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
        0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
        0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
        0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
        0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
        0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
        0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
        0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
        0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
        0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
        0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
        0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
        0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
        0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
        0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
        0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
        0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
        0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
        0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
        0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
        0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
        0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
        0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
        0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
        0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
        0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
        0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
        0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
4901
/* .rodata section of the 5701 A0 workaround firmware (mostly ASCII
 * message strings used by the firmware itself).  Loaded verbatim by
 * tg3_load_firmware_cpu(); do not edit by hand.
 */
static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
        0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
        0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
        0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
        0x00000000
};
4909
/* The .data section would be here; tg3_load_5701_a0_firmware_fix()
 * passes a NULL data pointer instead, which makes the loader
 * zero-fill the region — equivalent to this all-zero table.
 */
#if 0 /* All zeros, don't eat up space with it. */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
4916
/* Host-side addresses and sizes (16KB each) of the scratch memories
 * belonging to the embedded RX and TX CPUs, into which firmware
 * images are written.
 */
#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000
4921
4922 /* tp->lock is held. */
4923 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4924 {
4925         int i;
4926
4927         BUG_ON(offset == TX_CPU_BASE &&
4928             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
4929
4930         if (offset == RX_CPU_BASE) {
4931                 for (i = 0; i < 10000; i++) {
4932                         tw32(offset + CPU_STATE, 0xffffffff);
4933                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4934                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4935                                 break;
4936                 }
4937
4938                 tw32(offset + CPU_STATE, 0xffffffff);
4939                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4940                 udelay(10);
4941         } else {
4942                 for (i = 0; i < 10000; i++) {
4943                         tw32(offset + CPU_STATE, 0xffffffff);
4944                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4945                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4946                                 break;
4947                 }
4948         }
4949
4950         if (i >= 10000) {
4951                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4952                        "and %s CPU\n",
4953                        tp->dev->name,
4954                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4955                 return -ENODEV;
4956         }
4957
4958         /* Clear firmware's nvram arbitration. */
4959         if (tp->tg3_flags & TG3_FLAG_NVRAM)
4960                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
4961         return 0;
4962 }
4963
/* Describes one firmware image as three sections to be copied into a
 * CPU's scratch memory by tg3_load_firmware_cpu().  A NULL *_data
 * pointer means "zero-fill that section".
 */
struct fw_info {
        unsigned int text_base;         /* CPU-view address of .text */
        unsigned int text_len;          /* .text length in bytes */
        u32 *text_data;                 /* .text words, or NULL */
        unsigned int rodata_base;       /* CPU-view address of .rodata */
        unsigned int rodata_len;        /* .rodata length in bytes */
        u32 *rodata_data;               /* .rodata words, or NULL */
        unsigned int data_base;         /* CPU-view address of .data */
        unsigned int data_len;          /* .data length in bytes */
        u32 *data_data;                 /* .data words, or NULL */
};
4975
4976 /* tp->lock is held. */
4977 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4978                                  int cpu_scratch_size, struct fw_info *info)
4979 {
4980         int err, lock_err, i;
4981         void (*write_op)(struct tg3 *, u32, u32);
4982
4983         if (cpu_base == TX_CPU_BASE &&
4984             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4985                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4986                        "TX cpu firmware on %s which is 5705.\n",
4987                        tp->dev->name);
4988                 return -EINVAL;
4989         }
4990
4991         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4992                 write_op = tg3_write_mem;
4993         else
4994                 write_op = tg3_write_indirect_reg32;
4995
4996         /* It is possible that bootcode is still loading at this point.
4997          * Get the nvram lock first before halting the cpu.
4998          */
4999         lock_err = tg3_nvram_lock(tp);
5000         err = tg3_halt_cpu(tp, cpu_base);
5001         if (!lock_err)
5002                 tg3_nvram_unlock(tp);
5003         if (err)
5004                 goto out;
5005
5006         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5007                 write_op(tp, cpu_scratch_base + i, 0);
5008         tw32(cpu_base + CPU_STATE, 0xffffffff);
5009         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5010         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5011                 write_op(tp, (cpu_scratch_base +
5012                               (info->text_base & 0xffff) +
5013                               (i * sizeof(u32))),
5014                          (info->text_data ?
5015                           info->text_data[i] : 0));
5016         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5017                 write_op(tp, (cpu_scratch_base +
5018                               (info->rodata_base & 0xffff) +
5019                               (i * sizeof(u32))),
5020                          (info->rodata_data ?
5021                           info->rodata_data[i] : 0));
5022         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5023                 write_op(tp, (cpu_scratch_base +
5024                               (info->data_base & 0xffff) +
5025                               (i * sizeof(u32))),
5026                          (info->data_data ?
5027                           info->data_data[i] : 0));
5028
5029         err = 0;
5030
5031 out:
5032         return err;
5033 }
5034
5035 /* tp->lock is held. */
5036 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5037 {
5038         struct fw_info info;
5039         int err, i;
5040
5041         info.text_base = TG3_FW_TEXT_ADDR;
5042         info.text_len = TG3_FW_TEXT_LEN;
5043         info.text_data = &tg3FwText[0];
5044         info.rodata_base = TG3_FW_RODATA_ADDR;
5045         info.rodata_len = TG3_FW_RODATA_LEN;
5046         info.rodata_data = &tg3FwRodata[0];
5047         info.data_base = TG3_FW_DATA_ADDR;
5048         info.data_len = TG3_FW_DATA_LEN;
5049         info.data_data = NULL;
5050
5051         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5052                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5053                                     &info);
5054         if (err)
5055                 return err;
5056
5057         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5058                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5059                                     &info);
5060         if (err)
5061                 return err;
5062
5063         /* Now startup only the RX cpu. */
5064         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5065         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5066
5067         for (i = 0; i < 5; i++) {
5068                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5069                         break;
5070                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5071                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5072                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5073                 udelay(1000);
5074         }
5075         if (i >= 5) {
5076                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5077                        "to set RX CPU PC, is %08x should be %08x\n",
5078                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5079                        TG3_FW_TEXT_ADDR);
5080                 return -ENODEV;
5081         }
5082         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5083         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5084
5085         return 0;
5086 }
5087
#if TG3_TSO_SUPPORT != 0

/* Layout of the TSO offload firmware image (same section scheme as
 * the 5701 A0 image above).  Addresses are in the CPU's view of its
 * memory; lengths are in bytes.
 * NOTE(review): "RELASE" below looks like a typo for "RELEASE", but
 * the identifier is kept as-is in case it is referenced elsewhere.
 */
#define TG3_TSO_FW_RELEASE_MAJOR        0x1
#define TG3_TSO_FW_RELASE_MINOR         0x6
#define TG3_TSO_FW_RELEASE_FIX          0x0
#define TG3_TSO_FW_START_ADDR           0x08000000
#define TG3_TSO_FW_TEXT_ADDR            0x08000000
#define TG3_TSO_FW_TEXT_LEN             0x1aa0
#define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
#define TG3_TSO_FW_RODATA_LEN           0x60
#define TG3_TSO_FW_DATA_ADDR            0x08001b20
#define TG3_TSO_FW_DATA_LEN             0x30
#define TG3_TSO_FW_SBSS_ADDR            0x08001b50
#define TG3_TSO_FW_SBSS_LEN             0x2c
#define TG3_TSO_FW_BSS_ADDR             0x08001b80
#define TG3_TSO_FW_BSS_LEN              0x894
5104
5105 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5106         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5107         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5108         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5109         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5110         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5111         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5112         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5113         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5114         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5115         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5116         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5117         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5118         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5119         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5120         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5121         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5122         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5123         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5124         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5125         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5126         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5127         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5128         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5129         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5130         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5131         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5132         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5133         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5134         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5135         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5136         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5137         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5138         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5139         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5140         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5141         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5142         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5143         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5144         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5145         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5146         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5147         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5148         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5149         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5150         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5151         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5152         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5153         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5154         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5155         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5156         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5157         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5158         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5159         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5160         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5161         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5162         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5163         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5164         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5165         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5166         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5167         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5168         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5169         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5170         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5171         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5172         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5173         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5174         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5175         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5176         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5177         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5178         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5179         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5180         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5181         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5182         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5183         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5184         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5185         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5186         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5187         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5188         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5189         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5190         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5191         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5192         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5193         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5194         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5195         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5196         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5197         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5198         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5199         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5200         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5201         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5202         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5203         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5204         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5205         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5206         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5207         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5208         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5209         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5210         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5211         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5212         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5213         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5214         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5215         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5216         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5217         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5218         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5219         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5220         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5221         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5222         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5223         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5224         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5225         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5226         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5227         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5228         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5229         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5230         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5231         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5232         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5233         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5234         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5235         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5236         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5237         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5238         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5239         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5240         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5241         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5242         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5243         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5244         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5245         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5246         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5247         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5248         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5249         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5250         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5251         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5252         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5253         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5254         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5255         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5256         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5257         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5258         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5259         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5260         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5261         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5262         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5263         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5264         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5265         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5266         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5267         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5268         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5269         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5270         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5271         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5272         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5273         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5274         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5275         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5276         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5277         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5278         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5279         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5280         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5281         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5282         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5283         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5284         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5285         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5286         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5287         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5288         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5289         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5290         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5291         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5292         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5293         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5294         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5295         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5296         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5297         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5298         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5299         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5300         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5301         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5302         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5303         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5304         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5305         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5306         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5307         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5308         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5309         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5310         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5311         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5312         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5313         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5314         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5315         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5316         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5317         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5318         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5319         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5320         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5321         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5322         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5323         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5324         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5325         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5326         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5327         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5328         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5329         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5330         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5331         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5332         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5333         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5334         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5335         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5336         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5337         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5338         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5339         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5340         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5341         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5342         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5343         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5344         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5345         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5346         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5347         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5348         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5349         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5350         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5351         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5352         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5353         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5354         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5355         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5356         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5357         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5358         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5359         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5360         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5361         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5362         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5363         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5364         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5365         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5366         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5367         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5368         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5369         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5370         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5371         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5372         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5373         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5374         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5375         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5376         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5377         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5378         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5379         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5380         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5381         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5382         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5383         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5384         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5385         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5386         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5387         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5388         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5389         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5390 };
5391
/* Read-only data section of the generic TSO firmware image; copied to
 * NIC SRAM at TG3_TSO_FW_RODATA_ADDR by tg3_load_tso_firmware().
 * The words are ASCII diagnostic strings ("MainCpuB", "stkoffld",
 * "SwEvent0", "fatalErr", ...) used by the on-chip firmware.
 */
static u32 tg3TsoFwRodata[] = {
        0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
        0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
        0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
        0x00000000,
};
5399
/* Initialized data section of the generic TSO firmware image; copied to
 * NIC SRAM at TG3_TSO_FW_DATA_ADDR by tg3_load_tso_firmware().  Contains
 * the firmware's version string ("stkoffld_v1.6.0") plus zero padding.
 */
static u32 tg3TsoFwData[] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000,
};
5405
/* 5705 needs a special version of the TSO firmware.  */
/* Image version is v1.2.0 (matches the "stkoffld_v1.2.0" string in
 * tg3Tso5FwData below).
 * NOTE(review): "RELASE" in the MINOR macro is a typo for "RELEASE";
 * left byte-identical because the macro may be referenced elsewhere in
 * this file — confirm all uses before renaming.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR       0x1
#define TG3_TSO5_FW_RELASE_MINOR        0x2
#define TG3_TSO5_FW_RELEASE_FIX         0x0
/* NIC SRAM load addresses and section lengths (bytes) of the 5705 TSO
 * image; the text/rodata/data initializers are the tg3Tso5Fw* arrays
 * below, and tg3_load_tso_firmware() sums the section lengths (incl.
 * sbss/bss) to size the RX CPU scratch area.
 */
#define TG3_TSO5_FW_START_ADDR          0x00010000
#define TG3_TSO5_FW_TEXT_ADDR           0x00010000
#define TG3_TSO5_FW_TEXT_LEN            0xe90
#define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
#define TG3_TSO5_FW_RODATA_LEN          0x50
#define TG3_TSO5_FW_DATA_ADDR           0x00010f00
#define TG3_TSO5_FW_DATA_LEN            0x20
#define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
#define TG3_TSO5_FW_SBSS_LEN            0x28
#define TG3_TSO5_FW_BSS_ADDR            0x00010f50
#define TG3_TSO5_FW_BSS_LEN             0x88
5421
/* Instruction (.text) section of the 5705-specific TSO firmware image;
 * copied to NIC SRAM at TG3_TSO5_FW_TEXT_ADDR and executed by the RX CPU
 * (see tg3_load_tso_firmware()).  Derived from proprietary Broadcom
 * firmware per the copyright notice at the top of this file — do not
 * hand-edit these words.
 */
static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
        0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
        0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
        0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
        0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
        0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
        0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
        0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
        0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
        0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
        0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
        0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
        0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
        0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
        0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
        0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
        0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
        0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
        0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
        0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
        0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
        0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
        0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
        0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
        0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
        0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
        0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
        0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
        0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
        0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
        0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
        0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
        0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
        0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
        0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
        0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
        0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
        0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
        0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
        0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
        0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
        0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
        0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
        0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
        0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
        0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
        0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
        0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
        0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
        0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
        0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
        0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
        0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
        0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
        0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
        0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
        0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
        0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
        0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
        0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
        0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
        0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
        0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
        0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
        0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
        0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
        0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
        0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
        0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
        0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
        0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
        0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
        0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
        0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
        0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
        0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
        0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
        0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
        0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
        0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
        0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
        0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
        0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
        0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
        0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
        0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
        0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
        0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
        0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
        0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
        0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
        0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
        0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
        0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
        0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
        0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
        0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
        0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
        0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
        0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
        0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
        0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
        0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
        0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
        0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
        0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
        0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
        0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
        0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
        0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
        0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
        0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
        0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
        0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
        0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
        0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
        0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
        0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
        0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
        0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
        0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
        0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
        0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
        0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
        0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
        0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
        0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
        0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
        0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
        0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
        0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
        0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
        0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
        0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
        0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
        0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
        0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
        0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
        0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
        0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
        0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
        0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
        0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
        0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
        0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
        0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
        0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
        0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
        0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
        0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
        0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
        0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
        0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
        0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
        0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
        0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
        0x00000000, 0x00000000, 0x00000000,
};
5580
/* Read-only data section of the 5705 TSO firmware image; copied to NIC
 * SRAM at TG3_TSO5_FW_RODATA_ADDR by tg3_load_tso_firmware().  ASCII
 * diagnostic strings ("MainCpuB", "MainCpuA", "stkoffld", "fatalErr").
 */
static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
        0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
        0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
        0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
        0x00000000, 0x00000000, 0x00000000,
};
5587
/* Initialized data section of the 5705 TSO firmware image; copied to NIC
 * SRAM at TG3_TSO5_FW_DATA_ADDR by tg3_load_tso_firmware().  Contains the
 * firmware's version string ("stkoffld_v1.2.0").
 */
static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000,
};
5592
5593 /* tp->lock is held. */
5594 static int tg3_load_tso_firmware(struct tg3 *tp)
5595 {
5596         struct fw_info info;
5597         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5598         int err, i;
5599
5600         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5601                 return 0;
5602
5603         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5604                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5605                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5606                 info.text_data = &tg3Tso5FwText[0];
5607                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5608                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5609                 info.rodata_data = &tg3Tso5FwRodata[0];
5610                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5611                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5612                 info.data_data = &tg3Tso5FwData[0];
5613                 cpu_base = RX_CPU_BASE;
5614                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5615                 cpu_scratch_size = (info.text_len +
5616                                     info.rodata_len +
5617                                     info.data_len +
5618                                     TG3_TSO5_FW_SBSS_LEN +
5619                                     TG3_TSO5_FW_BSS_LEN);
5620         } else {
5621                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5622                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5623                 info.text_data = &tg3TsoFwText[0];
5624                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5625                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5626                 info.rodata_data = &tg3TsoFwRodata[0];
5627                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5628                 info.data_len = TG3_TSO_FW_DATA_LEN;
5629                 info.data_data = &tg3TsoFwData[0];
5630                 cpu_base = TX_CPU_BASE;
5631                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5632                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5633         }
5634
5635         err = tg3_load_firmware_cpu(tp, cpu_base,
5636                                     cpu_scratch_base, cpu_scratch_size,
5637                                     &info);
5638         if (err)
5639                 return err;
5640
5641         /* Now startup the cpu. */
5642         tw32(cpu_base + CPU_STATE, 0xffffffff);
5643         tw32_f(cpu_base + CPU_PC,    info.text_base);
5644
5645         for (i = 0; i < 5; i++) {
5646                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5647                         break;
5648                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5649                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5650                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5651                 udelay(1000);
5652         }
5653         if (i >= 5) {
5654                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5655                        "to set CPU PC, is %08x should be %08x\n",
5656                        tp->dev->name, tr32(cpu_base + CPU_PC),
5657                        info.text_base);
5658                 return -ENODEV;
5659         }
5660         tw32(cpu_base + CPU_STATE, 0xffffffff);
5661         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5662         return 0;
5663 }
5664
5665 #endif /* TG3_TSO_SUPPORT != 0 */
5666
5667 /* tp->lock is held. */
5668 static void __tg3_set_mac_addr(struct tg3 *tp)
5669 {
5670         u32 addr_high, addr_low;
5671         int i;
5672
5673         addr_high = ((tp->dev->dev_addr[0] << 8) |
5674                      tp->dev->dev_addr[1]);
5675         addr_low = ((tp->dev->dev_addr[2] << 24) |
5676                     (tp->dev->dev_addr[3] << 16) |
5677                     (tp->dev->dev_addr[4] <<  8) |
5678                     (tp->dev->dev_addr[5] <<  0));
5679         for (i = 0; i < 4; i++) {
5680                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5681                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5682         }
5683
5684         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5685             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5686                 for (i = 0; i < 12; i++) {
5687                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5688                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5689                 }
5690         }
5691
5692         addr_high = (tp->dev->dev_addr[0] +
5693                      tp->dev->dev_addr[1] +
5694                      tp->dev->dev_addr[2] +
5695                      tp->dev->dev_addr[3] +
5696                      tp->dev->dev_addr[4] +
5697                      tp->dev->dev_addr[5]) &
5698                 TX_BACKOFF_SEED_MASK;
5699         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5700 }
5701
5702 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5703 {
5704         struct tg3 *tp = netdev_priv(dev);
5705         struct sockaddr *addr = p;
5706
5707         if (!is_valid_ether_addr(addr->sa_data))
5708                 return -EINVAL;
5709
5710         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5711
5712         if (!netif_running(dev))
5713                 return 0;
5714
5715         spin_lock_bh(&tp->lock);
5716         __tg3_set_mac_addr(tp);
5717         spin_unlock_bh(&tp->lock);
5718
5719         return 0;
5720 }
5721
5722 /* tp->lock is held. */
5723 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5724                            dma_addr_t mapping, u32 maxlen_flags,
5725                            u32 nic_addr)
5726 {
5727         tg3_write_mem(tp,
5728                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5729                       ((u64) mapping >> 32));
5730         tg3_write_mem(tp,
5731                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5732                       ((u64) mapping & 0xffffffff));
5733         tg3_write_mem(tp,
5734                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5735                        maxlen_flags);
5736
5737         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5738                 tg3_write_mem(tp,
5739                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5740                               nic_addr);
5741 }
5742
5743 static void __tg3_set_rx_mode(struct net_device *);
5744 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5745 {
5746         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5747         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5748         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5749         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5750         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5751                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5752                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5753         }
5754         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5755         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5756         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5757                 u32 val = ec->stats_block_coalesce_usecs;
5758
5759                 if (!netif_carrier_ok(tp->dev))
5760                         val = 0;
5761
5762                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5763         }
5764 }
5765
5766 /* tp->lock is held. */
5767 static int tg3_reset_hw(struct tg3 *tp)
5768 {
5769         u32 val, rdmac_mode;
5770         int i, err, limit;
5771
5772         tg3_disable_ints(tp);
5773
5774         tg3_stop_fw(tp);
5775
5776         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5777
5778         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5779                 tg3_abort_hw(tp, 1);
5780         }
5781
5782         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
5783                 tg3_phy_reset(tp);
5784
5785         err = tg3_chip_reset(tp);
5786         if (err)
5787                 return err;
5788
5789         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5790
5791         /* This works around an issue with Athlon chipsets on
5792          * B3 tigon3 silicon.  This bit has no effect on any
5793          * other revision.  But do not set this on PCI Express
5794          * chips.
5795          */
5796         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5797                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5798         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5799
5800         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5801             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5802                 val = tr32(TG3PCI_PCISTATE);
5803                 val |= PCISTATE_RETRY_SAME_DMA;
5804                 tw32(TG3PCI_PCISTATE, val);
5805         }
5806
5807         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5808                 /* Enable some hw fixes.  */
5809                 val = tr32(TG3PCI_MSI_DATA);
5810                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5811                 tw32(TG3PCI_MSI_DATA, val);
5812         }
5813
5814         /* Descriptor ring init may make accesses to the
5815          * NIC SRAM area to setup the TX descriptors, so we
5816          * can only do this after the hardware has been
5817          * successfully reset.
5818          */
5819         tg3_init_rings(tp);
5820
5821         /* This value is determined during the probe time DMA
5822          * engine test, tg3_test_dma.
5823          */
5824         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5825
5826         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5827                           GRC_MODE_4X_NIC_SEND_RINGS |
5828                           GRC_MODE_NO_TX_PHDR_CSUM |
5829                           GRC_MODE_NO_RX_PHDR_CSUM);
5830         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5831
5832         /* Pseudo-header checksum is done by hardware logic and not
5833          * the offload processers, so make the chip do the pseudo-
5834          * header checksums on receive.  For transmit it is more
5835          * convenient to do the pseudo-header checksum in software
5836          * as Linux does that on transmit for us in all cases.
5837          */
5838         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5839
5840         tw32(GRC_MODE,
5841              tp->grc_mode |
5842              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5843
5844         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
5845         val = tr32(GRC_MISC_CFG);
5846         val &= ~0xff;
5847         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5848         tw32(GRC_MISC_CFG, val);
5849
5850         /* Initialize MBUF/DESC pool. */
5851         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5852                 /* Do nothing.  */
5853         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5854                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5855                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5856                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5857                 else
5858                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5859                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5860                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5861         }
5862 #if TG3_TSO_SUPPORT != 0
5863         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5864                 int fw_len;
5865
5866                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5867                           TG3_TSO5_FW_RODATA_LEN +
5868                           TG3_TSO5_FW_DATA_LEN +
5869                           TG3_TSO5_FW_SBSS_LEN +
5870                           TG3_TSO5_FW_BSS_LEN);
5871                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5872                 tw32(BUFMGR_MB_POOL_ADDR,
5873                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5874                 tw32(BUFMGR_MB_POOL_SIZE,
5875                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5876         }
5877 #endif
5878
5879         if (tp->dev->mtu <= ETH_DATA_LEN) {
5880                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5881                      tp->bufmgr_config.mbuf_read_dma_low_water);
5882                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5883                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5884                 tw32(BUFMGR_MB_HIGH_WATER,
5885                      tp->bufmgr_config.mbuf_high_water);
5886         } else {
5887                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5888                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5889                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5890                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5891                 tw32(BUFMGR_MB_HIGH_WATER,
5892                      tp->bufmgr_config.mbuf_high_water_jumbo);
5893         }
5894         tw32(BUFMGR_DMA_LOW_WATER,
5895              tp->bufmgr_config.dma_low_water);
5896         tw32(BUFMGR_DMA_HIGH_WATER,
5897              tp->bufmgr_config.dma_high_water);
5898
5899         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5900         for (i = 0; i < 2000; i++) {
5901                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5902                         break;
5903                 udelay(10);
5904         }
5905         if (i >= 2000) {
5906                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5907                        tp->dev->name);
5908                 return -ENODEV;
5909         }
5910
5911         /* Setup replenish threshold. */
5912         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5913
5914         /* Initialize TG3_BDINFO's at:
5915          *  RCVDBDI_STD_BD:     standard eth size rx ring
5916          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5917          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5918          *
5919          * like so:
5920          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5921          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5922          *                              ring attribute flags
5923          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5924          *
5925          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5926          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5927          *
5928          * The size of each ring is fixed in the firmware, but the location is
5929          * configurable.
5930          */
5931         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5932              ((u64) tp->rx_std_mapping >> 32));
5933         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5934              ((u64) tp->rx_std_mapping & 0xffffffff));
5935         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5936              NIC_SRAM_RX_BUFFER_DESC);
5937
5938         /* Don't even try to program the JUMBO/MINI buffer descriptor
5939          * configs on 5705.
5940          */
5941         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5942                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5943                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5944         } else {
5945                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5946                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5947
5948                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5949                      BDINFO_FLAGS_DISABLED);
5950
5951                 /* Setup replenish threshold. */
5952                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5953
5954                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5955                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5956                              ((u64) tp->rx_jumbo_mapping >> 32));
5957                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5958                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5959                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5960                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5961                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5962                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5963                 } else {
5964                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5965                              BDINFO_FLAGS_DISABLED);
5966                 }
5967
5968         }
5969
5970         /* There is only one send ring on 5705/5750, no need to explicitly
5971          * disable the others.
5972          */
5973         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5974                 /* Clear out send RCB ring in SRAM. */
5975                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5976                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5977                                       BDINFO_FLAGS_DISABLED);
5978         }
5979
5980         tp->tx_prod = 0;
5981         tp->tx_cons = 0;
5982         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5983         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5984
5985         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5986                        tp->tx_desc_mapping,
5987                        (TG3_TX_RING_SIZE <<
5988                         BDINFO_FLAGS_MAXLEN_SHIFT),
5989                        NIC_SRAM_TX_BUFFER_DESC);
5990
5991         /* There is only one receive return ring on 5705/5750, no need
5992          * to explicitly disable the others.
5993          */
5994         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5995                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5996                      i += TG3_BDINFO_SIZE) {
5997                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5998                                       BDINFO_FLAGS_DISABLED);
5999                 }
6000         }
6001
6002         tp->rx_rcb_ptr = 0;
6003         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6004
6005         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6006                        tp->rx_rcb_mapping,
6007                        (TG3_RX_RCB_RING_SIZE(tp) <<
6008                         BDINFO_FLAGS_MAXLEN_SHIFT),
6009                        0);
6010
6011         tp->rx_std_ptr = tp->rx_pending;
6012         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6013                      tp->rx_std_ptr);
6014
6015         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6016                                                 tp->rx_jumbo_pending : 0;
6017         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6018                      tp->rx_jumbo_ptr);
6019
6020         /* Initialize MAC address and backoff seed. */
6021         __tg3_set_mac_addr(tp);
6022
6023         /* MTU + ethernet header + FCS + optional VLAN tag */
6024         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6025
6026         /* The slot time is changed by tg3_setup_phy if we
6027          * run at gigabit with half duplex.
6028          */
6029         tw32(MAC_TX_LENGTHS,
6030              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6031              (6 << TX_LENGTHS_IPG_SHIFT) |
6032              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6033
6034         /* Receive rules. */
6035         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6036         tw32(RCVLPC_CONFIG, 0x0181);
6037
6038         /* Calculate RDMAC_MODE setting early, we need it to determine
6039          * the RCVLPC_STATE_ENABLE mask.
6040          */
6041         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6042                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6043                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6044                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6045                       RDMAC_MODE_LNGREAD_ENAB);
6046         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6047                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6048
6049         /* If statement applies to 5705 and 5750 PCI devices only */
6050         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6051              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6052             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6053                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6054                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6055                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6056                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6057                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6058                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6059                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6060                 }
6061         }
6062
6063         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6064                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6065
6066 #if TG3_TSO_SUPPORT != 0
6067         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6068                 rdmac_mode |= (1 << 27);
6069 #endif
6070
6071         /* Receive/send statistics. */
6072         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6073             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6074                 val = tr32(RCVLPC_STATS_ENABLE);
6075                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6076                 tw32(RCVLPC_STATS_ENABLE, val);
6077         } else {
6078                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6079         }
6080         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6081         tw32(SNDDATAI_STATSENAB, 0xffffff);
6082         tw32(SNDDATAI_STATSCTRL,
6083              (SNDDATAI_SCTRL_ENABLE |
6084               SNDDATAI_SCTRL_FASTUPD));
6085
6086         /* Setup host coalescing engine. */
6087         tw32(HOSTCC_MODE, 0);
6088         for (i = 0; i < 2000; i++) {
6089                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6090                         break;
6091                 udelay(10);
6092         }
6093
6094         __tg3_set_coalesce(tp, &tp->coal);
6095
6096         /* set status block DMA address */
6097         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6098              ((u64) tp->status_mapping >> 32));
6099         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6100              ((u64) tp->status_mapping & 0xffffffff));
6101
6102         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6103                 /* Status/statistics block address.  See tg3_timer,
6104                  * the tg3_periodic_fetch_stats call there, and
6105                  * tg3_get_stats to see how this works for 5705/5750 chips.
6106                  */
6107                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6108                      ((u64) tp->stats_mapping >> 32));
6109                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6110                      ((u64) tp->stats_mapping & 0xffffffff));
6111                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6112                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6113         }
6114
6115         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6116
6117         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6118         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6119         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6120                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6121
6122         /* Clear statistics/status block in chip, and status block in ram. */
6123         for (i = NIC_SRAM_STATS_BLK;
6124              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6125              i += sizeof(u32)) {
6126                 tg3_write_mem(tp, i, 0);
6127                 udelay(40);
6128         }
6129         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6130
6131         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6132                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6133                 /* reset to prevent losing 1st rx packet intermittently */
6134                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6135                 udelay(10);
6136         }
6137
6138         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6139                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6140         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6141         udelay(40);
6142
6143         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6144          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6145          * register to preserve the GPIO settings for LOMs. The GPIOs,
6146          * whether used as inputs or outputs, are set by boot code after
6147          * reset.
6148          */
6149         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6150                 u32 gpio_mask;
6151
6152                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6153                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6154
6155                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6156                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6157                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6158
6159                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6160                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6161
6162                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6163
6164                 /* GPIO1 must be driven high for eeprom write protect */
6165                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6166                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6167         }
6168         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6169         udelay(100);
6170
6171         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6172         tp->last_tag = 0;
6173
6174         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6175                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6176                 udelay(40);
6177         }
6178
6179         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6180                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6181                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6182                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6183                WDMAC_MODE_LNGREAD_ENAB);
6184
6185         /* If statement applies to 5705 and 5750 PCI devices only */
6186         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6187              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6188             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6189                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6190                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6191                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6192                         /* nothing */
6193                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6194                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6195                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6196                         val |= WDMAC_MODE_RX_ACCEL;
6197                 }
6198         }
6199
6200         /* Enable host coalescing bug fix */
6201         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6202             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6203                 val |= (1 << 29);
6204
6205         tw32_f(WDMAC_MODE, val);
6206         udelay(40);
6207
6208         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6209                 val = tr32(TG3PCI_X_CAPS);
6210                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6211                         val &= ~PCIX_CAPS_BURST_MASK;
6212                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6213                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6214                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6215                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6216                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6217                                 val |= (tp->split_mode_max_reqs <<
6218                                         PCIX_CAPS_SPLIT_SHIFT);
6219                 }
6220                 tw32(TG3PCI_X_CAPS, val);
6221         }
6222
6223         tw32_f(RDMAC_MODE, rdmac_mode);
6224         udelay(40);
6225
6226         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6227         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6228                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6229         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6230         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6231         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6232         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6233         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6234 #if TG3_TSO_SUPPORT != 0
6235         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6236                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6237 #endif
6238         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6239         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6240
6241         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6242                 err = tg3_load_5701_a0_firmware_fix(tp);
6243                 if (err)
6244                         return err;
6245         }
6246
6247 #if TG3_TSO_SUPPORT != 0
6248         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6249                 err = tg3_load_tso_firmware(tp);
6250                 if (err)
6251                         return err;
6252         }
6253 #endif
6254
6255         tp->tx_mode = TX_MODE_ENABLE;
6256         tw32_f(MAC_TX_MODE, tp->tx_mode);
6257         udelay(100);
6258
6259         tp->rx_mode = RX_MODE_ENABLE;
6260         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6261                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6262
6263         tw32_f(MAC_RX_MODE, tp->rx_mode);
6264         udelay(10);
6265
6266         if (tp->link_config.phy_is_low_power) {
6267                 tp->link_config.phy_is_low_power = 0;
6268                 tp->link_config.speed = tp->link_config.orig_speed;
6269                 tp->link_config.duplex = tp->link_config.orig_duplex;
6270                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6271         }
6272
6273         tp->mi_mode = MAC_MI_MODE_BASE;
6274         tw32_f(MAC_MI_MODE, tp->mi_mode);
6275         udelay(80);
6276
6277         tw32(MAC_LED_CTRL, tp->led_ctrl);
6278
6279         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6280         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6281                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6282                 udelay(10);
6283         }
6284         tw32_f(MAC_RX_MODE, tp->rx_mode);
6285         udelay(10);
6286
6287         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6288                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6289                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6290                         /* Set drive transmission level to 1.2V  */
6291                         /* only if the signal pre-emphasis bit is not set  */
6292                         val = tr32(MAC_SERDES_CFG);
6293                         val &= 0xfffff000;
6294                         val |= 0x880;
6295                         tw32(MAC_SERDES_CFG, val);
6296                 }
6297                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6298                         tw32(MAC_SERDES_CFG, 0x616000);
6299         }
6300
6301         /* Prevent chip from dropping frames when flow control
6302          * is enabled.
6303          */
6304         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6305
6306         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6307             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6308                 /* Use hardware link auto-negotiation */
6309                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6310         }
6311
6312         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6313             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6314                 u32 tmp;
6315
6316                 tmp = tr32(SERDES_RX_CTRL);
6317                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6318                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6319                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6320                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6321         }
6322
6323         err = tg3_setup_phy(tp, 1);
6324         if (err)
6325                 return err;
6326
6327         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6328                 u32 tmp;
6329
6330                 /* Clear CRC stats. */
6331                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6332                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6333                         tg3_readphy(tp, 0x14, &tmp);
6334                 }
6335         }
6336
6337         __tg3_set_rx_mode(tp->dev);
6338
6339         /* Initialize receive rules. */
6340         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6341         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6342         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6343         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6344
6345         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6346             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6347                 limit = 8;
6348         else
6349                 limit = 16;
6350         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6351                 limit -= 4;
6352         switch (limit) {
6353         case 16:
6354                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6355         case 15:
6356                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6357         case 14:
6358                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6359         case 13:
6360                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6361         case 12:
6362                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6363         case 11:
6364                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6365         case 10:
6366                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6367         case 9:
6368                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6369         case 8:
6370                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6371         case 7:
6372                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6373         case 6:
6374                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6375         case 5:
6376                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6377         case 4:
6378                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6379         case 3:
6380                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6381         case 2:
6382         case 1:
6383
6384         default:
6385                 break;
6386         };
6387
6388         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6389
6390         return 0;
6391 }
6392
6393 /* Called at device open time to get the chip ready for
6394  * packet processing.  Invoked with tp->lock held.
6395  */
6396 static int tg3_init_hw(struct tg3 *tp)
6397 {
6398         int err;
6399
6400         /* Force the chip into D0. */
6401         err = tg3_set_power_state(tp, PCI_D0);
6402         if (err)
6403                 goto out;
6404
6405         tg3_switch_clocks(tp);
6406
6407         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6408
6409         err = tg3_reset_hw(tp);
6410
6411 out:
6412         return err;
6413 }
6414
/* Fold the current value of a 32-bit hardware statistics register REG
 * into the 64-bit software counter PSTAT.  The hardware counter value
 * is added to the low word; if the low word wraps (sum < addend), a
 * carry is propagated into the high word.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
6421
/* Periodically accumulate the MAC TX/RX hardware statistics registers
 * into the driver's 64-bit counters in tp->hw_stats.  Called from the
 * driver timer (once per second) on 5705+ chips, which do not DMA a
 * full statistics block to the host.  Skipped while the link is down,
 * since the counters cannot advance without carrier.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side MAC counters. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side MAC counters. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
}
6458
/* Driver heartbeat timer, self-rearming via add_timer() at the end.
 * Runs every tp->timer_offset jiffies and performs, in order:
 *   1. non-tagged-status interrupt race recovery and WDMAC hang check,
 *   2. once-per-second work: stats fetch (5705+) and link polling,
 *   3. once-per-two-seconds ASF firmware heartbeat.
 * __opaque is the struct tg3 pointer stashed in timer.data.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* irq_sync set means interrupts are being quiesced (e.g. during
	 * reconfiguration); skip all work but keep the timer alive.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* Status block updated but possibly no IRQ seen:
			 * force an interrupt so it gets processed.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Kick host coalescing to make the chip DMA a
			 * fresh status block now.
			 */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* Write DMA engine disabled unexpectedly: treat as a
		 * chip hang and schedule a full reset from process
		 * context.  Timer restart is deferred to reset_task via
		 * TG3_FLG2_RESTART_TIMER.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			/* Poll link state from MAC_STATUS (or the MI
			 * interrupt bit) instead of relying on link
			 * change interrupts.
			 */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link was up and state changed, or link was
			 * down and PCS sync / signal detect appeared:
			 * renegotiate.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Briefly clear the port mode bits
				 * before re-running PHY setup.
				 */
				tw32_f(MAC_MODE,
				     (tp->mac_mode &
				      ~MAC_MODE_PORT_MODE_MASK));
				udelay(40);
				tw32_f(MAC_MODE, tp->mac_mode);
				udelay(40);
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.  */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			/* Tell the ASF firmware the driver is alive by
			 * writing an ALIVE2 command into NIC SRAM and
			 * raising the RX CPU event bit.
			 */
			tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
					   FWCMD_NICDRV_ALIVE2);
			tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
6560
6561 static int tg3_request_irq(struct tg3 *tp)
6562 {
6563         irqreturn_t (*fn)(int, void *, struct pt_regs *);
6564         unsigned long flags;
6565         struct net_device *dev = tp->dev;
6566
6567         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6568                 fn = tg3_msi;
6569                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6570                         fn = tg3_msi_1shot;
6571                 flags = SA_SAMPLE_RANDOM;
6572         } else {
6573                 fn = tg3_interrupt;
6574                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6575                         fn = tg3_interrupt_tagged;
6576                 flags = SA_SHIRQ | SA_SAMPLE_RANDOM;
6577         }
6578         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6579 }
6580
/* Verify that the chip can actually deliver an interrupt in the
 * current mode (used primarily to validate MSI delivery).  Temporarily
 * replaces the real ISR with tg3_test_isr, forces a host-coalescing
 * interrupt, and polls the interrupt mailbox for up to ~50ms.  The
 * normal ISR is restored before returning.
 *
 * Returns 0 if an interrupt was observed, -EIO if not, -ENODEV if the
 * device is not running, or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i;
	u32 int_mbox = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Swap in the test ISR. */
	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force an immediate status block DMA + interrupt. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	/* The test ISR acks by writing a non-zero value to the
	 * interrupt mailbox; poll for it.
	 */
	for (i = 0; i < 5; i++) {
		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		if (int_mbox != 0)
			break;
		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Restore the normal ISR. */
	free_irq(tp->pdev->irq, dev);

	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (int_mbox != 0)
		return 0;

	return -EIO;
}
6627
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 *
 * Runs tg3_test_interrupt() with SERR reporting masked (a failing MSI
 * cycle may terminate with Master Abort).  On MSI failure the driver
 * falls back to INTx, re-requests the IRQ, and fully resets and
 * re-initializes the chip to clear any Master Abort side effects.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word. */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
		       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	/* Re-request the IRQ in INTx mode (flag cleared above selects
	 * the INTx handler in tg3_request_irq()).
	 */
	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
6688
6689 static int tg3_open(struct net_device *dev)
6690 {
6691         struct tg3 *tp = netdev_priv(dev);
6692         int err;
6693
6694         tg3_full_lock(tp, 0);
6695
6696         err = tg3_set_power_state(tp, PCI_D0);
6697         if (err)
6698                 return err;
6699
6700         tg3_disable_ints(tp);
6701         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6702
6703         tg3_full_unlock(tp);
6704
6705         /* The placement of this call is tied
6706          * to the setup and use of Host TX descriptors.
6707          */
6708         err = tg3_alloc_consistent(tp);
6709         if (err)
6710                 return err;
6711
6712         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6713             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6714             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6715             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6716               (tp->pdev_peer == tp->pdev))) {
6717                 /* All MSI supporting chips should support tagged
6718                  * status.  Assert that this is the case.
6719                  */
6720                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6721                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6722                                "Not using MSI.\n", tp->dev->name);
6723                 } else if (pci_enable_msi(tp->pdev) == 0) {
6724                         u32 msi_mode;
6725
6726                         msi_mode = tr32(MSGINT_MODE);
6727                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6728                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6729                 }
6730         }
6731         err = tg3_request_irq(tp);
6732
6733         if (err) {
6734                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6735                         pci_disable_msi(tp->pdev);
6736                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6737                 }
6738                 tg3_free_consistent(tp);
6739                 return err;
6740         }
6741
6742         tg3_full_lock(tp, 0);
6743
6744         err = tg3_init_hw(tp);
6745         if (err) {
6746                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6747                 tg3_free_rings(tp);
6748         } else {
6749                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6750                         tp->timer_offset = HZ;
6751                 else
6752                         tp->timer_offset = HZ / 10;
6753
6754                 BUG_ON(tp->timer_offset > HZ);
6755                 tp->timer_counter = tp->timer_multiplier =
6756                         (HZ / tp->timer_offset);
6757                 tp->asf_counter = tp->asf_multiplier =
6758                         ((HZ / tp->timer_offset) * 2);
6759
6760                 init_timer(&tp->timer);
6761                 tp->timer.expires = jiffies + tp->timer_offset;
6762                 tp->timer.data = (unsigned long) tp;
6763                 tp->timer.function = tg3_timer;
6764         }
6765
6766         tg3_full_unlock(tp);
6767
6768         if (err) {
6769                 free_irq(tp->pdev->irq, dev);
6770                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6771                         pci_disable_msi(tp->pdev);
6772                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6773                 }
6774                 tg3_free_consistent(tp);
6775                 return err;
6776         }
6777
6778         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6779                 err = tg3_test_msi(tp);
6780
6781                 if (err) {
6782                         tg3_full_lock(tp, 0);
6783
6784                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6785                                 pci_disable_msi(tp->pdev);
6786                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6787                         }
6788                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6789                         tg3_free_rings(tp);
6790                         tg3_free_consistent(tp);
6791
6792                         tg3_full_unlock(tp);
6793
6794                         return err;
6795                 }
6796
6797                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6798                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6799                                 u32 val = tr32(0x7c04);
6800
6801                                 tw32(0x7c04, val | (1 << 29));
6802                         }
6803                 }
6804         }
6805
6806         tg3_full_lock(tp, 0);
6807
6808         add_timer(&tp->timer);
6809         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6810         tg3_enable_ints(tp);
6811
6812         tg3_full_unlock(tp);
6813
6814         netif_start_queue(dev);
6815
6816         return 0;
6817 }
6818
#if 0
/* Debug-only helper (compiled out): dump the state of every major
 * chip block, the NIC-side descriptor rings and the host status and
 * statistics blocks to the kernel log.  Caller must hold the locks
 * required for register access.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	/* NIC SRAM copies of the ring control blocks. */
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	/* NIC side jumbo RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
7046
7047 static struct net_device_stats *tg3_get_stats(struct net_device *);
7048 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7049
/* net_device stop() method: shut the interface down.
 *
 * Waits for any in-flight reset task, stops the queue and timer,
 * halts the chip, releases the IRQ (and MSI), snapshots the final
 * statistics, frees DMA memory and drops the device to D3hot.
 * Invoked under rtnl_lock; always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
		msleep(1);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &=
		~(TG3_FLAG_INIT_COMPLETE |
		  TG3_FLAG_GOT_SERDES_FLOWCTL);

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Preserve final statistics so get_stats() can still report
	 * them after hw_stats memory is freed below.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7099
7100 static inline unsigned long get_stat64(tg3_stat64_t *val)
7101 {
7102         unsigned long ret;
7103
7104 #if (BITS_PER_LONG == 32)
7105         ret = val->low;
7106 #else
7107         ret = ((u64)val->high << 32) | ((u64)val->low);
7108 #endif
7109         return ret;
7110 }
7111
/* Return the cumulative receive CRC error count.
 *
 * On 5700/5701 copper devices the count is read from the PHY (write
 * 0x8000 into register 0x1e to expose it, then read register 0x14 —
 * presumably a shadow-register scheme; the register numbers are
 * Broadcom-specific) and accumulated in tp->phy_crc_errors.  All
 * other devices report it via the hardware statistics block.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		/* PHY access must be serialized under tp->lock. */
		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, 0x1e, &val)) {
			tg3_writephy(tp, 0x1e, val | 0x8000);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7136
/* Add the live hardware counter for @member to the snapshot taken at
 * the last close (estats_prev), storing the sum in tp->estats.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Build the full ethtool statistics block.
 *
 * Counters survive an interface down/up cycle because the previous
 * totals (tp->estats_prev) are folded in on top of the current
 * hardware counters.  If the hardware statistics block is not mapped
 * (device closed), the previous snapshot is returned unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7228
/* netdev ->get_stats: map hardware counters onto struct
 * net_device_stats.
 *
 * As with tg3_get_estats(), the totals saved at the last close
 * (net_stats_prev) are added so counters persist across down/up.
 * Returns the previous snapshot unchanged when the hardware
 * statistics block is not mapped.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* Totals are the sum of unicast, multicast and broadcast. */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors need chip-specific handling, see calc_crc_errors(). */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
7288
7289 static inline u32 calc_crc(unsigned char *buf, int len)
7290 {
7291         u32 reg;
7292         u32 tmp;
7293         int j, k;
7294
7295         reg = 0xffffffff;
7296
7297         for (j = 0; j < len; j++) {
7298                 reg ^= buf[j];
7299
7300                 for (k = 0; k < 8; k++) {
7301                         tmp = reg & 0x01;
7302
7303                         reg >>= 1;
7304
7305                         if (tmp) {
7306                                 reg ^= 0xedb88320;
7307                         }
7308                 }
7309         }
7310
7311         return ~reg;
7312 }
7313
7314 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7315 {
7316         /* accept or reject all multicast frames */
7317         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7318         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7319         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7320         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7321 }
7322
/* Apply the device's RX filtering policy (promiscuous / all-multi /
 * hash-filtered multicast) to the MAC.  Caller must hold the full
 * device lock (see tg3_set_rx_mode()).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		/* Hash each address: the top 7 bits of the inverted CRC
		 * select one of 128 filter bits (4 registers x 32 bits).
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the RX mode register when something changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
7386
/* netdev ->set_rx_mode entry point: take the full device lock and
 * reprogram RX filtering.  No-op while the interface is down; the
 * mode is programmed by the open path in that case.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
7398
/* Size of the register dump returned by tg3_get_regs(). */
#define TG3_REGDUMP_LEN		(32 * 1024)

/* ethtool ->get_regs_len: fixed-size register dump. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
7405
/* ethtool ->get_regs: dump device registers into @_p.
 *
 * Registers are copied at their native offsets within the 32 KB
 * buffer (ranges not dumped stay zero from the memset), so the
 * layout of the dump mirrors the register map.  Skipped entirely
 * when the PHY is powered down, since register access would be
 * unreliable then.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Read one register into the dump and advance the cursor. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Dump @len bytes of registers starting at @base, placed at offset
 * @base within the output buffer.
 */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Dump a single register at its native offset. */
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers exist only on parts with the NVRAM interface. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
7478
/* ethtool ->get_eeprom_len: size of the NVRAM, probed at init time. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
7485
7486 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7487 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7488
/* ethtool ->get_eeprom: read @eeprom->len bytes of NVRAM starting at
 * @eeprom->offset into @data.
 *
 * NVRAM is only accessible in 32-bit words, so the unaligned head and
 * tail of the request are handled by reading the surrounding word and
 * copying out the needed bytes.  Words are converted to little-endian
 * so the byte stream matches the raw NVRAM contents.  On a mid-read
 * failure, eeprom->len reflects how many bytes were produced.
 * Returns 0 or a negative errno.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, val, b_offset, b_count;

	/* NVRAM cannot be read while the device is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
7550
7551 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7552
7553 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7554 {
7555         struct tg3 *tp = netdev_priv(dev);
7556         int ret;
7557         u32 offset, len, b_offset, odd_len, start, end;
7558         u8 *buf;
7559
7560         if (tp->link_config.phy_is_low_power)
7561                 return -EAGAIN;
7562
7563         if (eeprom->magic != TG3_EEPROM_MAGIC)
7564                 return -EINVAL;
7565
7566         offset = eeprom->offset;
7567         len = eeprom->len;
7568
7569         if ((b_offset = (offset & 3))) {
7570                 /* adjustments to start on required 4 byte boundary */
7571                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7572                 if (ret)
7573                         return ret;
7574                 start = cpu_to_le32(start);
7575                 len += b_offset;
7576                 offset &= ~3;
7577                 if (len < 4)
7578                         len = 4;
7579         }
7580
7581         odd_len = 0;
7582         if (len & 3) {
7583                 /* adjustments to end on required 4 byte boundary */
7584                 odd_len = 1;
7585                 len = (len + 3) & ~3;
7586                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7587                 if (ret)
7588                         return ret;
7589                 end = cpu_to_le32(end);
7590         }
7591
7592         buf = data;
7593         if (b_offset || odd_len) {
7594                 buf = kmalloc(len, GFP_KERNEL);
7595                 if (buf == 0)
7596                         return -ENOMEM;
7597                 if (b_offset)
7598                         memcpy(buf, &start, 4);
7599                 if (odd_len)
7600                         memcpy(buf+len-4, &end, 4);
7601                 memcpy(buf + b_offset, data, eeprom->len);
7602         }
7603
7604         ret = tg3_nvram_write_block(tp, offset, len, buf);
7605
7606         if (buf != data)
7607                 kfree(buf);
7608
7609         return ret;
7610 }
7611
/* ethtool ->get_settings: report supported modes and the current
 * link configuration.  Speed/duplex are only meaningful while the
 * interface is up, so they are filled in only in that case.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);

	/* Gigabit modes, unless this is a 10/100-only part. */
	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper parts do 10/100 over MII; SERDES parts are fiber only. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_MII);
	else
		cmd->supported |= SUPPORTED_FIBRE;

	cmd->advertising = tp->link_config.advertising;
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	cmd->port = 0;
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
7644   
7645 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7646 {
7647         struct tg3 *tp = netdev_priv(dev);
7648   
7649         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7650                 /* These are the only valid advertisement bits allowed.  */
7651                 if (cmd->autoneg == AUTONEG_ENABLE &&
7652                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7653                                           ADVERTISED_1000baseT_Full |
7654                                           ADVERTISED_Autoneg |
7655                                           ADVERTISED_FIBRE)))
7656                         return -EINVAL;
7657                 /* Fiber can only do SPEED_1000.  */
7658                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7659                          (cmd->speed != SPEED_1000))
7660                         return -EINVAL;
7661         /* Copper cannot force SPEED_1000.  */
7662         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7663                    (cmd->speed == SPEED_1000))
7664                 return -EINVAL;
7665         else if ((cmd->speed == SPEED_1000) &&
7666                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7667                 return -EINVAL;
7668
7669         tg3_full_lock(tp, 0);
7670
7671         tp->link_config.autoneg = cmd->autoneg;
7672         if (cmd->autoneg == AUTONEG_ENABLE) {
7673                 tp->link_config.advertising = cmd->advertising;
7674                 tp->link_config.speed = SPEED_INVALID;
7675                 tp->link_config.duplex = DUPLEX_INVALID;
7676         } else {
7677                 tp->link_config.advertising = 0;
7678                 tp->link_config.speed = cmd->speed;
7679                 tp->link_config.duplex = cmd->duplex;
7680         }
7681   
7682         if (netif_running(dev))
7683                 tg3_setup_phy(tp, 1);
7684
7685         tg3_full_unlock(tp);
7686   
7687         return 0;
7688 }
7689   
/* ethtool ->get_drvinfo: report driver name/version, firmware
 * version and PCI bus address.
 * NOTE(review): the unbounded strcpy()s assume the ethtool_drvinfo
 * fields are large enough for these strings — confirm field sizes.
 */
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->fw_version, tp->fw_ver);
	strcpy(info->bus_info, pci_name(tp->pdev));
}
7699   
7700 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7701 {
7702         struct tg3 *tp = netdev_priv(dev);
7703   
7704         wol->supported = WAKE_MAGIC;
7705         wol->wolopts = 0;
7706         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7707                 wol->wolopts = WAKE_MAGIC;
7708         memset(&wol->sopass, 0, sizeof(wol->sopass));
7709 }
7710   
/* ethtool ->set_wol: enable or disable magic-packet wake.
 * Rejects any other wake method, and rejects magic-packet wake on
 * SERDES parts that are not WoL-capable.  Returns 0 or -EINVAL.
 */
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
	    !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
		return -EINVAL;

	/* Flag word is shared; update it under the lock. */
	spin_lock_bh(&tp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
	else
		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
	spin_unlock_bh(&tp->lock);

	return 0;
}
7731   
/* ethtool ->get_msglevel: current driver message-enable bitmap. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
7737   
/* ethtool ->set_msglevel: set the driver message-enable bitmap. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
7743   
#if TG3_TSO_SUPPORT != 0
/* ethtool ->set_tso: enable/disable TCP segmentation offload.
 * Chips without TSO capability may only have it switched off;
 * asking to turn it on there returns -EINVAL.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;
	}
	return ethtool_op_set_tso(dev, value);
}
#endif
7757   
/* ethtool ->nway_reset: restart autonegotiation on the copper PHY.
 * Only valid while the interface is up and on non-SERDES parts.
 * Succeeds only if autoneg is enabled in BMCR or the driver is in
 * parallel-detect mode; returns 0 on success, -EINVAL/-EAGAIN
 * otherwise.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is deliberately read twice, discarding the
	 * first value — presumably to flush a stale read; confirm
	 * before "simplifying" this away.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
7784   
7785 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7786 {
7787         struct tg3 *tp = netdev_priv(dev);
7788   
7789         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7790         ering->rx_mini_max_pending = 0;
7791         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7792                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7793         else
7794                 ering->rx_jumbo_max_pending = 0;
7795
7796         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7797
7798         ering->rx_pending = tp->rx_pending;
7799         ering->rx_mini_pending = 0;
7800         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7801                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7802         else
7803                 ering->rx_jumbo_pending = 0;
7804
7805         ering->tx_pending = tp->tx_pending;
7806 }
7807   
/* ethtool ->set_ringparam: change ring sizes.
 *
 * If the interface is running, traffic is stopped, the chip is
 * halted, the new sizes are applied, and the chip is re-initialized
 * and restarted.  Returns 0 or -EINVAL for out-of-range requests.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;	/* have tg3_full_lock sync the irq handler */
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cannot post more than 64 standard RX descriptors. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_init_hw(tp);
		tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return 0;
}
7843   
7844 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7845 {
7846         struct tg3 *tp = netdev_priv(dev);
7847   
7848         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7849         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7850         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7851 }
7852   
/* ethtool ->set_pauseparam: update flow-control flags and, if the
 * interface is running, halt and re-initialize the chip so the new
 * settings take effect.  Always returns 0.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;	/* have tg3_full_lock sync the irq handler */
	}

	tg3_full_lock(tp, irq_sync);

	if (epause->autoneg)
		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	else
		tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
	if (epause->rx_pause)
		tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_init_hw(tp);
		tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return 0;
}
7888   
/* ethtool ->get_rx_csum: 1 if RX checksum offload is enabled. */
static u32 tg3_get_rx_csum(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
}
7894   
7895 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7896 {
7897         struct tg3 *tp = netdev_priv(dev);
7898   
7899         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7900                 if (data != 0)
7901                         return -EINVAL;
7902                 return 0;
7903         }
7904   
7905         spin_lock_bh(&tp->lock);
7906         if (data)
7907                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7908         else
7909                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7910         spin_unlock_bh(&tp->lock);
7911   
7912         return 0;
7913 }
7914   
7915 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7916 {
7917         struct tg3 *tp = netdev_priv(dev);
7918   
7919         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7920                 if (data != 0)
7921                         return -EINVAL;
7922                 return 0;
7923         }
7924   
7925         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7926             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7927                 ethtool_op_set_tx_hw_csum(dev, data);
7928         else
7929                 ethtool_op_set_tx_csum(dev, data);
7930
7931         return 0;
7932 }
7933
/* ethtool ->get_stats_count: number of exported statistics. */
static int tg3_get_stats_count (struct net_device *dev)
{
	return TG3_NUM_STATS;
}
7938
/* ethtool ->self_test_count: number of self-test results. */
static int tg3_get_test_count (struct net_device *dev)
{
	return TG3_NUM_TEST;
}
7943
7944 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7945 {
7946         switch (stringset) {
7947         case ETH_SS_STATS:
7948                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7949                 break;
7950         case ETH_SS_TEST:
7951                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7952                 break;
7953         default:
7954                 WARN_ON(1);     /* we need a WARN() */
7955                 break;
7956         }
7957 }
7958
/* ethtool ->phys_id: blink the port LEDs so the user can locate the
 * adapter.  Blinks for @data seconds (default 2 when 0 is passed),
 * toggling all LEDs on/off at 500 ms intervals, then restores the
 * normal LED configuration.  Interruptible by signals.
 */
static int tg3_phys_id(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (!netif_running(tp->dev))
		return -EAGAIN;

	if (data == 0)
		data = 2;	/* default: 2 seconds */

	/* Two LED register writes (on, then off) per second. */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_1000MBPS_ON |
					   LED_CTRL_100MBPS_ON |
					   LED_CTRL_10MBPS_ON |
					   LED_CTRL_TRAFFIC_OVERRIDE |
					   LED_CTRL_TRAFFIC_BLINK |
					   LED_CTRL_TRAFFIC_LED);

		else
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_TRAFFIC_OVERRIDE);

		if (msleep_interruptible(500))
			break;	/* signal received: stop blinking early */
	}
	/* Restore the saved LED control value. */
	tw32(MAC_LED_CTRL, tp->led_ctrl);
	return 0;
}
7990
/* ethtool hook: copy the driver's statistics block into the caller's
 * buffer.  sizeof(tp->estats) bytes are copied, so tmp_stats must hold
 * TG3_NUM_STATS u64 values; assumes tg3_get_estats() returns a block
 * with that same layout.
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
7997
7998 #define NVRAM_TEST_SIZE 0x100
7999 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8000
8001 static int tg3_test_nvram(struct tg3 *tp)
8002 {
8003         u32 *buf, csum, magic;
8004         int i, j, err = 0, size;
8005
8006         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8007                 return -EIO;
8008
8009         if (magic == TG3_EEPROM_MAGIC)
8010                 size = NVRAM_TEST_SIZE;
8011         else if ((magic & 0xff000000) == 0xa5000000) {
8012                 if ((magic & 0xe00000) == 0x200000)
8013                         size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8014                 else
8015                         return 0;
8016         } else
8017                 return -EIO;
8018
8019         buf = kmalloc(size, GFP_KERNEL);
8020         if (buf == NULL)
8021                 return -ENOMEM;
8022
8023         err = -EIO;
8024         for (i = 0, j = 0; i < size; i += 4, j++) {
8025                 u32 val;
8026
8027                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8028                         break;
8029                 buf[j] = cpu_to_le32(val);
8030         }
8031         if (i < size)
8032                 goto out;
8033
8034         /* Selfboot format */
8035         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
8036                 u8 *buf8 = (u8 *) buf, csum8 = 0;
8037
8038                 for (i = 0; i < size; i++)
8039                         csum8 += buf8[i];
8040
8041                 if (csum8 == 0) {
8042                         err = 0;
8043                         goto out;
8044                 }
8045
8046                 err = -EIO;
8047                 goto out;
8048         }
8049
8050         /* Bootstrap checksum at offset 0x10 */
8051         csum = calc_crc((unsigned char *) buf, 0x10);
8052         if(csum != cpu_to_le32(buf[0x10/4]))
8053                 goto out;
8054
8055         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8056         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8057         if (csum != cpu_to_le32(buf[0xfc/4]))
8058                  goto out;
8059
8060         err = 0;
8061
8062 out:
8063         kfree(buf);
8064         return err;
8065 }
8066
8067 #define TG3_SERDES_TIMEOUT_SEC  2
8068 #define TG3_COPPER_TIMEOUT_SEC  6
8069
8070 static int tg3_test_link(struct tg3 *tp)
8071 {
8072         int i, max;
8073
8074         if (!netif_running(tp->dev))
8075                 return -ENODEV;
8076
8077         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8078                 max = TG3_SERDES_TIMEOUT_SEC;
8079         else
8080                 max = TG3_COPPER_TIMEOUT_SEC;
8081
8082         for (i = 0; i < max; i++) {
8083                 if (netif_carrier_ok(tp->dev))
8084                         return 0;
8085
8086                 if (msleep_interruptible(1000))
8087                         break;
8088         }
8089
8090         return -EIO;
8091 }
8092
8093 /* Only test the commonly used registers */
/* Self-test: for each table entry, verify that writing 0 and then
 * all-ones to the register leaves the read-only bits (read_mask)
 * unchanged and makes the writable bits (write_mask) track what was
 * written.  Entry flags restrict it to 5705-class, non-5705, or
 * non-5788 chips.  The original value is restored after each register.
 * Returns 0 on success, -EIO on the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;	/* chip-applicability flags below */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, 0x0000,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, 0x0000,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Sentinel terminating the table. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		is_5705 = 1;
	else
		is_5705 = 0;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		/* Restore the register before moving on. */
		tw32(offset, save_val);
	}

	return 0;

out:
	/* Report the failing offset and restore its original value. */
	printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
8305
8306 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8307 {
8308         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8309         int i;
8310         u32 j;
8311
8312         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8313                 for (j = 0; j < len; j += 4) {
8314                         u32 val;
8315
8316                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8317                         tg3_read_mem(tp, offset + j, &val);
8318                         if (val != test_pattern[i])
8319                                 return -EIO;
8320                 }
8321         }
8322         return 0;
8323 }
8324
8325 static int tg3_test_memory(struct tg3 *tp)
8326 {
8327         static struct mem_entry {
8328                 u32 offset;
8329                 u32 len;
8330         } mem_tbl_570x[] = {
8331                 { 0x00000000, 0x00b50},
8332                 { 0x00002000, 0x1c000},
8333                 { 0xffffffff, 0x00000}
8334         }, mem_tbl_5705[] = {
8335                 { 0x00000100, 0x0000c},
8336                 { 0x00000200, 0x00008},
8337                 { 0x00004000, 0x00800},
8338                 { 0x00006000, 0x01000},
8339                 { 0x00008000, 0x02000},
8340                 { 0x00010000, 0x0e000},
8341                 { 0xffffffff, 0x00000}
8342         }, mem_tbl_5755[] = {
8343                 { 0x00000200, 0x00008},
8344                 { 0x00004000, 0x00800},
8345                 { 0x00006000, 0x00800},
8346                 { 0x00008000, 0x02000},
8347                 { 0x00010000, 0x0c000},
8348                 { 0xffffffff, 0x00000}
8349         };
8350         struct mem_entry *mem_tbl;
8351         int err = 0;
8352         int i;
8353
8354         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8355                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8356                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8357                         mem_tbl = mem_tbl_5755;
8358                 else
8359                         mem_tbl = mem_tbl_5705;
8360         } else
8361                 mem_tbl = mem_tbl_570x;
8362
8363         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8364                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8365                     mem_tbl[i].len)) != 0)
8366                         break;
8367         }
8368         
8369         return err;
8370 }
8371
8372 #define TG3_MAC_LOOPBACK        0
8373 #define TG3_PHY_LOOPBACK        1
8374
8375 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8376 {
8377         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8378         u32 desc_idx;
8379         struct sk_buff *skb, *rx_skb;
8380         u8 *tx_data;
8381         dma_addr_t map;
8382         int num_pkts, tx_len, rx_len, i, err;
8383         struct tg3_rx_buffer_desc *desc;
8384
8385         if (loopback_mode == TG3_MAC_LOOPBACK) {
8386                 /* HW errata - mac loopback fails in some cases on 5780.
8387                  * Normal traffic and PHY loopback are not affected by
8388                  * errata.
8389                  */
8390                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8391                         return 0;
8392
8393                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8394                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8395                            MAC_MODE_PORT_MODE_GMII;
8396                 tw32(MAC_MODE, mac_mode);
8397         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8398                 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8399                                            BMCR_SPEED1000);
8400                 udelay(40);
8401                 /* reset to prevent losing 1st rx packet intermittently */
8402                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8403                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8404                         udelay(10);
8405                         tw32_f(MAC_RX_MODE, tp->rx_mode);
8406                 }
8407                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8408                            MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
8409                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8410                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8411                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
8412                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8413                 }
8414                 tw32(MAC_MODE, mac_mode);
8415         }
8416         else
8417                 return -EINVAL;
8418
8419         err = -EIO;
8420
8421         tx_len = 1514;
8422         skb = dev_alloc_skb(tx_len);
8423         tx_data = skb_put(skb, tx_len);
8424         memcpy(tx_data, tp->dev->dev_addr, 6);
8425         memset(tx_data + 6, 0x0, 8);
8426
8427         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8428
8429         for (i = 14; i < tx_len; i++)
8430                 tx_data[i] = (u8) (i & 0xff);
8431
8432         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8433
8434         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8435              HOSTCC_MODE_NOW);
8436
8437         udelay(10);
8438
8439         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8440
8441         num_pkts = 0;
8442
8443         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8444
8445         tp->tx_prod++;
8446         num_pkts++;
8447
8448         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8449                      tp->tx_prod);
8450         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8451
8452         udelay(10);
8453
8454         for (i = 0; i < 10; i++) {
8455                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8456                        HOSTCC_MODE_NOW);
8457
8458                 udelay(10);
8459
8460                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8461                 rx_idx = tp->hw_status->idx[0].rx_producer;
8462                 if ((tx_idx == tp->tx_prod) &&
8463                     (rx_idx == (rx_start_idx + num_pkts)))
8464                         break;
8465         }
8466
8467         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8468         dev_kfree_skb(skb);
8469
8470         if (tx_idx != tp->tx_prod)
8471                 goto out;
8472
8473         if (rx_idx != rx_start_idx + num_pkts)
8474                 goto out;
8475
8476         desc = &tp->rx_rcb[rx_start_idx];
8477         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8478         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8479         if (opaque_key != RXD_OPAQUE_RING_STD)
8480                 goto out;
8481
8482         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8483             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8484                 goto out;
8485
8486         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8487         if (rx_len != tx_len)
8488                 goto out;
8489
8490         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8491
8492         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8493         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8494
8495         for (i = 14; i < tx_len; i++) {
8496                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8497                         goto out;
8498         }
8499         err = 0;
8500         
8501         /* tg3_free_rings will unmap and free the rx_skb */
8502 out:
8503         return err;
8504 }
8505
8506 #define TG3_MAC_LOOPBACK_FAILED         1
8507 #define TG3_PHY_LOOPBACK_FAILED         2
8508 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8509                                          TG3_PHY_LOOPBACK_FAILED)
8510
8511 static int tg3_test_loopback(struct tg3 *tp)
8512 {
8513         int err = 0;
8514
8515         if (!netif_running(tp->dev))
8516                 return TG3_LOOPBACK_FAILED;
8517
8518         tg3_reset_hw(tp);
8519
8520         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8521                 err |= TG3_MAC_LOOPBACK_FAILED;
8522         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8523                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8524                         err |= TG3_PHY_LOOPBACK_FAILED;
8525         }
8526
8527         return err;
8528 }
8529
/* ethtool self-test entry point.  The nvram and link tests always run;
 * with ETH_TEST_FL_OFFLINE set, the chip is halted and the register,
 * memory, loopback and interrupt tests run too, after which the chip
 * is reinitialized.  Result slots: data[0]=nvram, [1]=link,
 * [2]=registers, [3]=memory, [4]=loopback bitmask, [5]=interrupt;
 * nonzero means that test failed.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Wake the chip for the duration of the test. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		/* Quiesce the device before taking the full lock. */
		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		/* Pre-5705 chips also have a TX cpu to halt. */
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* The loopback result is itself a failure bitmask. */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* NOTE(review): runs outside the full lock — presumably
		 * tg3_test_interrupt handles its own locking; confirm.
		 */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Restore the chip to its normal operating state. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			tg3_init_hw(tp);
			tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
8602
/* net_device ioctl handler for the MII ioctls
 * (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).  Register access is
 * refused on serdes boards (no PHY) and while the PHY is powered
 * down.  Everything else returns -EOPNOTSUPP.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;		/* PHY powered down */

		/* tp->lock serializes MDIO access with the driver. */
		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		/* Writes are privileged. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;		/* PHY powered down */

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
8654
8655 #if TG3_VLAN_TAG_USED
/* VLAN hook: record the new VLAN group and refresh the rx mode so the
 * chip's VLAN-tag stripping setting matches, all under the full
 * driver lock.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	tg3_full_unlock(tp);
}
8669
/* VLAN hook: remove the net_device entry for @vid from the VLAN group,
 * if a group is registered.  Done under the full driver lock.
 */
static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_full_lock(tp, 0);
	if (tp->vlgrp)
		tp->vlgrp->vlan_devices[vid] = NULL;
	tg3_full_unlock(tp);
}
8679 #endif
8680
/* ethtool hook: report the current coalescing parameters.
 * Copies sizeof(*ec) bytes — assumes tp->coal has the layout of a
 * struct ethtool_coalesce (as the sizeof implies).
 */
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
8688
8689 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8690 {
8691         struct tg3 *tp = netdev_priv(dev);
8692         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8693         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8694
8695         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8696                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8697                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8698                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8699                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8700         }
8701
8702         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8703             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8704             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8705             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8706             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8707             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8708             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8709             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8710             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8711             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8712                 return -EINVAL;
8713
8714         /* No rx interrupts will be generated if both are zero */
8715         if ((ec->rx_coalesce_usecs == 0) &&
8716             (ec->rx_max_coalesced_frames == 0))
8717                 return -EINVAL;
8718
8719         /* No tx interrupts will be generated if both are zero */
8720         if ((ec->tx_coalesce_usecs == 0) &&
8721             (ec->tx_max_coalesced_frames == 0))
8722                 return -EINVAL;
8723
8724         /* Only copy relevant parameters, ignore all others. */
8725         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8726         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8727         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8728         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8729         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8730         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8731         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8732         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8733         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8734
8735         if (netif_running(dev)) {
8736                 tg3_full_lock(tp, 0);
8737                 __tg3_set_coalesce(tp, &tp->coal);
8738                 tg3_full_unlock(tp);
8739         }
8740         return 0;
8741 }
8742
/* ethtool operations exported by this driver; TSO handlers are
 * compiled in only when the kernel provides NETIF_F_TSO.
 */
static struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#if TG3_TSO_SUPPORT != 0
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= tg3_set_tso,
#endif
	.self_test_count	= tg3_get_test_count,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_stats_count	= tg3_get_stats_count,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
8782
8783 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8784 {
8785         u32 cursize, val, magic;
8786
8787         tp->nvram_size = EEPROM_CHIP_SIZE;
8788
8789         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8790                 return;
8791
8792         if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
8793                 return;
8794
8795         /*
8796          * Size the chip by reading offsets at increasing powers of two.
8797          * When we encounter our validation signature, we know the addressing
8798          * has wrapped around, and thus have our chip size.
8799          */
8800         cursize = 0x10;
8801
8802         while (cursize < tp->nvram_size) {
8803                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
8804                         return;
8805
8806                 if (val == magic)
8807                         break;
8808
8809                 cursize <<= 1;
8810         }
8811
8812         tp->nvram_size = cursize;
8813 }
8814                 
8815 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8816 {
8817         u32 val;
8818
8819         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
8820                 return;
8821
8822         /* Selfboot format */
8823         if (val != TG3_EEPROM_MAGIC) {
8824                 tg3_get_eeprom_size(tp);
8825                 return;
8826         }
8827
8828         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8829                 if (val != 0) {
8830                         tp->nvram_size = (val >> 16) * 1024;
8831                         return;
8832                 }
8833         }
8834         tp->nvram_size = 0x20000;
8835 }
8836
/* Decode NVRAM_CFG1 to identify the attached NVRAM part (JEDEC vendor,
 * page size, buffered-ness) on chips older than the 5752 family.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);
        if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
                tp->tg3_flags2 |= TG3_FLG2_FLASH;
        }
        else {
                /* No flash interface strapped: force the compat (EEPROM)
                 * access path. */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }

        /* Only 5750 and the 5780 class encode a vendor field here; older
         * chips default to a buffered Atmel part below. */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
                switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
                        case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
                                break;
                        case FLASH_VENDOR_ATMEL_EEPROM:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_ST:
                                tp->nvram_jedecnum = JEDEC_ST;
                                tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_SAIFUN:
                                tp->nvram_jedecnum = JEDEC_SAIFUN;
                                tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
                                break;
                        case FLASH_VENDOR_SST_SMALL:
                        case FLASH_VENDOR_SST_LARGE:
                                tp->nvram_jedecnum = JEDEC_SST;
                                tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
                                break;
                }
        }
        else {
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
        }
}
8889
8890 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8891 {
8892         u32 nvcfg1;
8893
8894         nvcfg1 = tr32(NVRAM_CFG1);
8895
8896         /* NVRAM protection for TPM */
8897         if (nvcfg1 & (1 << 27))
8898                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8899
8900         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8901                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8902                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8903                         tp->nvram_jedecnum = JEDEC_ATMEL;
8904                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8905                         break;
8906                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8907                         tp->nvram_jedecnum = JEDEC_ATMEL;
8908                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8909                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8910                         break;
8911                 case FLASH_5752VENDOR_ST_M45PE10:
8912                 case FLASH_5752VENDOR_ST_M45PE20:
8913                 case FLASH_5752VENDOR_ST_M45PE40:
8914                         tp->nvram_jedecnum = JEDEC_ST;
8915                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8916                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8917                         break;
8918         }
8919
8920         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8921                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8922                         case FLASH_5752PAGE_SIZE_256:
8923                                 tp->nvram_pagesize = 256;
8924                                 break;
8925                         case FLASH_5752PAGE_SIZE_512:
8926                                 tp->nvram_pagesize = 512;
8927                                 break;
8928                         case FLASH_5752PAGE_SIZE_1K:
8929                                 tp->nvram_pagesize = 1024;
8930                                 break;
8931                         case FLASH_5752PAGE_SIZE_2K:
8932                                 tp->nvram_pagesize = 2048;
8933                                 break;
8934                         case FLASH_5752PAGE_SIZE_4K:
8935                                 tp->nvram_pagesize = 4096;
8936                                 break;
8937                         case FLASH_5752PAGE_SIZE_264:
8938                                 tp->nvram_pagesize = 264;
8939                                 break;
8940                 }
8941         }
8942         else {
8943                 /* For eeprom, set pagesize to maximum eeprom size */
8944                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8945
8946                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8947                 tw32(NVRAM_CFG1, nvcfg1);
8948         }
8949 }
8950
/* Identify the NVRAM part on a 5755 from the NVRAM_CFG1 vendor field
 * and record its JEDEC id, page size, and buffered/flash flags.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27))
                tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
                case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                        /* EEPROM part: force the compat access path. */
                        nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                        tw32(NVRAM_CFG1, nvcfg1);
                        break;
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                case FLASH_5755VENDOR_ATMEL_FLASH_1:
                case FLASH_5755VENDOR_ATMEL_FLASH_2:
                case FLASH_5755VENDOR_ATMEL_FLASH_3:
                case FLASH_5755VENDOR_ATMEL_FLASH_4:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 264;
                        break;
                case FLASH_5752VENDOR_ST_M45PE10:
                case FLASH_5752VENDOR_ST_M45PE20:
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 256;
                        break;
        }
}
8991
8992 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
8993 {
8994         u32 nvcfg1;
8995
8996         nvcfg1 = tr32(NVRAM_CFG1);
8997
8998         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8999                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9000                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9001                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9002                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9003                         tp->nvram_jedecnum = JEDEC_ATMEL;
9004                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9005                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9006
9007                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9008                         tw32(NVRAM_CFG1, nvcfg1);
9009                         break;
9010                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9011                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9012                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9013                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9014                         tp->nvram_jedecnum = JEDEC_ATMEL;
9015                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9016                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9017                         tp->nvram_pagesize = 264;
9018                         break;
9019                 case FLASH_5752VENDOR_ST_M45PE10:
9020                 case FLASH_5752VENDOR_ST_M45PE20:
9021                 case FLASH_5752VENDOR_ST_M45PE40:
9022                         tp->nvram_jedecnum = JEDEC_ST;
9023                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9024                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9025                         tp->nvram_pagesize = 256;
9026                         break;
9027         }
9028 }
9029
9030 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9031 static void __devinit tg3_nvram_init(struct tg3 *tp)
9032 {
9033         int j;
9034
9035         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
9036                 return;
9037
9038         tw32_f(GRC_EEPROM_ADDR,
9039              (EEPROM_ADDR_FSM_RESET |
9040               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9041                EEPROM_ADDR_CLKPERD_SHIFT)));
9042
9043         /* XXX schedule_timeout() ... */
9044         for (j = 0; j < 100; j++)
9045                 udelay(10);
9046
9047         /* Enable seeprom accesses. */
9048         tw32_f(GRC_LOCAL_CTRL,
9049              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9050         udelay(100);
9051
9052         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9053             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9054                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9055
9056                 if (tg3_nvram_lock(tp)) {
9057                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9058                                "tg3_nvram_init failed.\n", tp->dev->name);
9059                         return;
9060                 }
9061                 tg3_enable_nvram_access(tp);
9062
9063                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9064                         tg3_get_5752_nvram_info(tp);
9065                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9066                         tg3_get_5755_nvram_info(tp);
9067                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9068                         tg3_get_5787_nvram_info(tp);
9069                 else
9070                         tg3_get_nvram_info(tp);
9071
9072                 tg3_get_nvram_size(tp);
9073
9074                 tg3_disable_nvram_access(tp);
9075                 tg3_nvram_unlock(tp);
9076
9077         } else {
9078                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9079
9080                 tg3_get_eeprom_size(tp);
9081         }
9082 }
9083
/* Read one 32-bit word through the serial-EEPROM state machine (the
 * access path for chips without the NVRAM interface).  @offset must be
 * dword aligned and within EEPROM_ADDR_ADDR_MASK.
 * Returns 0 on success, -EINVAL on a bad offset, -EBUSY on timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
                                        u32 offset, u32 *val)
{
        u32 tmp;
        int i;

        if (offset > EEPROM_ADDR_ADDR_MASK ||
            (offset % 4) != 0)
                return -EINVAL;

        /* Preserve unrelated control bits; clear address/devid/read. */
        tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
                                        EEPROM_ADDR_DEVID_MASK |
                                        EEPROM_ADDR_READ);
        /* Program the address and kick off the read transaction. */
        tw32(GRC_EEPROM_ADDR,
             tmp |
             (0 << EEPROM_ADDR_DEVID_SHIFT) |
             ((offset << EEPROM_ADDR_ADDR_SHIFT) &
              EEPROM_ADDR_ADDR_MASK) |
             EEPROM_ADDR_READ | EEPROM_ADDR_START);

        /* Poll for completion, 100us per try, up to 10000 tries. */
        for (i = 0; i < 10000; i++) {
                tmp = tr32(GRC_EEPROM_ADDR);

                if (tmp & EEPROM_ADDR_COMPLETE)
                        break;
                udelay(100);
        }
        if (!(tmp & EEPROM_ADDR_COMPLETE))
                return -EBUSY;

        *val = tr32(GRC_EEPROM_DATA);
        return 0;
}
9117
#define NVRAM_CMD_TIMEOUT 10000

/* Issue @nvram_cmd to the NVRAM controller and busy-wait (10us per try,
 * up to NVRAM_CMD_TIMEOUT tries) for NVRAM_CMD_DONE.
 * Returns 0 when the command completes, -EBUSY on timeout.
 */
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
        int i;

        tw32(NVRAM_CMD, nvram_cmd);
        for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
                udelay(10);
                if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
                        /* Extra delay after DONE — presumably lets the
                         * data registers settle; confirm against the
                         * hardware manual before removing. */
                        udelay(10);
                        break;
                }
        }
        if (i == NVRAM_CMD_TIMEOUT) {
                return -EBUSY;
        }
        return 0;
}
9137
9138 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9139 {
9140         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9141             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9142             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9143             (tp->nvram_jedecnum == JEDEC_ATMEL))
9144
9145                 addr = ((addr / tp->nvram_pagesize) <<
9146                         ATMEL_AT45DB0X1B_PAGE_POS) +
9147                        (addr % tp->nvram_pagesize);
9148
9149         return addr;
9150 }
9151
9152 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9153 {
9154         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9155             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9156             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9157             (tp->nvram_jedecnum == JEDEC_ATMEL))
9158
9159                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9160                         tp->nvram_pagesize) +
9161                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9162
9163         return addr;
9164 }
9165
/* Read one 32-bit word of NVRAM at linear @offset into @*val
 * (byte-swapped from the controller's read register).  Takes and
 * releases the NVRAM hardware lock around the access.
 * Returns 0 on success or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
        int ret;

        /* Sun parts have no accessible NVRAM through this path. */
        if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
                printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
                return -EINVAL;
        }

        /* 5700/5701 use the serial-EEPROM state machine instead. */
        if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
                return tg3_nvram_read_using_eeprom(tp, offset, val);

        /* Map to the device's page-based addressing where required. */
        offset = tg3_nvram_phys_addr(tp, offset);

        if (offset > NVRAM_ADDR_MSK)
                return -EINVAL;

        ret = tg3_nvram_lock(tp);
        if (ret)
                return ret;

        tg3_enable_nvram_access(tp);

        tw32(NVRAM_ADDR, offset);
        ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
                NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

        /* Only fetch the data word if the command completed. */
        if (ret == 0)
                *val = swab32(tr32(NVRAM_RDDATA));

        tg3_disable_nvram_access(tp);

        tg3_nvram_unlock(tp);

        return ret;
}
9202
9203 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9204 {
9205         int err;
9206         u32 tmp;
9207
9208         err = tg3_nvram_read(tp, offset, &tmp);
9209         *val = swab32(tmp);
9210         return err;
9211 }
9212
/* Write @len bytes (dword granularity) through the serial-EEPROM state
 * machine, one 32-bit word per transaction.
 * Returns 0 on success, -EBUSY if any word times out.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
                                    u32 offset, u32 len, u8 *buf)
{
        int i, j, rc = 0;
        u32 val;

        for (i = 0; i < len; i += 4) {
                u32 addr, data;

                addr = offset + i;

                memcpy(&data, buf + i, 4);

                tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

                val = tr32(GRC_EEPROM_ADDR);
                /* NOTE(review): writing COMPLETE back appears to ack the
                 * previous completion event — confirm against the GRC
                 * register description. */
                tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

                val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
                        EEPROM_ADDR_READ);
                /* Program the address and start the write transaction. */
                tw32(GRC_EEPROM_ADDR, val |
                        (0 << EEPROM_ADDR_DEVID_SHIFT) |
                        (addr & EEPROM_ADDR_ADDR_MASK) |
                        EEPROM_ADDR_START |
                        EEPROM_ADDR_WRITE);

                /* Poll for completion, 100us per try, up to 10000 tries. */
                for (j = 0; j < 10000; j++) {
                        val = tr32(GRC_EEPROM_ADDR);

                        if (val & EEPROM_ADDR_COMPLETE)
                                break;
                        udelay(100);
                }
                if (!(val & EEPROM_ADDR_COMPLETE)) {
                        rc = -EBUSY;
                        break;
                }
        }

        return rc;
}
9254
/* offset and length are dword aligned.
 *
 * Write path for unbuffered flash parts: the device must be erased a
 * page at a time, so each affected page is read into a bounce buffer,
 * patched with the caller's data, erased, and reprogrammed dword by
 * dword.  Returns 0 on success or a negative errno.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
                u8 *buf)
{
        int ret = 0;
        u32 pagesize = tp->nvram_pagesize;
        /* Mask math below assumes a power-of-two pagesize — this holds
         * for the parts routed here by tg3_nvram_write_block(). */
        u32 pagemask = pagesize - 1;
        u32 nvram_cmd;
        u8 *tmp;        /* one-page bounce buffer */

        tmp = kmalloc(pagesize, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;

        while (len) {
                int j;
                u32 phy_addr, page_off, size;

                /* Start address of the page containing 'offset'. */
                phy_addr = offset & ~pagemask;
        
                /* Read the whole current page into the bounce buffer. */
                for (j = 0; j < pagesize; j += 4) {
                        if ((ret = tg3_nvram_read(tp, phy_addr + j,
                                                (u32 *) (tmp + j))))
                                break;
                }
                if (ret)
                        break;

                /* Patch in the caller's data (possibly a partial page). */
                page_off = offset & pagemask;
                size = pagesize;
                if (len < size)
                        size = len;

                len -= size;

                memcpy(tmp + page_off, buf, size);

                /* Advance to the start of the next page. */
                offset = offset + (pagesize - page_off);

                tg3_enable_nvram_access(tp);

                /*
                 * Before we can erase the flash page, we need
                 * to issue a special "write enable" command.
                 */
                nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                /* Erase the target page */
                tw32(NVRAM_ADDR, phy_addr);

                nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
                        NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                /* Issue another write enable to start the write. */
                nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                /* Reprogram the page one dword at a time. */
                for (j = 0; j < pagesize; j += 4) {
                        u32 data;

                        data = *((u32 *) (tmp + j));
                        tw32(NVRAM_WRDATA, cpu_to_be32(data));

                        tw32(NVRAM_ADDR, phy_addr + j);

                        nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
                                NVRAM_CMD_WR;

                        /* Mark the first and last dwords of the burst. */
                        if (j == 0)
                                nvram_cmd |= NVRAM_CMD_FIRST;
                        else if (j == (pagesize - 4))
                                nvram_cmd |= NVRAM_CMD_LAST;

                        if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
                                break;
                }
                if (ret)
                        break;
        }

        /* Leave the part write-disabled regardless of outcome. */
        nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
        tg3_nvram_exec_cmd(tp, nvram_cmd);

        kfree(tmp);

        return ret;
}
9350
/* offset and length are dword aligned.
 *
 * Write path for buffered flash and EEPROM parts: words are written
 * directly, one dword per command, with FIRST/LAST markers delimiting
 * each page burst.  Returns 0 on success or a negative errno.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
                u8 *buf)
{
        int i, ret = 0;

        for (i = 0; i < len; i += 4, offset += 4) {
                u32 data, page_off, phy_addr, nvram_cmd;

                memcpy(&data, buf + i, 4);
                tw32(NVRAM_WRDATA, cpu_to_be32(data));

                page_off = offset % tp->nvram_pagesize;

                /* Map to the device's page-based addressing if needed. */
                phy_addr = tg3_nvram_phys_addr(tp, offset);

                tw32(NVRAM_ADDR, phy_addr);

                nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

                /* Flag page boundaries and the end of the transfer. */
                if ((page_off == 0) || (i == 0))
                        nvram_cmd |= NVRAM_CMD_FIRST;
                else if (page_off == (tp->nvram_pagesize - 4))
                        nvram_cmd |= NVRAM_CMD_LAST;

                if (i == (len - 4))
                        nvram_cmd |= NVRAM_CMD_LAST;

                /* ST parts on pre-5752 chips need an explicit write-enable
                 * command before the first dword of each page. */
                if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
                    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
                    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
                    (tp->nvram_jedecnum == JEDEC_ST) &&
                    (nvram_cmd & NVRAM_CMD_FIRST)) {

                        if ((ret = tg3_nvram_exec_cmd(tp,
                                NVRAM_CMD_WREN | NVRAM_CMD_GO |
                                NVRAM_CMD_DONE)))

                                break;
                }
                if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
                        /* We always do complete word writes to eeprom. */
                        nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
                }

                if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
                        break;
        }
        return ret;
}
9401
9402 /* offset and length are dword aligned */
9403 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9404 {
9405         int ret;
9406
9407         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9408                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
9409                 return -EINVAL;
9410         }
9411
9412         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9413                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9414                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9415                 udelay(40);
9416         }
9417
9418         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9419                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9420         }
9421         else {
9422                 u32 grc_mode;
9423
9424                 ret = tg3_nvram_lock(tp);
9425                 if (ret)
9426                         return ret;
9427
9428                 tg3_enable_nvram_access(tp);
9429                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9430                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9431                         tw32(NVRAM_WRITE1, 0x406);
9432
9433                 grc_mode = tr32(GRC_MODE);
9434                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9435
9436                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9437                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9438
9439                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9440                                 buf);
9441                 }
9442                 else {
9443                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9444                                 buf);
9445                 }
9446
9447                 grc_mode = tr32(GRC_MODE);
9448                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9449
9450                 tg3_disable_nvram_access(tp);
9451                 tg3_nvram_unlock(tp);
9452         }
9453
9454         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9455                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9456                 udelay(40);
9457         }
9458
9459         return ret;
9460 }
9461
/* Maps a board's PCI subsystem (vendor, device) ID pair to its PHY id.
 * A phy_id of 0 appears to mark boards with no recognized copper PHY
 * (fiber/serdes variants) — confirm against users of this table.
 */
struct subsys_tbl_ent {
        u16 subsys_vendor, subsys_devid;
        u32 phy_id;
};
9466
/* Known boards keyed by PCI subsystem IDs; searched linearly by
 * lookup_by_subsys().
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
        /* Broadcom boards. */
        { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
        { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
        { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
        { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
        { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
        { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
        { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
        { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
        { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
        { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
        { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

        /* 3com boards. */
        { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
        { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
        { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
        { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
        { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

        /* DELL boards. */
        { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
        { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
        { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
        { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

        /* Compaq boards. */
        { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
        { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
        { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
        { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
        { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

        /* IBM boards. */
        { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
9504
9505 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9506 {
9507         int i;
9508
9509         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9510                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9511                      tp->pdev->subsystem_vendor) &&
9512                     (subsys_id_to_phy_id[i].subsys_devid ==
9513                      tp->pdev->subsystem_device))
9514                         return &subsys_id_to_phy_id[i];
9515         }
9516         return NULL;
9517 }
9518
9519 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9520 {
9521         u32 val;
9522         u16 pmcsr;
9523
9524         /* On some early chips the SRAM cannot be accessed in D3hot state,
9525          * so need make sure we're in D0.
9526          */
9527         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9528         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9529         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9530         msleep(1);
9531
9532         /* Make sure register accesses (indirect or otherwise)
9533          * will function correctly.
9534          */
9535         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9536                                tp->misc_host_ctrl);
9537
9538         tp->phy_id = PHY_ID_INVALID;
9539         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9540
9541         /* Do not even try poking around in here on Sun parts.  */
9542         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
9543                 return;
9544
9545         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9546         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9547                 u32 nic_cfg, led_cfg;
9548                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9549                 int eeprom_phy_serdes = 0;
9550
9551                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9552                 tp->nic_sram_data_cfg = nic_cfg;
9553
9554                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9555                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9556                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9557                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9558                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9559                     (ver > 0) && (ver < 0x100))
9560                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9561
9562                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9563                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9564                         eeprom_phy_serdes = 1;
9565
9566                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9567                 if (nic_phy_id != 0) {
9568                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9569                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9570
9571                         eeprom_phy_id  = (id1 >> 16) << 10;
9572                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9573                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9574                 } else
9575                         eeprom_phy_id = 0;
9576
9577                 tp->phy_id = eeprom_phy_id;
9578                 if (eeprom_phy_serdes) {
9579                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9580                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9581                         else
9582                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9583                 }
9584
9585                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9586                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9587                                     SHASTA_EXT_LED_MODE_MASK);
9588                 else
9589                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9590
9591                 switch (led_cfg) {
9592                 default:
9593                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9594                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9595                         break;
9596
9597                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9598                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9599                         break;
9600
9601                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9602                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9603
9604                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9605                          * read on some older 5700/5701 bootcode.
9606                          */
9607                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9608                             ASIC_REV_5700 ||
9609                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9610                             ASIC_REV_5701)
9611                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9612
9613                         break;
9614
9615                 case SHASTA_EXT_LED_SHARED:
9616                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9617                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9618                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9619                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9620                                                  LED_CTRL_MODE_PHY_2);
9621                         break;
9622
9623                 case SHASTA_EXT_LED_MAC:
9624                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9625                         break;
9626
9627                 case SHASTA_EXT_LED_COMBO:
9628                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9629                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9630                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9631                                                  LED_CTRL_MODE_PHY_2);
9632                         break;
9633
9634                 };
9635
9636                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9637                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9638                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9639                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9640
9641                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9642                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9643                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
9644                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9645
9646                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9647                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9648                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9649                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9650                 }
9651                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9652                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9653
9654                 if (cfg2 & (1 << 17))
9655                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9656
9657                 /* serdes signal pre-emphasis in register 0x590 set by */
9658                 /* bootcode if bit 18 is set */
9659                 if (cfg2 & (1 << 18))
9660                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9661         }
9662 }
9663
/* Determine the PHY attached to this NIC and record its ID in
 * tp->phy_id, then perform initial copper-PHY setup (reset and full
 * autoneg advertisement) when the PHY is neither serdes nor owned by
 * ASF firmware.
 *
 * PHY ID resolution order:
 *   1. MII_PHYSID1/MII_PHYSID2 read directly from the chip
 *      (skipped entirely when ASF is enabled -- see below);
 *   2. the ID already extracted from the EEPROM by
 *      tg3_get_eeprom_hw_cfg();
 *   3. the hardcoded subsystem-ID table via lookup_by_subsys().
 *
 * Returns 0 on success, -ENODEV if no PHY ID could be determined,
 * or a nonzero PHY access error.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
        u32 hw_phy_id_1, hw_phy_id_2;
        u32 hw_phy_id, hw_phy_id_masked;
        int err;

        /* Reading the PHY ID register can conflict with ASF
         * firmware access to the PHY hardware.
         */
        err = 0;
        if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
                hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
        } else {
                /* Now read the physical PHY_ID from the chip and verify
                 * that it is sane.  If it doesn't look good, we fall back
                 * to either the hard-coded table based PHY_ID and failing
                 * that the value found in the eeprom area.
                 */
                err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
                err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

                /* Pack the two MII ID registers into the driver's
                 * internal PHY_ID layout (same packing as the
                 * NIC_SRAM_DATA_PHY_ID decode in tg3_get_eeprom_hw_cfg).
                 */
                hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
                hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
                hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

                hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
        }

        /* Prefer the ID read from hardware when it is one we know. */
        if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
                tp->phy_id = hw_phy_id;
                if (hw_phy_id_masked == PHY_ID_BCM8002)
                        tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
                else
                        tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
        } else {
                if (tp->phy_id != PHY_ID_INVALID) {
                        /* Do nothing, phy ID already set up in
                         * tg3_get_eeprom_hw_cfg().
                         */
                } else {
                        struct subsys_tbl_ent *p;

                        /* No eeprom signature?  Try the hardcoded
                         * subsys device table.
                         */
                        p = lookup_by_subsys(tp);
                        if (!p)
                                return -ENODEV;

                        tp->phy_id = p->phy_id;
                        if (!tp->phy_id ||
                            tp->phy_id == PHY_ID_BCM8002)
                                tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
                }
        }

        /* Copper-only bring-up: skip for serdes links and when ASF
         * firmware manages the PHY.
         */
        if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
                u32 bmsr, adv_reg, tg3_ctrl;

                /* BMSR is read twice: the MII status register latches
                 * link-down events, so the second read reflects the
                 * current link state.  If link is already up, leave the
                 * PHY untouched.
                 */
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    (bmsr & BMSR_LSTATUS))
                        goto skip_phy_reset;

                err = tg3_phy_reset(tp);
                if (err)
                        return err;

                /* Advertise all 10/100 modes plus pause capability. */
                adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
                           ADVERTISE_100HALF | ADVERTISE_100FULL |
                           ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
                tg3_ctrl = 0;
                if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
                        /* Also advertise gigabit; 5701 A0/B0 must be
                         * forced into master mode.
                         */
                        tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
                                    MII_TG3_CTRL_ADV_1000_FULL);
                        if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                            tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
                                tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
                }

                /* Only restart autoneg when not everything is already
                 * being advertised.
                 */
                if (!tg3_copper_is_advertising_all(tp)) {
                        tg3_writephy(tp, MII_ADVERTISE, adv_reg);

                        if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
                                tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

                        tg3_writephy(tp, MII_BMCR,
                                     BMCR_ANENABLE | BMCR_ANRESTART);
                }
                tg3_phy_set_wirespeed(tp);

                /* NOTE(review): the advertisement registers are written
                 * again unconditionally here, even when the block above
                 * already wrote them -- presumably harmless re-latching;
                 * confirm before removing.
                 */
                tg3_writephy(tp, MII_ADVERTISE, adv_reg);
                if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
                        tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
        }

skip_phy_reset:
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                err = tg3_init_5401phy_dsp(tp);
                if (err)
                        return err;
        }

        /* NOTE(review): when the first 5401 DSP init above succeeded
         * (err == 0), the DSP init runs a second time here -- this looks
         * like a deliberate part of the BCM5401 bring-up sequence;
         * confirm before consolidating the two calls.
         */
        if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
                err = tg3_init_5401phy_dsp(tp);
        }

        /* Serdes links advertise fiber gigabit only; 10/100-only parts
         * must not advertise gigabit at all.
         */
        if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
                tp->link_config.advertising =
                        (ADVERTISED_1000baseT_Half |
                         ADVERTISED_1000baseT_Full |
                         ADVERTISED_Autoneg |
                         ADVERTISED_FIBRE);
        if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
                tp->link_config.advertising &=
                        ~(ADVERTISED_1000baseT_Half |
                          ADVERTISED_1000baseT_Full);

        return err;
}
9786
9787 static void __devinit tg3_read_partno(struct tg3 *tp)
9788 {
9789         unsigned char vpd_data[256];
9790         int i;
9791         u32 magic;
9792
9793         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9794                 /* Sun decided not to put the necessary bits in the
9795                  * NVRAM of their onboard tg3 parts :(
9796                  */
9797                 strcpy(tp->board_part_number, "Sun 570X");
9798                 return;
9799         }
9800
9801         if (tg3_nvram_read_swab(tp, 0x0, &magic))
9802                 return;
9803
9804         if (magic == TG3_EEPROM_MAGIC) {
9805                 for (i = 0; i < 256; i += 4) {
9806                         u32 tmp;
9807
9808                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9809                                 goto out_not_found;
9810
9811                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9812                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9813                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9814                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9815                 }
9816         } else {
9817                 int vpd_cap;
9818
9819                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
9820                 for (i = 0; i < 256; i += 4) {
9821                         u32 tmp, j = 0;
9822                         u16 tmp16;
9823
9824                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
9825                                               i);
9826                         while (j++ < 100) {
9827                                 pci_read_config_word(tp->pdev, vpd_cap +
9828                                                      PCI_VPD_ADDR, &tmp16);
9829                                 if (tmp16 & 0x8000)
9830                                         break;
9831                                 msleep(1);
9832                         }
9833                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
9834                                               &tmp);
9835                         tmp = cpu_to_le32(tmp);
9836                         memcpy(&vpd_data[i], &tmp, 4);
9837                 }
9838         }
9839
9840         /* Now parse and find the part number. */
9841         for (i = 0; i < 256; ) {
9842                 unsigned char val = vpd_data[i];
9843                 int block_end;
9844
9845                 if (val == 0x82 || val == 0x91) {
9846                         i = (i + 3 +
9847                              (vpd_data[i + 1] +
9848                               (vpd_data[i + 2] << 8)));
9849                         continue;
9850                 }
9851
9852                 if (val != 0x90)
9853                         goto out_not_found;
9854
9855                 block_end = (i + 3 +
9856                              (vpd_data[i + 1] +
9857                               (vpd_data[i + 2] << 8)));
9858                 i += 3;
9859                 while (i < block_end) {
9860                         if (vpd_data[i + 0] == 'P' &&
9861                             vpd_data[i + 1] == 'N') {
9862                                 int partno_len = vpd_data[i + 2];
9863
9864                                 if (partno_len > 24)
9865                                         goto out_not_found;
9866
9867                                 memcpy(tp->board_part_number,
9868                                        &vpd_data[i + 3],
9869                                        partno_len);
9870
9871                                 /* Success. */
9872                                 return;
9873                         }
9874                 }
9875
9876                 /* Part number not found. */
9877                 goto out_not_found;
9878         }
9879
9880 out_not_found:
9881         strcpy(tp->board_part_number, "none");
9882 }
9883
9884 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
9885 {
9886         u32 val, offset, start;
9887
9888         if (tg3_nvram_read_swab(tp, 0, &val))
9889                 return;
9890
9891         if (val != TG3_EEPROM_MAGIC)
9892                 return;
9893
9894         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
9895             tg3_nvram_read_swab(tp, 0x4, &start))
9896                 return;
9897
9898         offset = tg3_nvram_logical_addr(tp, offset);
9899         if (tg3_nvram_read_swab(tp, offset, &val))
9900                 return;
9901
9902         if ((val & 0xfc000000) == 0x0c000000) {
9903                 u32 ver_offset, addr;
9904                 int i;
9905
9906                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
9907                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
9908                         return;
9909
9910                 if (val != 0)
9911                         return;
9912
9913                 addr = offset + ver_offset - start;
9914                 for (i = 0; i < 16; i += 4) {
9915                         if (tg3_nvram_read(tp, addr + i, &val))
9916                                 return;
9917
9918                         val = cpu_to_le32(val);
9919                         memcpy(tp->fw_ver + i, &val, 4);
9920                 }
9921         }
9922 }
9923
9924 #ifdef CONFIG_SPARC64
9925 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9926 {
9927         struct pci_dev *pdev = tp->pdev;
9928         struct pcidev_cookie *pcp = pdev->sysdata;
9929
9930         if (pcp != NULL) {
9931                 int node = pcp->prom_node;
9932                 u32 venid;
9933                 int err;
9934
9935                 err = prom_getproperty(node, "subsystem-vendor-id",
9936                                        (char *) &venid, sizeof(venid));
9937                 if (err == 0 || err == -1)
9938                         return 0;
9939                 if (venid == PCI_VENDOR_ID_SUN)
9940                         return 1;
9941
9942                 /* TG3 chips onboard the SunBlade-2500 don't have the
9943                  * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
9944                  * are distinguishable from non-Sun variants by being
9945                  * named "network" by the firmware.  Non-Sun cards will
9946                  * show up as being named "ethernet".
9947                  */
9948                 if (!strcmp(pcp->prom_name, "network"))
9949                         return 1;
9950         }
9951         return 0;
9952 }
9953 #endif
9954
9955 static int __devinit tg3_get_invariants(struct tg3 *tp)
9956 {
9957         static struct pci_device_id write_reorder_chipsets[] = {
9958                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9959                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9960                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9961                              PCI_DEVICE_ID_VIA_8385_0) },
9962                 { },
9963         };
9964         u32 misc_ctrl_reg;
9965         u32 cacheline_sz_reg;
9966         u32 pci_state_reg, grc_misc_cfg;
9967         u32 val;
9968         u16 pci_cmd;
9969         int err;
9970
9971 #ifdef CONFIG_SPARC64
9972         if (tg3_is_sun_570X(tp))
9973                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9974 #endif
9975
9976         /* Force memory write invalidate off.  If we leave it on,
9977          * then on 5700_BX chips we have to enable a workaround.
9978          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9979          * to match the cacheline size.  The Broadcom driver have this
9980          * workaround but turns MWI off all the times so never uses
9981          * it.  This seems to suggest that the workaround is insufficient.
9982          */
9983         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9984         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9985         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9986
9987         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9988          * has the register indirect write enable bit set before
9989          * we try to access any of the MMIO registers.  It is also
9990          * critical that the PCI-X hw workaround situation is decided
9991          * before that as well.
9992          */
9993         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9994                               &misc_ctrl_reg);
9995
9996         tp->pci_chip_rev_id = (misc_ctrl_reg >>
9997                                MISC_HOST_CTRL_CHIPREV_SHIFT);
9998
9999         /* Wrong chip ID in 5752 A0. This code can be removed later
10000          * as A0 is not in production.
10001          */
10002         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10003                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10004
10005         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10006          * we need to disable memory and use config. cycles
10007          * only to access all registers. The 5702/03 chips
10008          * can mistakenly decode the special cycles from the
10009          * ICH chipsets as memory write cycles, causing corruption
10010          * of register and memory space. Only certain ICH bridges
10011          * will drive special cycles with non-zero data during the
10012          * address phase which can fall within the 5703's address
10013          * range. This is not an ICH bug as the PCI spec allows
10014          * non-zero address during special cycles. However, only
10015          * these ICH bridges are known to drive non-zero addresses
10016          * during special cycles.
10017          *
10018          * Since special cycles do not cross PCI bridges, we only
10019          * enable this workaround if the 5703 is on the secondary
10020          * bus of these ICH bridges.
10021          */
10022         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10023             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10024                 static struct tg3_dev_id {
10025                         u32     vendor;
10026                         u32     device;
10027                         u32     rev;
10028                 } ich_chipsets[] = {
10029                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10030                           PCI_ANY_ID },
10031                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10032                           PCI_ANY_ID },
10033                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10034                           0xa },
10035                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10036                           PCI_ANY_ID },
10037                         { },
10038                 };
10039                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10040                 struct pci_dev *bridge = NULL;
10041
10042                 while (pci_id->vendor != 0) {
10043                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10044                                                 bridge);
10045                         if (!bridge) {
10046                                 pci_id++;
10047                                 continue;
10048                         }
10049                         if (pci_id->rev != PCI_ANY_ID) {
10050                                 u8 rev;
10051
10052                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
10053                                                      &rev);
10054                                 if (rev > pci_id->rev)
10055                                         continue;
10056                         }
10057                         if (bridge->subordinate &&
10058                             (bridge->subordinate->number ==
10059                              tp->pdev->bus->number)) {
10060
10061                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10062                                 pci_dev_put(bridge);
10063                                 break;
10064                         }
10065                 }
10066         }
10067
10068         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10069          * DMA addresses > 40-bit. This bridge may have other additional
10070          * 57xx devices behind it in some 4-port NIC designs for example.
10071          * Any tg3 device found behind the bridge will also need the 40-bit
10072          * DMA workaround.
10073          */
10074         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10075             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10076                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10077                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10078                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10079         }
10080         else {
10081                 struct pci_dev *bridge = NULL;
10082
10083                 do {
10084                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10085                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10086                                                 bridge);
10087                         if (bridge && bridge->subordinate &&
10088                             (bridge->subordinate->number <=
10089                              tp->pdev->bus->number) &&
10090                             (bridge->subordinate->subordinate >=
10091                              tp->pdev->bus->number)) {
10092                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10093                                 pci_dev_put(bridge);
10094                                 break;
10095                         }
10096                 } while (bridge);
10097         }
10098
10099         /* Initialize misc host control in PCI block. */
10100         tp->misc_host_ctrl |= (misc_ctrl_reg &
10101                                MISC_HOST_CTRL_CHIPREV);
10102         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10103                                tp->misc_host_ctrl);
10104
10105         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10106                               &cacheline_sz_reg);
10107
10108         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10109         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10110         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10111         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10112
10113         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10114             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10115             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10116             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10117             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10118                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10119
10120         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10121             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10122                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10123
10124         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10125                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10126                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10127                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10128                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10129                 } else
10130                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1;
10131         }
10132
10133         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10134             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10135             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10136             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10137             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10138                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10139
10140         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10141                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10142
10143         /* If we have an AMD 762 or VIA K8T800 chipset, write
10144          * reordering to the mailbox registers done by the host
10145          * controller can cause major troubles.  We read back from
10146          * every mailbox register write to force the writes to be
10147          * posted to the chip in order.
10148          */
10149         if (pci_dev_present(write_reorder_chipsets) &&
10150             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10151                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10152
10153         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10154             tp->pci_lat_timer < 64) {
10155                 tp->pci_lat_timer = 64;
10156
10157                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10158                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10159                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10160                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10161
10162                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10163                                        cacheline_sz_reg);
10164         }
10165
10166         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10167                               &pci_state_reg);
10168
10169         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10170                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10171
10172                 /* If this is a 5700 BX chipset, and we are in PCI-X
10173                  * mode, enable register write workaround.
10174                  *
10175                  * The workaround is to use indirect register accesses
10176                  * for all chip writes not to mailbox registers.
10177                  */
10178                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10179                         u32 pm_reg;
10180                         u16 pci_cmd;
10181
10182                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10183
10184                         /* The chip can have it's power management PCI config
10185                          * space registers clobbered due to this bug.
10186                          * So explicitly force the chip into D0 here.
10187                          */
10188                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10189                                               &pm_reg);
10190                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10191                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10192                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10193                                                pm_reg);
10194
10195                         /* Also, force SERR#/PERR# in PCI command. */
10196                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10197                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10198                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10199                 }
10200         }
10201
10202         /* 5700 BX chips need to have their TX producer index mailboxes
10203          * written twice to workaround a bug.
10204          */
10205         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10206                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10207
10208         /* Back to back register writes can cause problems on this chip,
10209          * the workaround is to read back all reg writes except those to
10210          * mailbox regs.  See tg3_write_indirect_reg32().
10211          *
10212          * PCI Express 5750_A0 rev chips need this workaround too.
10213          */
10214         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10215             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10216              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10217                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10218
10219         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10220                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10221         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10222                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10223
10224         /* Chip-specific fixup from Broadcom driver */
10225         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10226             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10227                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10228                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10229         }
10230
10231         /* Default fast path register access methods */
10232         tp->read32 = tg3_read32;
10233         tp->write32 = tg3_write32;
10234         tp->read32_mbox = tg3_read32;
10235         tp->write32_mbox = tg3_write32;
10236         tp->write32_tx_mbox = tg3_write32;
10237         tp->write32_rx_mbox = tg3_write32;
10238
10239         /* Various workaround register access methods */
10240         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10241                 tp->write32 = tg3_write_indirect_reg32;
10242         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10243                 tp->write32 = tg3_write_flush_reg32;
10244
10245         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10246             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10247                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10248                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10249                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10250         }
10251
10252         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10253                 tp->read32 = tg3_read_indirect_reg32;
10254                 tp->write32 = tg3_write_indirect_reg32;
10255                 tp->read32_mbox = tg3_read_indirect_mbox;
10256                 tp->write32_mbox = tg3_write_indirect_mbox;
10257                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10258                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10259
10260                 iounmap(tp->regs);
10261                 tp->regs = NULL;
10262
10263                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10264                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10265                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10266         }
10267
10268         /* Get eeprom hw config before calling tg3_set_power_state().
10269          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10270          * determined before calling tg3_set_power_state() so that
10271          * we know whether or not to switch out of Vaux power.
10272          * When the flag is set, it means that GPIO1 is used for eeprom
10273          * write protect and also implies that it is a LOM where GPIOs
10274          * are not used to switch power.
10275          */ 
10276         tg3_get_eeprom_hw_cfg(tp);
10277
10278         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10279          * GPIO1 driven high will bring 5700's external PHY out of reset.
10280          * It is also used as eeprom write protect on LOMs.
10281          */
10282         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10283         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10284             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10285                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10286                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10287         /* Unused GPIO3 must be driven as output on 5752 because there
10288          * are no pull-up resistors on unused GPIO pins.
10289          */
10290         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10291                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10292
10293         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10294                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10295
10296         /* Force the chip into D0. */
10297         err = tg3_set_power_state(tp, PCI_D0);
10298         if (err) {
10299                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10300                        pci_name(tp->pdev));
10301                 return err;
10302         }
10303
10304         /* 5700 B0 chips do not support checksumming correctly due
10305          * to hardware bugs.
10306          */
10307         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10308                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10309
10310         /* Derive initial jumbo mode from MTU assigned in
10311          * ether_setup() via the alloc_etherdev() call
10312          */
10313         if (tp->dev->mtu > ETH_DATA_LEN &&
10314             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10315                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10316
10317         /* Determine WakeOnLan speed to use. */
10318         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10319             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10320             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10321             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10322                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10323         } else {
10324                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10325         }
10326
10327         /* A few boards don't want Ethernet@WireSpeed phy feature */
10328         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10329             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10330              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10331              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10332             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10333                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10334
10335         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10336             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10337                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10338         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10339                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10340
10341         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
10342             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
10343             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787))
10344                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10345
10346         tp->coalesce_mode = 0;
10347         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10348             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10349                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10350
10351         /* Initialize MAC MI mode, polling disabled. */
10352         tw32_f(MAC_MI_MODE, tp->mi_mode);
10353         udelay(80);
10354
10355         /* Initialize data/descriptor byte/word swapping. */
10356         val = tr32(GRC_MODE);
10357         val &= GRC_MODE_HOST_STACKUP;
10358         tw32(GRC_MODE, val | tp->grc_mode);
10359
10360         tg3_switch_clocks(tp);
10361
10362         /* Clear this out for sanity. */
10363         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10364
10365         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10366                               &pci_state_reg);
10367         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10368             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10369                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10370
10371                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10372                     chiprevid == CHIPREV_ID_5701_B0 ||
10373                     chiprevid == CHIPREV_ID_5701_B2 ||
10374                     chiprevid == CHIPREV_ID_5701_B5) {
10375                         void __iomem *sram_base;
10376
10377                         /* Write some dummy words into the SRAM status block
10378                          * area, see if it reads back correctly.  If the return
10379                          * value is bad, force enable the PCIX workaround.
10380                          */
10381                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10382
10383                         writel(0x00000000, sram_base);
10384                         writel(0x00000000, sram_base + 4);
10385                         writel(0xffffffff, sram_base + 4);
10386                         if (readl(sram_base) != 0x00000000)
10387                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10388                 }
10389         }
10390
10391         udelay(50);
10392         tg3_nvram_init(tp);
10393
10394         grc_misc_cfg = tr32(GRC_MISC_CFG);
10395         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10396
10397         /* Broadcom's driver says that CIOBE multisplit has a bug */
10398 #if 0
10399         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10400             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10401                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10402                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10403         }
10404 #endif
10405         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10406             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10407              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10408                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10409
10410         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10411             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10412                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10413         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10414                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10415                                       HOSTCC_MODE_CLRTICK_TXBD);
10416
10417                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10418                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10419                                        tp->misc_host_ctrl);
10420         }
10421
10422         /* these are limited to 10/100 only */
10423         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10424              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10425             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10426              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10427              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10428               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10429               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10430             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10431              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10432               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10433                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10434
10435         err = tg3_phy_probe(tp);
10436         if (err) {
10437                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10438                        pci_name(tp->pdev), err);
10439                 /* ... but do not return immediately ... */
10440         }
10441
10442         tg3_read_partno(tp);
10443         tg3_read_fw_ver(tp);
10444
10445         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10446                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10447         } else {
10448                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10449                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10450                 else
10451                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10452         }
10453
10454         /* 5700 {AX,BX} chips have a broken status block link
10455          * change bit implementation, so we must use the
10456          * status register in those cases.
10457          */
10458         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10459                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10460         else
10461                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10462
10463         /* The led_ctrl is set during tg3_phy_probe, here we might
10464          * have to force the link status polling mechanism based
10465          * upon subsystem IDs.
10466          */
10467         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10468             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10469                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10470                                   TG3_FLAG_USE_LINKCHG_REG);
10471         }
10472
10473         /* For all SERDES we poll the MAC status register. */
10474         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10475                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10476         else
10477                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10478
10479         /* All chips before 5787 can get confused if TX buffers
10480          * straddle the 4GB address boundary in some cases.
10481          */
10482         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10483             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10484                 tp->dev->hard_start_xmit = tg3_start_xmit;
10485         else
10486                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10487
10488         tp->rx_offset = 2;
10489         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10490             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10491                 tp->rx_offset = 0;
10492
10493         /* By default, disable wake-on-lan.  User can change this
10494          * using ETHTOOL_SWOL.
10495          */
10496         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10497
10498         return err;
10499 }
10500
10501 #ifdef CONFIG_SPARC64
10502 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10503 {
10504         struct net_device *dev = tp->dev;
10505         struct pci_dev *pdev = tp->pdev;
10506         struct pcidev_cookie *pcp = pdev->sysdata;
10507
10508         if (pcp != NULL) {
10509                 int node = pcp->prom_node;
10510
10511                 if (prom_getproplen(node, "local-mac-address") == 6) {
10512                         prom_getproperty(node, "local-mac-address",
10513                                          dev->dev_addr, 6);
10514                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10515                         return 0;
10516                 }
10517         }
10518         return -ENODEV;
10519 }
10520
10521 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10522 {
10523         struct net_device *dev = tp->dev;
10524
10525         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10526         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10527         return 0;
10528 }
10529 #endif
10530
10531 static int __devinit tg3_get_device_address(struct tg3 *tp)
10532 {
10533         struct net_device *dev = tp->dev;
10534         u32 hi, lo, mac_offset;
10535         int addr_ok = 0;
10536
10537 #ifdef CONFIG_SPARC64
10538         if (!tg3_get_macaddr_sparc(tp))
10539                 return 0;
10540 #endif
10541
10542         mac_offset = 0x7c;
10543         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10544              !(tp->tg3_flags & TG3_FLG2_SUN_570X)) ||
10545             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10546                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10547                         mac_offset = 0xcc;
10548                 if (tg3_nvram_lock(tp))
10549                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10550                 else
10551                         tg3_nvram_unlock(tp);
10552         }
10553
10554         /* First try to get it from MAC address mailbox. */
10555         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
10556         if ((hi >> 16) == 0x484b) {
10557                 dev->dev_addr[0] = (hi >>  8) & 0xff;
10558                 dev->dev_addr[1] = (hi >>  0) & 0xff;
10559
10560                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10561                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10562                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10563                 dev->dev_addr[4] = (lo >>  8) & 0xff;
10564                 dev->dev_addr[5] = (lo >>  0) & 0xff;
10565
10566                 /* Some old bootcode may report a 0 MAC address in SRAM */
10567                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
10568         }
10569         if (!addr_ok) {
10570                 /* Next, try NVRAM. */
10571                 if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
10572                     !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10573                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10574                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
10575                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
10576                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
10577                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
10578                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
10579                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
10580                 }
10581                 /* Finally just fetch it out of the MAC control regs. */
10582                 else {
10583                         hi = tr32(MAC_ADDR_0_HIGH);
10584                         lo = tr32(MAC_ADDR_0_LOW);
10585
10586                         dev->dev_addr[5] = lo & 0xff;
10587                         dev->dev_addr[4] = (lo >> 8) & 0xff;
10588                         dev->dev_addr[3] = (lo >> 16) & 0xff;
10589                         dev->dev_addr[2] = (lo >> 24) & 0xff;
10590                         dev->dev_addr[1] = hi & 0xff;
10591                         dev->dev_addr[0] = (hi >> 8) & 0xff;
10592                 }
10593         }
10594
10595         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10596 #ifdef CONFIG_SPARC64
10597                 if (!tg3_get_default_macaddr_sparc(tp))
10598                         return 0;
10599 #endif
10600                 return -EINVAL;
10601         }
10602         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10603         return 0;
10604 }
10605
10606 #define BOUNDARY_SINGLE_CACHELINE       1
10607 #define BOUNDARY_MULTI_CACHELINE        2
10608
10609 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10610 {
10611         int cacheline_size;
10612         u8 byte;
10613         int goal;
10614
10615         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10616         if (byte == 0)
10617                 cacheline_size = 1024;
10618         else
10619                 cacheline_size = (int) byte * 4;
10620
10621         /* On 5703 and later chips, the boundary bits have no
10622          * effect.
10623          */
10624         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10625             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10626             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10627                 goto out;
10628
10629 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10630         goal = BOUNDARY_MULTI_CACHELINE;
10631 #else
10632 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10633         goal = BOUNDARY_SINGLE_CACHELINE;
10634 #else
10635         goal = 0;
10636 #endif
10637 #endif
10638
10639         if (!goal)
10640                 goto out;
10641
10642         /* PCI controllers on most RISC systems tend to disconnect
10643          * when a device tries to burst across a cache-line boundary.
10644          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10645          *
10646          * Unfortunately, for PCI-E there are only limited
10647          * write-side controls for this, and thus for reads
10648          * we will still get the disconnects.  We'll also waste
10649          * these PCI cycles for both read and write for chips
10650          * other than 5700 and 5701 which do not implement the
10651          * boundary bits.
10652          */
10653         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10654             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10655                 switch (cacheline_size) {
10656                 case 16:
10657                 case 32:
10658                 case 64:
10659                 case 128:
10660                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10661                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10662                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10663                         } else {
10664                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10665                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10666                         }
10667                         break;
10668
10669                 case 256:
10670                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10671                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10672                         break;
10673
10674                 default:
10675                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10676                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10677                         break;
10678                 };
10679         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10680                 switch (cacheline_size) {
10681                 case 16:
10682                 case 32:
10683                 case 64:
10684                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10685                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10686                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10687                                 break;
10688                         }
10689                         /* fallthrough */
10690                 case 128:
10691                 default:
10692                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10693                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10694                         break;
10695                 };
10696         } else {
10697                 switch (cacheline_size) {
10698                 case 16:
10699                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10700                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10701                                         DMA_RWCTRL_WRITE_BNDRY_16);
10702                                 break;
10703                         }
10704                         /* fallthrough */
10705                 case 32:
10706                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10707                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10708                                         DMA_RWCTRL_WRITE_BNDRY_32);
10709                                 break;
10710                         }
10711                         /* fallthrough */
10712                 case 64:
10713                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10714                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10715                                         DMA_RWCTRL_WRITE_BNDRY_64);
10716                                 break;
10717                         }
10718                         /* fallthrough */
10719                 case 128:
10720                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10721                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10722                                         DMA_RWCTRL_WRITE_BNDRY_128);
10723                                 break;
10724                         }
10725                         /* fallthrough */
10726                 case 256:
10727                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10728                                 DMA_RWCTRL_WRITE_BNDRY_256);
10729                         break;
10730                 case 512:
10731                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10732                                 DMA_RWCTRL_WRITE_BNDRY_512);
10733                         break;
10734                 case 1024:
10735                 default:
10736                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10737                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10738                         break;
10739                 };
10740         }
10741
10742 out:
10743         return val;
10744 }
10745
/* Run one hand-driven DMA transfer through the chip's DMA engine.
 *
 * A single internal buffer descriptor is staged in NIC SRAM (via the
 * PCI memory window), the read-DMA (host->device) or write-DMA
 * (device->host) FTQ is kicked, and the matching completion FIFO is
 * polled until the descriptor comes back or we time out.
 *
 * @tp:        device state
 * @buf:       host-side test buffer (kernel virtual address)
 * @buf_dma:   bus/DMA address of @buf
 * @size:      transfer length in bytes
 * @to_device: non-zero = host-to-device (read DMA),
 *             zero = device-to-host (write DMA)
 *
 * Returns 0 on completion, -ENODEV if the DMA did not complete within
 * the polling window (40 x 100us).
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
        struct tg3_internal_buffer_desc test_desc;
        u32 sram_dma_descs;
        int i, ret;

        sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

        /* Clear completion FIFOs and DMA status before the run. */
        tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
        tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
        tw32(RDMAC_STATUS, 0);
        tw32(WDMAC_STATUS, 0);

        tw32(BUFMGR_MODE, 0);
        tw32(FTQ_RESET, 0);

        /* Descriptor: host buffer address, on-chip staging buffer,
         * and transfer length.  0x2100 is the NIC-local buffer offset
         * (the same SRAM offset tg3_test_dma's disabled verification
         * code reads back).
         */
        test_desc.addr_hi = ((u64) buf_dma) >> 32;
        test_desc.addr_lo = buf_dma & 0xffffffff;
        test_desc.nic_mbuf = 0x00002100;
        test_desc.len = size;

        /*
         * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
         * the *second* time the tg3 driver was getting loaded after an
         * initial scan.
         *
         * Broadcom tells me:
         *   ...the DMA engine is connected to the GRC block and a DMA
         *   reset may affect the GRC block in some unpredictable way...
         *   The behavior of resets to individual blocks has not been tested.
         *
         * Broadcom noted the GRC reset will also reset all sub-components.
         */
        if (to_device) {
                /* cqid_sqid: completion/send queue IDs for read DMA
                 * (values per Broadcom; not independently documented).
                 */
                test_desc.cqid_sqid = (13 << 8) | 2;

                tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
                udelay(40);
        } else {
                /* Completion/send queue IDs for write DMA. */
                test_desc.cqid_sqid = (16 << 8) | 7;

                tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
                udelay(40);
        }
        test_desc.flags = 0x00000005;

        /* Copy the descriptor into NIC SRAM one u32 at a time through
         * the PCI memory window config registers.
         */
        for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
                u32 val;

                val = *(((u32 *)&test_desc) + i);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
                                       sram_dma_descs + (i * sizeof(u32)));
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
        }
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

        /* Enqueue the descriptor's SRAM address to start the DMA. */
        if (to_device) {
                tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
        } else {
                tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
        }

        /* Poll the completion FIFO for our descriptor (up to ~4ms). */
        ret = -ENODEV;
        for (i = 0; i < 40; i++) {
                u32 val;

                if (to_device)
                        val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
                else
                        val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
                if ((val & 0xffff) == sram_dma_descs) {
                        ret = 0;
                        break;
                }

                udelay(100);
        }

        return ret;
}
10826
10827 #define TEST_BUFFER_SIZE        0x2000
10828
10829 static int __devinit tg3_test_dma(struct tg3 *tp)
10830 {
10831         dma_addr_t buf_dma;
10832         u32 *buf, saved_dma_rwctrl;
10833         int ret;
10834
10835         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10836         if (!buf) {
10837                 ret = -ENOMEM;
10838                 goto out_nofree;
10839         }
10840
10841         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10842                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10843
10844         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
10845
10846         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10847                 /* DMA read watermark not used on PCIE */
10848                 tp->dma_rwctrl |= 0x00180000;
10849         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10850                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10851                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10852                         tp->dma_rwctrl |= 0x003f0000;
10853                 else
10854                         tp->dma_rwctrl |= 0x003f000f;
10855         } else {
10856                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10857                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10858                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10859
10860                         /* If the 5704 is behind the EPB bridge, we can
10861                          * do the less restrictive ONE_DMA workaround for
10862                          * better performance.
10863                          */
10864                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
10865                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10866                                 tp->dma_rwctrl |= 0x8000;
10867                         else if (ccval == 0x6 || ccval == 0x7)
10868                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10869
10870                         /* Set bit 23 to enable PCIX hw bug fix */
10871                         tp->dma_rwctrl |= 0x009f0000;
10872                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10873                         /* 5780 always in PCIX mode */
10874                         tp->dma_rwctrl |= 0x00144000;
10875                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10876                         /* 5714 always in PCIX mode */
10877                         tp->dma_rwctrl |= 0x00148000;
10878                 } else {
10879                         tp->dma_rwctrl |= 0x001b000f;
10880                 }
10881         }
10882
10883         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10884             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10885                 tp->dma_rwctrl &= 0xfffffff0;
10886
10887         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10888             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10889                 /* Remove this if it causes problems for some boards. */
10890                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10891
10892                 /* On 5700/5701 chips, we need to set this bit.
10893                  * Otherwise the chip will issue cacheline transactions
10894                  * to streamable DMA memory with not all the byte
10895                  * enables turned on.  This is an error on several
10896                  * RISC PCI controllers, in particular sparc64.
10897                  *
10898                  * On 5703/5704 chips, this bit has been reassigned
10899                  * a different meaning.  In particular, it is used
10900                  * on those chips to enable a PCI-X workaround.
10901                  */
10902                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10903         }
10904
10905         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10906
10907 #if 0
10908         /* Unneeded, already done by tg3_get_invariants.  */
10909         tg3_switch_clocks(tp);
10910 #endif
10911
10912         ret = 0;
10913         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10914             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10915                 goto out;
10916
10917         /* It is best to perform DMA test with maximum write burst size
10918          * to expose the 5700/5701 write DMA bug.
10919          */
10920         saved_dma_rwctrl = tp->dma_rwctrl;
10921         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10922         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10923
10924         while (1) {
10925                 u32 *p = buf, i;
10926
10927                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10928                         p[i] = i;
10929
10930                 /* Send the buffer to the chip. */
10931                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10932                 if (ret) {
10933                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
10934                         break;
10935                 }
10936
10937 #if 0
10938                 /* validate data reached card RAM correctly. */
10939                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10940                         u32 val;
10941                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
10942                         if (le32_to_cpu(val) != p[i]) {
10943                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
10944                                 /* ret = -ENODEV here? */
10945                         }
10946                         p[i] = 0;
10947                 }
10948 #endif
10949                 /* Now read it back. */
10950                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10951                 if (ret) {
10952                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
10953
10954                         break;
10955                 }
10956
10957                 /* Verify it. */
10958                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10959                         if (p[i] == i)
10960                                 continue;
10961
10962                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10963                             DMA_RWCTRL_WRITE_BNDRY_16) {
10964                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10965                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10966                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10967                                 break;
10968                         } else {
10969                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10970                                 ret = -ENODEV;
10971                                 goto out;
10972                         }
10973                 }
10974
10975                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10976                         /* Success. */
10977                         ret = 0;
10978                         break;
10979                 }
10980         }
10981         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10982             DMA_RWCTRL_WRITE_BNDRY_16) {
10983                 static struct pci_device_id dma_wait_state_chipsets[] = {
10984                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10985                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10986                         { },
10987                 };
10988
10989                 /* DMA test passed without adjusting DMA boundary,
10990                  * now look for chipsets that are known to expose the
10991                  * DMA bug without failing the test.
10992                  */
10993                 if (pci_dev_present(dma_wait_state_chipsets)) {
10994                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10995                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10996                 }
10997                 else
10998                         /* Safe to use the calculated DMA boundary. */
10999                         tp->dma_rwctrl = saved_dma_rwctrl;
11000
11001                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11002         }
11003
11004 out:
11005         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
11006 out_nofree:
11007         return ret;
11008 }
11009
11010 static void __devinit tg3_init_link_config(struct tg3 *tp)
11011 {
11012         tp->link_config.advertising =
11013                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11014                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11015                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11016                  ADVERTISED_Autoneg | ADVERTISED_MII);
11017         tp->link_config.speed = SPEED_INVALID;
11018         tp->link_config.duplex = DUPLEX_INVALID;
11019         tp->link_config.autoneg = AUTONEG_ENABLE;
11020         tp->link_config.active_speed = SPEED_INVALID;
11021         tp->link_config.active_duplex = DUPLEX_INVALID;
11022         tp->link_config.phy_is_low_power = 0;
11023         tp->link_config.orig_speed = SPEED_INVALID;
11024         tp->link_config.orig_duplex = DUPLEX_INVALID;
11025         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11026 }
11027
11028 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11029 {
11030         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11031                 tp->bufmgr_config.mbuf_read_dma_low_water =
11032                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11033                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11034                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11035                 tp->bufmgr_config.mbuf_high_water =
11036                         DEFAULT_MB_HIGH_WATER_5705;
11037
11038                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11039                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11040                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11041                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11042                 tp->bufmgr_config.mbuf_high_water_jumbo =
11043                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11044         } else {
11045                 tp->bufmgr_config.mbuf_read_dma_low_water =
11046                         DEFAULT_MB_RDMA_LOW_WATER;
11047                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11048                         DEFAULT_MB_MACRX_LOW_WATER;
11049                 tp->bufmgr_config.mbuf_high_water =
11050                         DEFAULT_MB_HIGH_WATER;
11051
11052                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11053                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11054                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11055                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11056                 tp->bufmgr_config.mbuf_high_water_jumbo =
11057                         DEFAULT_MB_HIGH_WATER_JUMBO;
11058         }
11059
11060         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11061         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11062 }
11063
11064 static char * __devinit tg3_phy_string(struct tg3 *tp)
11065 {
11066         switch (tp->phy_id & PHY_ID_MASK) {
11067         case PHY_ID_BCM5400:    return "5400";
11068         case PHY_ID_BCM5401:    return "5401";
11069         case PHY_ID_BCM5411:    return "5411";
11070         case PHY_ID_BCM5701:    return "5701";
11071         case PHY_ID_BCM5703:    return "5703";
11072         case PHY_ID_BCM5704:    return "5704";
11073         case PHY_ID_BCM5705:    return "5705";
11074         case PHY_ID_BCM5750:    return "5750";
11075         case PHY_ID_BCM5752:    return "5752";
11076         case PHY_ID_BCM5714:    return "5714";
11077         case PHY_ID_BCM5780:    return "5780";
11078         case PHY_ID_BCM5755:    return "5755";
11079         case PHY_ID_BCM5787:    return "5787";
11080         case PHY_ID_BCM8002:    return "8002/serdes";
11081         case 0:                 return "serdes";
11082         default:                return "unknown";
11083         };
11084 }
11085
11086 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11087 {
11088         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11089                 strcpy(str, "PCI Express");
11090                 return str;
11091         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11092                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11093
11094                 strcpy(str, "PCIX:");
11095
11096                 if ((clock_ctrl == 7) ||
11097                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11098                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11099                         strcat(str, "133MHz");
11100                 else if (clock_ctrl == 0)
11101                         strcat(str, "33MHz");
11102                 else if (clock_ctrl == 2)
11103                         strcat(str, "50MHz");
11104                 else if (clock_ctrl == 4)
11105                         strcat(str, "66MHz");
11106                 else if (clock_ctrl == 6)
11107                         strcat(str, "100MHz");
11108         } else {
11109                 strcpy(str, "PCI:");
11110                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11111                         strcat(str, "66MHz");
11112                 else
11113                         strcat(str, "33MHz");
11114         }
11115         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11116                 strcat(str, ":32-bit");
11117         else
11118                 strcat(str, ":64-bit");
11119         return str;
11120 }
11121
/* Locate the other PCI function of a dual-port (5704/5714) board.
 *
 * Scans all eight functions of the device's slot for a pci_dev that is
 * not tp->pdev itself.  Returns tp->pdev when no distinct peer exists
 * (single-port configuration).  The reference taken by pci_get_slot()
 * is dropped before returning, since one half of the device cannot be
 * hot-removed without the other.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
        struct pci_dev *peer;
        unsigned int func, devnr = tp->pdev->devfn & ~7;

        for (func = 0; func < 8; func++) {
                peer = pci_get_slot(tp->pdev->bus, devnr | func);
                if (peer && peer != tp->pdev)
                        break;
                /* pci_dev_put(NULL) is a no-op, so no NULL guard needed. */
                pci_dev_put(peer);
        }
        /* NOTE(review): if the loop completes without breaking, `peer`
         * holds the already-put result of the final pci_get_slot() call.
         * If that last lookup returned non-NULL (e.g. tp->pdev itself on
         * function 7), the pci_dev_put() below drops one extra reference
         * on a pointer we no longer own -- confirm this combination
         * cannot occur in practice.
         */
        /* 5704 can be configured in single-port mode, set peer to
         * tp->pdev in that case.
         */
        if (!peer) {
                peer = tp->pdev;
                return peer;
        }

        /*
         * We don't need to keep the refcount elevated; there's no way
         * to remove one half of this device without removing the other
         */
        pci_dev_put(peer);

        return peer;
}
11149
11150 static void __devinit tg3_init_coal(struct tg3 *tp)
11151 {
11152         struct ethtool_coalesce *ec = &tp->coal;
11153
11154         memset(ec, 0, sizeof(*ec));
11155         ec->cmd = ETHTOOL_GCOALESCE;
11156         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11157         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11158         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11159         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11160         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11161         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11162         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11163         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11164         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11165
11166         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11167                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11168                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11169                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11170                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11171                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11172         }
11173
11174         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11175                 ec->rx_coalesce_usecs_irq = 0;
11176                 ec->tx_coalesce_usecs_irq = 0;
11177                 ec->stats_block_coalesce_usecs = 0;
11178         }
11179 }
11180
/* tg3_init_one() - PCI probe routine for one Tigon3 NIC.
 *
 * Enables and maps the PCI device, allocates the net_device, reads the
 * chip's invariants and picks DMA masks, resets any chip state left by
 * pre-boot firmware, runs the DMA self-test, and finally registers the
 * netdev.  On failure, partially acquired resources are released via
 * the goto ladder at the bottom; returns 0 or a negative errno.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
                                  const struct pci_device_id *ent)
{
        static int tg3_version_printed = 0;
        unsigned long tg3reg_base, tg3reg_len;
        struct net_device *dev;
        struct tg3 *tp;
        int i, err, pm_cap;
        char str[40];
        u64 dma_mask, persist_dma_mask;

        /* Print the driver banner only for the first probed device. */
        if (tg3_version_printed++ == 0)
                printk(KERN_INFO "%s", version);

        err = pci_enable_device(pdev);
        if (err) {
                printk(KERN_ERR PFX "Cannot enable PCI device, "
                       "aborting.\n");
                return err;
        }

        /* BAR 0 must be a memory-mapped register window. */
        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                printk(KERN_ERR PFX "Cannot find proper PCI device "
                       "base address, aborting.\n");
                err = -ENODEV;
                goto err_out_disable_pdev;
        }

        err = pci_request_regions(pdev, DRV_MODULE_NAME);
        if (err) {
                printk(KERN_ERR PFX "Cannot obtain PCI resources, "
                       "aborting.\n");
                goto err_out_disable_pdev;
        }

        pci_set_master(pdev);

        /* Find power-management capability. */
        pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
        if (pm_cap == 0) {
                printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
                       "aborting.\n");
                err = -EIO;
                goto err_out_free_res;
        }

        tg3reg_base = pci_resource_start(pdev, 0);
        tg3reg_len = pci_resource_len(pdev, 0);

        dev = alloc_etherdev(sizeof(*tp));
        if (!dev) {
                printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
                err = -ENOMEM;
                goto err_out_free_res;
        }

        SET_MODULE_OWNER(dev);
        SET_NETDEV_DEV(dev, &pdev->dev);

        /* Driver takes its own tx lock, so lockless TX is safe here. */
        dev->features |= NETIF_F_LLTX;
#if TG3_VLAN_TAG_USED
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
        dev->vlan_rx_register = tg3_vlan_rx_register;
        dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
#endif

        /* Private state lives in the netdev's priv area. */
        tp = netdev_priv(dev);
        tp->pdev = pdev;
        tp->dev = dev;
        tp->pm_cap = pm_cap;
        tp->mac_mode = TG3_DEF_MAC_MODE;
        tp->rx_mode = TG3_DEF_RX_MODE;
        tp->tx_mode = TG3_DEF_TX_MODE;
        tp->mi_mode = MAC_MI_MODE_BASE;
        if (tg3_debug > 0)
                tp->msg_enable = tg3_debug;
        else
                tp->msg_enable = TG3_DEF_MSG_ENABLE;

        /* The word/byte swap controls here control register access byte
         * swapping.  DMA data byte swapping is controlled in the GRC_MODE
         * setting below.
         */
        tp->misc_host_ctrl =
                MISC_HOST_CTRL_MASK_PCI_INT |
                MISC_HOST_CTRL_WORD_SWAP |
                MISC_HOST_CTRL_INDIR_ACCESS |
                MISC_HOST_CTRL_PCISTATE_RW;

        /* The NONFRM (non-frame) byte/word swap controls take effect
         * on descriptor entries, anything which isn't packet data.
         *
         * The StrongARM chips on the board (one for tx, one for rx)
         * are running in big-endian mode.
         */
        tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
                        GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
        tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
        spin_lock_init(&tp->lock);
        spin_lock_init(&tp->tx_lock);
        spin_lock_init(&tp->indirect_lock);
        INIT_WORK(&tp->reset_task, tg3_reset_task, tp);

        tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
        /* NOTE(review): comparing the __iomem cookie against 0UL works
         * but `if (!tp->regs)` would be the clearer idiom.
         */
        if (tp->regs == 0UL) {
                printk(KERN_ERR PFX "Cannot map device registers, "
                       "aborting.\n");
                err = -ENOMEM;
                goto err_out_free_dev;
        }

        tg3_init_link_config(tp);

        /* Default ring sizes; user can change via ethtool. */
        tp->rx_pending = TG3_DEF_RX_RING_PENDING;
        tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
        tp->tx_pending = TG3_DEF_TX_RING_PENDING;

        /* Wire up the net_device operations (pre-net_device_ops era). */
        dev->open = tg3_open;
        dev->stop = tg3_close;
        dev->get_stats = tg3_get_stats;
        dev->set_multicast_list = tg3_set_rx_mode;
        dev->set_mac_address = tg3_set_mac_addr;
        dev->do_ioctl = tg3_ioctl;
        dev->tx_timeout = tg3_tx_timeout;
        dev->poll = tg3_poll;
        dev->ethtool_ops = &tg3_ethtool_ops;
        dev->weight = 64;
        dev->watchdog_timeo = TG3_TX_TIMEOUT;
        dev->change_mtu = tg3_change_mtu;
        dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = tg3_poll_controller;
#endif

        /* Identify the chip revision and fill in capability flags. */
        err = tg3_get_invariants(tp);
        if (err) {
                printk(KERN_ERR PFX "Problem fetching invariants of chip, "
                       "aborting.\n");
                goto err_out_iounmap;
        }

        /* The EPB bridge inside 5714, 5715, and 5780 and any
         * device behind the EPB cannot support DMA addresses > 40-bit.
         * On 64-bit systems with IOMMU, use 40-bit dma_mask.
         * On 64-bit systems without IOMMU, use 64-bit dma_mask and
         * do DMA address check in tg3_start_xmit().
         */
        if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
                persist_dma_mask = dma_mask = DMA_32BIT_MASK;
        else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
                persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
                dma_mask = DMA_64BIT_MASK;
#endif
        } else
                persist_dma_mask = dma_mask = DMA_64BIT_MASK;

        /* Configure DMA attributes. */
        if (dma_mask > DMA_32BIT_MASK) {
                err = pci_set_dma_mask(pdev, dma_mask);
                if (!err) {
                        dev->features |= NETIF_F_HIGHDMA;
                        err = pci_set_consistent_dma_mask(pdev,
                                                          persist_dma_mask);
                        if (err < 0) {
                                printk(KERN_ERR PFX "Unable to obtain 64 bit "
                                       "DMA for consistent allocations\n");
                                goto err_out_iounmap;
                        }
                }
        }
        /* Fall back to 32-bit DMA when the wide mask was refused. */
        if (err || dma_mask == DMA_32BIT_MASK) {
                err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (err) {
                        printk(KERN_ERR PFX "No usable DMA configuration, "
                               "aborting.\n");
                        goto err_out_iounmap;
                }
        }

        tg3_init_bufmgr_config(tp);

#if TG3_TSO_SUPPORT != 0
        /* Firmware TSO is not usable on 5700/5701, 5705 A0, or when ASF
         * management firmware is running; hardware TSO always is.
         */
        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
                tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
        }
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
            tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
            (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
                tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
        } else {
                tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
        }

        /* TSO is on by default on chips that support hardware TSO.
         * Firmware TSO on older chips gives lower performance, so it
         * is off by default, but can be enabled using ethtool.
         */
        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                dev->features |= NETIF_F_TSO;

#endif

        /* 5705 A1 on a slow bus without TSO: shrink the rx ring. */
        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
            !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
                tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
                tp->rx_pending = 63;
        }

        /* Dual-port chips need a pointer to their sibling function. */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
                tp->pdev_peer = tg3_find_peer(tp);

        err = tg3_get_device_address(tp);
        if (err) {
                printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
                       "aborting.\n");
                goto err_out_iounmap;
        }

        /*
         * Reset chip in case UNDI or EFI driver did not shutdown
         * DMA self test will enable WDMAC and we'll see (spurious)
         * pending DMA on the PCI bus at that point.
         */
        if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
            (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                pci_save_state(tp->pdev);
                tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        }

        err = tg3_test_dma(tp);
        if (err) {
                printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
                goto err_out_iounmap;
        }

        /* Tigon3 can do ipv4 only... and some chips have buggy
         * checksumming.
         */
        if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
                        dev->features |= NETIF_F_HW_CSUM;
                else
                        dev->features |= NETIF_F_IP_CSUM;
                dev->features |= NETIF_F_SG;
                tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
        } else
                tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

        /* flow control autonegotiation is default behavior */
        tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

        tg3_init_coal(tp);

        /* Now that we have fully setup the chip, save away a snapshot
         * of the PCI config space.  We need to restore this after
         * GRC_MISC_CFG core clock resets and some resume events.
         */
        pci_save_state(tp->pdev);

        err = register_netdev(dev);
        if (err) {
                printk(KERN_ERR PFX "Cannot register net device, "
                       "aborting.\n");
                goto err_out_iounmap;
        }

        pci_set_drvdata(pdev, dev);

        /* Probe summary: board, bus type, MAC address, feature flags. */
        printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
               dev->name,
               tp->board_part_number,
               tp->pci_chip_rev_id,
               tg3_phy_string(tp),
               tg3_bus_string(tp, str),
               (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");

        for (i = 0; i < 6; i++)
                printk("%2.2x%c", dev->dev_addr[i],
                       i == 5 ? '\n' : ':');

        printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
               "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
               "TSOcap[%d] \n",
               dev->name,
               (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
               (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
               (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
               (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
               (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
               (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
               (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
        printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
               dev->name, tp->dma_rwctrl,
               (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
                (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

        /* Carrier stays off until tg3_open() brings the link up. */
        netif_carrier_off(tp->dev);

        return 0;

        /* Error unwinding: each label releases everything acquired
         * after the previous label's resources.
         */
err_out_iounmap:
        if (tp->regs) {
                iounmap(tp->regs);
                tp->regs = NULL;
        }

err_out_free_dev:
        free_netdev(dev);

err_out_free_res:
        pci_release_regions(pdev);

err_out_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}
11506
11507 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11508 {
11509         struct net_device *dev = pci_get_drvdata(pdev);
11510
11511         if (dev) {
11512                 struct tg3 *tp = netdev_priv(dev);
11513
11514                 flush_scheduled_work();
11515                 unregister_netdev(dev);
11516                 if (tp->regs) {
11517                         iounmap(tp->regs);
11518                         tp->regs = NULL;
11519                 }
11520                 free_netdev(dev);
11521                 pci_release_regions(pdev);
11522                 pci_disable_device(pdev);
11523                 pci_set_drvdata(pdev, NULL);
11524         }
11525 }
11526
/* Power-management suspend hook.
 *
 * Quiesces a running interface (flushes pending reset work, stops NAPI
 * polling and the periodic timer, disables interrupts, halts the chip)
 * and then moves the chip into the PCI power state chosen for `state`.
 * If the power transition fails, the device is fully re-initialized
 * and restarted so the interface remains usable.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        /* Nothing to quiesce if the interface is down. */
        if (!netif_running(dev))
                return 0;

        flush_scheduled_work();
        tg3_netif_stop(tp);

        del_timer_sync(&tp->timer);

        tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
        tg3_full_unlock(tp);

        netif_device_detach(dev);

        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
        tg3_full_unlock(tp);

        err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
        if (err) {
                /* Power-down failed: undo the shutdown and bring the
                 * device all the way back up.
                 */
                tg3_full_lock(tp, 0);

                tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
                tg3_init_hw(tp);

                tp->timer.expires = jiffies + tp->timer_offset;
                add_timer(&tp->timer);

                netif_device_attach(dev);
                tg3_netif_start(tp);

                tg3_full_unlock(tp);
        }

        return err;
}
11570
/* Power-management resume hook.
 *
 * Restores the PCI config-space snapshot taken at probe/suspend time,
 * brings the chip back to full power (D0), re-initializes the hardware
 * and restarts the timer and the network interface.  A down interface
 * needs nothing and returns immediately.
 */
static int tg3_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (!netif_running(dev))
                return 0;

        pci_restore_state(tp->pdev);

        err = tg3_set_power_state(tp, PCI_D0);
        if (err)
                return err;

        netif_device_attach(dev);

        tg3_full_lock(tp, 0);

        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
        tg3_init_hw(tp);

        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);

        tg3_netif_start(tp);

        tg3_full_unlock(tp);

        return 0;
}
11602
/* PCI driver glue: probe/remove entry points plus the power-management
 * hooks, matched against the IDs in tg3_pci_tbl.
 */
static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = __devexit_p(tg3_remove_one),
        .suspend        = tg3_suspend,
        .resume         = tg3_resume
};
11611
/* Module entry point: register the PCI driver with the core. */
static int __init tg3_init(void)
{
        return pci_module_init(&tg3_driver);
}
11616
/* Module exit point: unregister the PCI driver, which in turn invokes
 * tg3_remove_one() for every bound device.
 */
static void __exit tg3_cleanup(void)
{
        pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);