[TG3]: Add msi support
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Copyright (C) 2000-2003 Broadcom Corporation.
11  */
12
13 #include <linux/config.h>
14
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/compiler.h>
20 #include <linux/slab.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/ioport.h>
24 #include <linux/pci.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/ethtool.h>
29 #include <linux/mii.h>
30 #include <linux/if_vlan.h>
31 #include <linux/ip.h>
32 #include <linux/tcp.h>
33 #include <linux/workqueue.h>
34
35 #include <net/checksum.h>
36
37 #include <asm/system.h>
38 #include <asm/io.h>
39 #include <asm/byteorder.h>
40 #include <asm/uaccess.h>
41
42 #ifdef CONFIG_SPARC64
43 #include <asm/idprom.h>
44 #include <asm/oplib.h>
45 #include <asm/pbm.h>
46 #endif
47
48 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
49 #define TG3_VLAN_TAG_USED 1
50 #else
51 #define TG3_VLAN_TAG_USED 0
52 #endif
53
54 #ifdef NETIF_F_TSO
55 #define TG3_TSO_SUPPORT 1
56 #else
57 #define TG3_TSO_SUPPORT 0
58 #endif
59
60 #include "tg3.h"
61
62 #define DRV_MODULE_NAME         "tg3"
63 #define PFX DRV_MODULE_NAME     ": "
64 #define DRV_MODULE_VERSION      "3.25"
65 #define DRV_MODULE_RELDATE      "March 24, 2005"
66
67 #define TG3_DEF_MAC_MODE        0
68 #define TG3_DEF_RX_MODE         0
69 #define TG3_DEF_TX_MODE         0
70 #define TG3_DEF_MSG_ENABLE        \
71         (NETIF_MSG_DRV          | \
72          NETIF_MSG_PROBE        | \
73          NETIF_MSG_LINK         | \
74          NETIF_MSG_TIMER        | \
75          NETIF_MSG_IFDOWN       | \
76          NETIF_MSG_IFUP         | \
77          NETIF_MSG_RX_ERR       | \
78          NETIF_MSG_TX_ERR)
79
80 /* length of time before we decide the hardware is borked,
81  * and dev->tx_timeout() should be called to fix the problem
82  */
83 #define TG3_TX_TIMEOUT                  (5 * HZ)
84
85 /* hardware minimum and maximum for a single frame's data payload */
86 #define TG3_MIN_MTU                     60
87 #define TG3_MAX_MTU(tp) \
88         (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 9000 : 1500)
89
90 /* These numbers seem to be hard coded in the NIC firmware somehow.
91  * You can't change the ring sizes, but you can change where you place
92  * them in the NIC onboard memory.
93  */
94 #define TG3_RX_RING_SIZE                512
95 #define TG3_DEF_RX_RING_PENDING         200
96 #define TG3_RX_JUMBO_RING_SIZE          256
97 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
98
99 /* Do not place this n-ring entries value into the tp struct itself;
100  * we really want to expose these constants to GCC so that modulo et
101  * al. operations are done with shifts and masks instead of with
102  * hw multiply/modulo instructions.  Another solution would be to
103  * replace things like '% foo' with '& (foo - 1)'.
104  */
105 #define TG3_RX_RCB_RING_SIZE(tp)        \
106         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
107
108 #define TG3_TX_RING_SIZE                512
109 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
110
111 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
112                                  TG3_RX_RING_SIZE)
113 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
114                                  TG3_RX_JUMBO_RING_SIZE)
115 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
116                                    TG3_RX_RCB_RING_SIZE(tp))
117 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
118                                  TG3_TX_RING_SIZE)
119 #define TX_RING_GAP(TP) \
120         (TG3_TX_RING_SIZE - (TP)->tx_pending)
121 #define TX_BUFFS_AVAIL(TP)                                              \
122         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
123           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
124           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
125 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
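/* Illustrative sketch (not part of the driver): because TG3_TX_RING_SIZE is
 * a power of two, NEXT_TX can advance a ring index with a mask instead of a
 * modulo, and TX_BUFFS_AVAIL measures the free slots between consumer and
 * producer.  The function names below are hypothetical and only demonstrate
 * the arithmetic behind the macros above.
 */
#if 0
static unsigned int example_next_tx(unsigned int n)
{
        /* same result as (n + 1) % TG3_TX_RING_SIZE for a power-of-two
         * ring size, but compiles to a single AND */
        return (n + 1) & (TG3_TX_RING_SIZE - 1);
}

static unsigned int example_tx_buffs_avail(unsigned int cons,
                                           unsigned int prod,
                                           unsigned int pending)
{
        /* mirrors TX_BUFFS_AVAIL(): free descriptors out of 'pending',
         * whether or not the producer index has wrapped past the consumer */
        return (cons <= prod) ? cons + pending - prod
                              : cons - prod - (TG3_TX_RING_SIZE - pending);
}
#endif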
126
127 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
128 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
129
130 /* minimum number of free TX descriptors required to wake up TX process */
131 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
132
133 /* number of ETHTOOL_GSTATS u64's */
134 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
135
136 static char version[] __devinitdata =
137         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
138
139 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
140 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
141 MODULE_LICENSE("GPL");
142 MODULE_VERSION(DRV_MODULE_VERSION);
143
144 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
145 module_param(tg3_debug, int, 0);
146 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
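/* Illustrative sketch (not part of the driver): tg3_debug is a bitmap of
 * NETIF_MSG_* values from <linux/netdevice.h>.  Leaving it at -1 keeps the
 * TG3_DEF_MSG_ENABLE default above; otherwise the value ends up in
 * tp->msg_enable and gates log output through the netif_msg_*() helpers,
 * roughly as in the hypothetical helper below.
 */
#if 0
static void example_report_link(struct tg3 *tp)
{
        if (netif_msg_link(tp))         /* true when NETIF_MSG_LINK is set */
                printk(KERN_INFO PFX "%s: link state changed\n",
                       tp->dev->name);
}
#endif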
147
148 static struct pci_device_id tg3_pci_tbl[] = {
149         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
150           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
151         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
152           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
153         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
154           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
155         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
156           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
158           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
160           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
162           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
164           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
166           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
168           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
170           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
172           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
174           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
176           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
178           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
180           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
182           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
184           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
186           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
188           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
190           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
192           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
194           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
196           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
198           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
200           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
202           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
204           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
206           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
208           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
210           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
212           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
214           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
216           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
218           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
220           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
221         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
222           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
224           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
226           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
227         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
228           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
229         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
230           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
231         { 0, }
232 };
233
234 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
235
236 static struct {
237         const char string[ETH_GSTRING_LEN];
238 } ethtool_stats_keys[TG3_NUM_STATS] = {
239         { "rx_octets" },
240         { "rx_fragments" },
241         { "rx_ucast_packets" },
242         { "rx_mcast_packets" },
243         { "rx_bcast_packets" },
244         { "rx_fcs_errors" },
245         { "rx_align_errors" },
246         { "rx_xon_pause_rcvd" },
247         { "rx_xoff_pause_rcvd" },
248         { "rx_mac_ctrl_rcvd" },
249         { "rx_xoff_entered" },
250         { "rx_frame_too_long_errors" },
251         { "rx_jabbers" },
252         { "rx_undersize_packets" },
253         { "rx_in_length_errors" },
254         { "rx_out_length_errors" },
255         { "rx_64_or_less_octet_packets" },
256         { "rx_65_to_127_octet_packets" },
257         { "rx_128_to_255_octet_packets" },
258         { "rx_256_to_511_octet_packets" },
259         { "rx_512_to_1023_octet_packets" },
260         { "rx_1024_to_1522_octet_packets" },
261         { "rx_1523_to_2047_octet_packets" },
262         { "rx_2048_to_4095_octet_packets" },
263         { "rx_4096_to_8191_octet_packets" },
264         { "rx_8192_to_9022_octet_packets" },
265
266         { "tx_octets" },
267         { "tx_collisions" },
268
269         { "tx_xon_sent" },
270         { "tx_xoff_sent" },
271         { "tx_flow_control" },
272         { "tx_mac_errors" },
273         { "tx_single_collisions" },
274         { "tx_mult_collisions" },
275         { "tx_deferred" },
276         { "tx_excessive_collisions" },
277         { "tx_late_collisions" },
278         { "tx_collide_2times" },
279         { "tx_collide_3times" },
280         { "tx_collide_4times" },
281         { "tx_collide_5times" },
282         { "tx_collide_6times" },
283         { "tx_collide_7times" },
284         { "tx_collide_8times" },
285         { "tx_collide_9times" },
286         { "tx_collide_10times" },
287         { "tx_collide_11times" },
288         { "tx_collide_12times" },
289         { "tx_collide_13times" },
290         { "tx_collide_14times" },
291         { "tx_collide_15times" },
292         { "tx_ucast_packets" },
293         { "tx_mcast_packets" },
294         { "tx_bcast_packets" },
295         { "tx_carrier_sense_errors" },
296         { "tx_discards" },
297         { "tx_errors" },
298
299         { "dma_writeq_full" },
300         { "dma_write_prioq_full" },
301         { "rxbds_empty" },
302         { "rx_discards" },
303         { "rx_errors" },
304         { "rx_threshold_hit" },
305
306         { "dma_readq_full" },
307         { "dma_read_prioq_full" },
308         { "tx_comp_queue_full" },
309
310         { "ring_set_send_prod_index" },
311         { "ring_status_update" },
312         { "nic_irqs" },
313         { "nic_avoided_irqs" },
314         { "nic_tx_threshold_hit" }
315 };
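/* Table of statistic names for ETHTOOL_GSTATS: each entry corresponds, in
 * order, to one u64 counter in struct tg3_ethtool_stats, which is how a
 * user-space `ethtool -S <iface>` run pairs names with values.
 */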
316
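/* Write a 32-bit MAC register.  On chips with the PCI-X target hardware bug
 * the write is routed through the PCI config-space register window
 * (TG3PCI_REG_BASE_ADDR/TG3PCI_REG_DATA) under indirect_lock; otherwise it
 * is a plain MMIO write, followed by a read-back on 5701 parts so the
 * posted write is flushed.
 */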
317 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
318 {
319         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
320                 unsigned long flags;
321
322                 spin_lock_irqsave(&tp->indirect_lock, flags);
323                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
324                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
325                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
326         } else {
327                 writel(val, tp->regs + off);
328                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
329                         readl(tp->regs + off);
330         }
331 }
332
333 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
334 {
335         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
336                 unsigned long flags;
337
338                 spin_lock_irqsave(&tp->indirect_lock, flags);
339                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
340                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
341                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
342         } else {
343                 void __iomem *dest = tp->regs + off;
344                 writel(val, dest);
345                 readl(dest);    /* always flush PCI write */
346         }
347 }
348
349 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
350 {
351         void __iomem *mbox = tp->regs + off;
352         writel(val, mbox);
353         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
354                 readl(mbox);
355 }
356
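/* TX mailbox writes: chips with TG3_FLAG_TXD_MBOX_HWBUG need the doorbell
 * value written twice to be latched reliably, and chipsets that can reorder
 * mailbox writes (TG3_FLAG_MBOX_WRITE_REORDER) get a read-back so the
 * posted write reaches the device before we proceed.
 */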
357 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
358 {
359         void __iomem *mbox = tp->regs + off;
360         writel(val, mbox);
361         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
362                 writel(val, mbox);
363         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
364                 readl(mbox);
365 }
366
367 #define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
368 #define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
369 #define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)
370
371 #define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
372 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
373 #define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
374 #define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
375 #define tr32(reg)               readl(tp->regs + (reg))
376 #define tr16(reg)               readw(tp->regs + (reg))
377 #define tr8(reg)                readb(tp->regs + (reg))
378
379 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
380 {
381         unsigned long flags;
382
383         spin_lock_irqsave(&tp->indirect_lock, flags);
384         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
385         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
386
387         /* Always leave this as zero. */
388         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
389         spin_unlock_irqrestore(&tp->indirect_lock, flags);
390 }
391
392 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
393 {
394         unsigned long flags;
395
396         spin_lock_irqsave(&tp->indirect_lock, flags);
397         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
398         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
399
400         /* Always leave this as zero. */
401         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
402         spin_unlock_irqrestore(&tp->indirect_lock, flags);
403 }
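/* Illustrative sketch (not part of the driver): the two helpers above give
 * access to NIC on-board SRAM through the PCI config-space memory window.
 * A caller might use them like the hypothetical helper below;
 * NIC_SRAM_FIRMWARE_MBOX is the firmware mailbox offset from tg3.h.
 */
#if 0
static u32 example_read_fw_mbox(struct tg3 *tp)
{
        u32 val;

        tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
        return val;
}
#endif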
404
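/* Interrupt masking works on two levels in this driver: the
 * MISC_HOST_CTRL_MASK_PCI_INT bit gates the PCI interrupt line itself, and
 * the MAILBOX_INTERRUPT_0 mailbox (1 = masked, 0 = unmasked) controls
 * whether status-block updates raise an interrupt.  tg3_enable_ints() below
 * also fires a self-interrupt via GRC_LCLCTRL_SETINT when a status update
 * is already pending, so nothing is lost while masked.
 */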
405 static void tg3_disable_ints(struct tg3 *tp)
406 {
407         tw32(TG3PCI_MISC_HOST_CTRL,
408              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
409         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
410         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
411 }
412
413 static inline void tg3_cond_int(struct tg3 *tp)
414 {
415         if (tp->hw_status->status & SD_STATUS_UPDATED)
416                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
417 }
418
419 static void tg3_enable_ints(struct tg3 *tp)
420 {
421         tw32(TG3PCI_MISC_HOST_CTRL,
422              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
423         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
424         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
425
426         tg3_cond_int(tp);
427 }
428
429 /* tg3_restart_ints
430  *  similar to tg3_enable_ints, but it can return without flushing the
431  *  PIO write which re-enables interrupts.
432  */
433 static void tg3_restart_ints(struct tg3 *tp)
434 {
435         tw32(TG3PCI_MISC_HOST_CTRL,
436                 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
437         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
438         mmiowb();
439
440         tg3_cond_int(tp);
441 }
442
443 static inline void tg3_netif_stop(struct tg3 *tp)
444 {
445         netif_poll_disable(tp->dev);
446         netif_tx_disable(tp->dev);
447 }
448
449 static inline void tg3_netif_start(struct tg3 *tp)
450 {
451         netif_wake_queue(tp->dev);
452         /* NOTE: unconditional netif_wake_queue is only appropriate
453          * so long as all callers are assured to have free tx slots
454          * (such as after tg3_init_hw)
455          */
456         netif_poll_enable(tp->dev);
457         tg3_cond_int(tp);
458 }
459
460 static void tg3_switch_clocks(struct tg3 *tp)
461 {
462         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
463         u32 orig_clock_ctrl;
464
465         orig_clock_ctrl = clock_ctrl;
466         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
467                        CLOCK_CTRL_CLKRUN_OENABLE |
468                        0x1f);
469         tp->pci_clock_ctrl = clock_ctrl;
470
471         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
472                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
473                         tw32_f(TG3PCI_CLOCK_CTRL,
474                                clock_ctrl | CLOCK_CTRL_625_CORE);
475                         udelay(40);
476                 }
477         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
478                 tw32_f(TG3PCI_CLOCK_CTRL,
479                      clock_ctrl |
480                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
481                 udelay(40);
482                 tw32_f(TG3PCI_CLOCK_CTRL,
483                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
484                 udelay(40);
485         }
486         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
487         udelay(40);
488 }
489
490 #define PHY_BUSY_LOOPS  5000
491
492 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
493 {
494         u32 frame_val;
495         unsigned int loops;
496         int ret;
497
498         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
499                 tw32_f(MAC_MI_MODE,
500                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
501                 udelay(80);
502         }
503
504         *val = 0x0;
505
506         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
507                       MI_COM_PHY_ADDR_MASK);
508         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
509                       MI_COM_REG_ADDR_MASK);
510         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
511         
512         tw32_f(MAC_MI_COM, frame_val);
513
514         loops = PHY_BUSY_LOOPS;
515         while (loops != 0) {
516                 udelay(10);
517                 frame_val = tr32(MAC_MI_COM);
518
519                 if ((frame_val & MI_COM_BUSY) == 0) {
520                         udelay(5);
521                         frame_val = tr32(MAC_MI_COM);
522                         break;
523                 }
524                 loops -= 1;
525         }
526
527         ret = -EBUSY;
528         if (loops != 0) {
529                 *val = frame_val & MI_COM_DATA_MASK;
530                 ret = 0;
531         }
532
533         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
534                 tw32_f(MAC_MI_MODE, tp->mi_mode);
535                 udelay(80);
536         }
537
538         return ret;
539 }
540
541 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
542 {
543         u32 frame_val;
544         unsigned int loops;
545         int ret;
546
547         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
548                 tw32_f(MAC_MI_MODE,
549                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
550                 udelay(80);
551         }
552
553         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
554                       MI_COM_PHY_ADDR_MASK);
555         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
556                       MI_COM_REG_ADDR_MASK);
557         frame_val |= (val & MI_COM_DATA_MASK);
558         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
559         
560         tw32_f(MAC_MI_COM, frame_val);
561
562         loops = PHY_BUSY_LOOPS;
563         while (loops != 0) {
564                 udelay(10);
565                 frame_val = tr32(MAC_MI_COM);
566                 if ((frame_val & MI_COM_BUSY) == 0) {
567                         udelay(5);
568                         frame_val = tr32(MAC_MI_COM);
569                         break;
570                 }
571                 loops -= 1;
572         }
573
574         ret = -EBUSY;
575         if (loops != 0)
576                 ret = 0;
577
578         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
579                 tw32_f(MAC_MI_MODE, tp->mi_mode);
580                 udelay(80);
581         }
582
583         return ret;
584 }
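/* Illustrative sketch (not part of the driver): tg3_readphy()/tg3_writephy()
 * above issue MII management frames through the MAC_MI_COM register and
 * return 0 on success, -EBUSY on timeout.  Reading the PHY identifier could
 * look like the hypothetical helper below (MII_PHYSID1/MII_PHYSID2 come
 * from <linux/mii.h>).
 */
#if 0
static int example_read_phy_id(struct tg3 *tp, u32 *phy_id)
{
        u32 hi, lo;

        if (tg3_readphy(tp, MII_PHYSID1, &hi) ||
            tg3_readphy(tp, MII_PHYSID2, &lo))
                return -EBUSY;

        *phy_id = (hi << 16) | lo;
        return 0;
}
#endif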
585
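/* Enable the Ethernet@WireSpeed feature unless TG3_FLG2_NO_ETH_WIRE_SPEED
 * is set: write 0x7007 to MII_TG3_AUX_CTRL, read the register back, then
 * rewrite it with bits 15 and 4 set.
 */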
586 static void tg3_phy_set_wirespeed(struct tg3 *tp)
587 {
588         u32 val;
589
590         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
591                 return;
592
593         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
594             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
595                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
596                              (val | (1 << 15) | (1 << 4)));
597 }
598
599 static int tg3_bmcr_reset(struct tg3 *tp)
600 {
601         u32 phy_control;
602         int limit, err;
603
604         /* OK, reset it, and poll the BMCR_RESET bit until it
605          * clears or we time out.
606          */
607         phy_control = BMCR_RESET;
608         err = tg3_writephy(tp, MII_BMCR, phy_control);
609         if (err != 0)
610                 return -EBUSY;
611
612         limit = 5000;
613         while (limit--) {
614                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
615                 if (err != 0)
616                         return -EBUSY;
617
618                 if ((phy_control & BMCR_RESET) == 0) {
619                         udelay(40);
620                         break;
621                 }
622                 udelay(10);
623         }
624         if (limit <= 0)
625                 return -EBUSY;
626
627         return 0;
628 }
629
630 static int tg3_wait_macro_done(struct tg3 *tp)
631 {
632         int limit = 100;
633
634         while (limit--) {
635                 u32 tmp32;
636
637                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
638                         if ((tmp32 & 0x1000) == 0)
639                                 break;
640                 }
641         }
642         if (limit <= 0)
643                 return -EBUSY;
644
645         return 0;
646 }
647
648 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
649 {
650         static const u32 test_pat[4][6] = {
651         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
652         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
653         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
654         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
655         };
656         int chan;
657
658         for (chan = 0; chan < 4; chan++) {
659                 int i;
660
661                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
662                              (chan * 0x2000) | 0x0200);
663                 tg3_writephy(tp, 0x16, 0x0002);
664
665                 for (i = 0; i < 6; i++)
666                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
667                                      test_pat[chan][i]);
668
669                 tg3_writephy(tp, 0x16, 0x0202);
670                 if (tg3_wait_macro_done(tp)) {
671                         *resetp = 1;
672                         return -EBUSY;
673                 }
674
675                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
676                              (chan * 0x2000) | 0x0200);
677                 tg3_writephy(tp, 0x16, 0x0082);
678                 if (tg3_wait_macro_done(tp)) {
679                         *resetp = 1;
680                         return -EBUSY;
681                 }
682
683                 tg3_writephy(tp, 0x16, 0x0802);
684                 if (tg3_wait_macro_done(tp)) {
685                         *resetp = 1;
686                         return -EBUSY;
687                 }
688
689                 for (i = 0; i < 6; i += 2) {
690                         u32 low, high;
691
692                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
693                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
694                             tg3_wait_macro_done(tp)) {
695                                 *resetp = 1;
696                                 return -EBUSY;
697                         }
698                         low &= 0x7fff;
699                         high &= 0x000f;
700                         if (low != test_pat[chan][i] ||
701                             high != test_pat[chan][i+1]) {
702                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
703                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
704                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
705
706                                 return -EBUSY;
707                         }
708                 }
709         }
710
711         return 0;
712 }
713
714 static int tg3_phy_reset_chanpat(struct tg3 *tp)
715 {
716         int chan;
717
718         for (chan = 0; chan < 4; chan++) {
719                 int i;
720
721                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
722                              (chan * 0x2000) | 0x0200);
723                 tg3_writephy(tp, 0x16, 0x0002);
724                 for (i = 0; i < 6; i++)
725                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
726                 tg3_writephy(tp, 0x16, 0x0202);
727                 if (tg3_wait_macro_done(tp))
728                         return -EBUSY;
729         }
730
731         return 0;
732 }
733
734 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
735 {
736         u32 reg32, phy9_orig;
737         int retries, do_phy_reset, err;
738
739         retries = 10;
740         do_phy_reset = 1;
741         do {
742                 if (do_phy_reset) {
743                         err = tg3_bmcr_reset(tp);
744                         if (err)
745                                 return err;
746                         do_phy_reset = 0;
747                 }
748
749                 /* Disable transmitter and interrupt.  */
750                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
751                         continue;
752
753                 reg32 |= 0x3000;
754                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
755
756                 /* Set full-duplex, 1000 mbps.  */
757                 tg3_writephy(tp, MII_BMCR,
758                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
759
760                 /* Set to master mode.  */
761                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
762                         continue;
763
764                 tg3_writephy(tp, MII_TG3_CTRL,
765                              (MII_TG3_CTRL_AS_MASTER |
766                               MII_TG3_CTRL_ENABLE_AS_MASTER));
767
768                 /* Enable SM_DSP_CLOCK and 6dB.  */
769                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
770
771                 /* Block the PHY control access.  */
772                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
773                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
774
775                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
776                 if (!err)
777                         break;
778         } while (--retries);
779
780         err = tg3_phy_reset_chanpat(tp);
781         if (err)
782                 return err;
783
784         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
785         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
786
787         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
788         tg3_writephy(tp, 0x16, 0x0000);
789
790         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
791             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
792                 /* Set Extended packet length bit for jumbo frames */
793                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
794         }
795         else {
796                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
797         }
798
799         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
800
801         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
802                 reg32 &= ~0x3000;
803                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
804         } else if (!err)
805                 err = -EBUSY;
806
807         return err;
808 }
809
810 /* This will reset the tigon3 PHY and then apply the chip-specific
811  * workarounds that must follow a PHY reset.
812  */
813 static int tg3_phy_reset(struct tg3 *tp)
814 {
815         u32 phy_status;
816         int err;
817
818         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
819         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
820         if (err != 0)
821                 return -EBUSY;
822
823         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
824             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
825             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
826                 err = tg3_phy_reset_5703_4_5(tp);
827                 if (err)
828                         return err;
829                 goto out;
830         }
831
832         err = tg3_bmcr_reset(tp);
833         if (err)
834                 return err;
835
836 out:
837         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
838                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
839                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
840                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
841                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
842                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
843                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
844         }
845         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
846                 tg3_writephy(tp, 0x1c, 0x8d68);
847                 tg3_writephy(tp, 0x1c, 0x8d68);
848         }
849         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
850                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
851                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
852                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
853                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
854                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
855                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
856                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
857                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
858         }
859         /* Set Extended packet length bit (bit 14) on all chips
860          * that support jumbo frames. */
861         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
862                 /* Cannot do read-modify-write on 5401 */
863                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
864         } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
865                 u32 phy_reg;
866
867                 /* Set bit 14 with read-modify-write to preserve other bits */
868                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
869                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
870                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
871         }
872
873         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
874          * jumbo frames transmission.
875          */
876         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
877                 u32 phy_reg;
878
879                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
880                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
881                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
882         }
883
884         tg3_phy_set_wirespeed(tp);
885         return 0;
886 }
887
888 static void tg3_frob_aux_power(struct tg3 *tp)
889 {
890         struct tg3 *tp_peer = tp;
891
892         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
893                 return;
894
895         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
896                 tp_peer = pci_get_drvdata(tp->pdev_peer);
897                 if (!tp_peer)
898                         BUG();
899         }
900
901
902         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
903             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
904                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
905                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
906                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
907                              (GRC_LCLCTRL_GPIO_OE0 |
908                               GRC_LCLCTRL_GPIO_OE1 |
909                               GRC_LCLCTRL_GPIO_OE2 |
910                               GRC_LCLCTRL_GPIO_OUTPUT0 |
911                               GRC_LCLCTRL_GPIO_OUTPUT1));
912                         udelay(100);
913                 } else {
914                         u32 no_gpio2;
915                         u32 grc_local_ctrl;
916
917                         if (tp_peer != tp &&
918                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
919                                 return;
920
921                         /* On 5753 and variants, GPIO2 cannot be used. */
922                         no_gpio2 = tp->nic_sram_data_cfg &
923                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
924
925                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
926                                          GRC_LCLCTRL_GPIO_OE1 |
927                                          GRC_LCLCTRL_GPIO_OE2 |
928                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
929                                          GRC_LCLCTRL_GPIO_OUTPUT2;
930                         if (no_gpio2) {
931                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
932                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
933                         }
934                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
935                                                 grc_local_ctrl);
936                         udelay(100);
937
938                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
939
940                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
941                                                 grc_local_ctrl);
942                         udelay(100);
943
944                         if (!no_gpio2) {
945                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
946                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
947                                        grc_local_ctrl);
948                                 udelay(100);
949                         }
950                 }
951         } else {
952                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
953                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
954                         if (tp_peer != tp &&
955                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
956                                 return;
957
958                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
959                              (GRC_LCLCTRL_GPIO_OE1 |
960                               GRC_LCLCTRL_GPIO_OUTPUT1));
961                         udelay(100);
962
963                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
964                              (GRC_LCLCTRL_GPIO_OE1));
965                         udelay(100);
966
967                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
968                              (GRC_LCLCTRL_GPIO_OE1 |
969                               GRC_LCLCTRL_GPIO_OUTPUT1));
970                         udelay(100);
971                 }
972         }
973 }
974
975 static int tg3_setup_phy(struct tg3 *, int);
976
977 #define RESET_KIND_SHUTDOWN     0
978 #define RESET_KIND_INIT         1
979 #define RESET_KIND_SUSPEND      2
980
981 static void tg3_write_sig_post_reset(struct tg3 *, int);
982 static int tg3_halt_cpu(struct tg3 *, u32);
983
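/* Move the chip into PCI power state D0-D3 (the 'state' argument 0-3 maps
 * directly onto the PCI_PM_CTRL_STATE bits).  For D0 the function simply
 * powers the device up and returns; for the low-power states it masks
 * interrupts, drops the copper link to 10/half autoneg, programs
 * Wake-on-LAN and clock gating as configured, and finally writes the new
 * state to PCI_PM_CTRL.
 */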
984 static int tg3_set_power_state(struct tg3 *tp, int state)
985 {
986         u32 misc_host_ctrl;
987         u16 power_control, power_caps;
988         int pm = tp->pm_cap;
989
990         /* Make sure register accesses (indirect or otherwise)
991          * will function correctly.
992          */
993         pci_write_config_dword(tp->pdev,
994                                TG3PCI_MISC_HOST_CTRL,
995                                tp->misc_host_ctrl);
996
997         pci_read_config_word(tp->pdev,
998                              pm + PCI_PM_CTRL,
999                              &power_control);
1000         power_control |= PCI_PM_CTRL_PME_STATUS;
1001         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1002         switch (state) {
1003         case 0:
1004                 power_control |= 0;
1005                 pci_write_config_word(tp->pdev,
1006                                       pm + PCI_PM_CTRL,
1007                                       power_control);
1008                 udelay(100);    /* Delay after power state change */
1009
1010                 /* Switch out of Vaux if it is not a LOM */
1011                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
1012                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1013                         udelay(100);
1014                 }
1015
1016                 return 0;
1017
1018         case 1:
1019                 power_control |= 1;
1020                 break;
1021
1022         case 2:
1023                 power_control |= 2;
1024                 break;
1025
1026         case 3:
1027                 power_control |= 3;
1028                 break;
1029
1030         default:
1031                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1032                        "requested.\n",
1033                        tp->dev->name, state);
1034                 return -EINVAL;
1035         };
1036
1037         power_control |= PCI_PM_CTRL_PME_ENABLE;
1038
1039         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1040         tw32(TG3PCI_MISC_HOST_CTRL,
1041              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1042
1043         if (tp->link_config.phy_is_low_power == 0) {
1044                 tp->link_config.phy_is_low_power = 1;
1045                 tp->link_config.orig_speed = tp->link_config.speed;
1046                 tp->link_config.orig_duplex = tp->link_config.duplex;
1047                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1048         }
1049
1050         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1051                 tp->link_config.speed = SPEED_10;
1052                 tp->link_config.duplex = DUPLEX_HALF;
1053                 tp->link_config.autoneg = AUTONEG_ENABLE;
1054                 tg3_setup_phy(tp, 0);
1055         }
1056
1057         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1058
1059         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1060                 u32 mac_mode;
1061
1062                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1063                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1064                         udelay(40);
1065
1066                         mac_mode = MAC_MODE_PORT_MODE_MII;
1067
1068                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1069                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1070                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1071                 } else {
1072                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1073                 }
1074
1075                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1076                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1077
1078                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1079                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1080                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1081
1082                 tw32_f(MAC_MODE, mac_mode);
1083                 udelay(100);
1084
1085                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1086                 udelay(10);
1087         }
1088
1089         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1090             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1091              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1092                 u32 base_val;
1093
1094                 base_val = tp->pci_clock_ctrl;
1095                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1096                              CLOCK_CTRL_TXCLK_DISABLE);
1097
1098                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1099                      CLOCK_CTRL_ALTCLK |
1100                      CLOCK_CTRL_PWRDOWN_PLL133);
1101                 udelay(40);
1102         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1103                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1104                 u32 newbits1, newbits2;
1105
1106                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1107                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1108                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1109                                     CLOCK_CTRL_TXCLK_DISABLE |
1110                                     CLOCK_CTRL_ALTCLK);
1111                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1112                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1113                         newbits1 = CLOCK_CTRL_625_CORE;
1114                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1115                 } else {
1116                         newbits1 = CLOCK_CTRL_ALTCLK;
1117                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1118                 }
1119
1120                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1121                 udelay(40);
1122
1123                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1124                 udelay(40);
1125
1126                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1127                         u32 newbits3;
1128
1129                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1130                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1131                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1132                                             CLOCK_CTRL_TXCLK_DISABLE |
1133                                             CLOCK_CTRL_44MHZ_CORE);
1134                         } else {
1135                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1136                         }
1137
1138                         tw32_f(TG3PCI_CLOCK_CTRL,
1139                                          tp->pci_clock_ctrl | newbits3);
1140                         udelay(40);
1141                 }
1142         }
1143
1144         tg3_frob_aux_power(tp);
1145
1146         /* Workaround for unstable PLL clock */
1147         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1148             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1149                 u32 val = tr32(0x7d00);
1150
1151                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1152                 tw32(0x7d00, val);
1153                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1154                         tg3_halt_cpu(tp, RX_CPU_BASE);
1155         }
1156
1157         /* Finally, set the new power state. */
1158         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1159         udelay(100);    /* Delay after power state change */
1160
1161         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1162
1163         return 0;
1164 }
1165
1166 static void tg3_link_report(struct tg3 *tp)
1167 {
1168         if (!netif_carrier_ok(tp->dev)) {
1169                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1170         } else {
1171                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1172                        tp->dev->name,
1173                        (tp->link_config.active_speed == SPEED_1000 ?
1174                         1000 :
1175                         (tp->link_config.active_speed == SPEED_100 ?
1176                          100 : 10)),
1177                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1178                         "full" : "half"));
1179
1180                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1181                        "%s for RX.\n",
1182                        tp->dev->name,
1183                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1184                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1185         }
1186 }
1187
1188 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1189 {
1190         u32 new_tg3_flags = 0;
1191         u32 old_rx_mode = tp->rx_mode;
1192         u32 old_tx_mode = tp->tx_mode;
1193
1194         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1195                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1196                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1197                                 if (remote_adv & LPA_PAUSE_CAP)
1198                                         new_tg3_flags |=
1199                                                 (TG3_FLAG_RX_PAUSE |
1200                                                 TG3_FLAG_TX_PAUSE);
1201                                 else if (remote_adv & LPA_PAUSE_ASYM)
1202                                         new_tg3_flags |=
1203                                                 (TG3_FLAG_RX_PAUSE);
1204                         } else {
1205                                 if (remote_adv & LPA_PAUSE_CAP)
1206                                         new_tg3_flags |=
1207                                                 (TG3_FLAG_RX_PAUSE |
1208                                                 TG3_FLAG_TX_PAUSE);
1209                         }
1210                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1211                         if ((remote_adv & LPA_PAUSE_CAP) &&
1212                         (remote_adv & LPA_PAUSE_ASYM))
1213                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1214                 }
1215
1216                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1217                 tp->tg3_flags |= new_tg3_flags;
1218         } else {
1219                 new_tg3_flags = tp->tg3_flags;
1220         }
1221
1222         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1223                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1224         else
1225                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1226
1227         if (old_rx_mode != tp->rx_mode) {
1228                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1229         }
1230         
1231         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1232                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1233         else
1234                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1235
1236         if (old_tx_mode != tp->tx_mode) {
1237                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1238         }
1239 }
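/* The pause resolution implemented above (local/remote ADVERTISE_PAUSE_CAP
 * and ADVERTISE_PAUSE_ASYM / LPA_PAUSE_* bits) works out to:
 *
 *   local CAP,      remote CAP            -> RX and TX pause
 *   local CAP+ASYM, remote ASYM only      -> RX pause only
 *   local ASYM only, remote CAP+ASYM      -> TX pause only
 *   anything else                         -> no pause
 *
 * When TG3_FLAG_PAUSE_AUTONEG is clear the previously configured pause
 * flags are kept and only the MAC RX/TX mode registers are resynchronized.
 */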
1240
1241 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1242 {
1243         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1244         case MII_TG3_AUX_STAT_10HALF:
1245                 *speed = SPEED_10;
1246                 *duplex = DUPLEX_HALF;
1247                 break;
1248
1249         case MII_TG3_AUX_STAT_10FULL:
1250                 *speed = SPEED_10;
1251                 *duplex = DUPLEX_FULL;
1252                 break;
1253
1254         case MII_TG3_AUX_STAT_100HALF:
1255                 *speed = SPEED_100;
1256                 *duplex = DUPLEX_HALF;
1257                 break;
1258
1259         case MII_TG3_AUX_STAT_100FULL:
1260                 *speed = SPEED_100;
1261                 *duplex = DUPLEX_FULL;
1262                 break;
1263
1264         case MII_TG3_AUX_STAT_1000HALF:
1265                 *speed = SPEED_1000;
1266                 *duplex = DUPLEX_HALF;
1267                 break;
1268
1269         case MII_TG3_AUX_STAT_1000FULL:
1270                 *speed = SPEED_1000;
1271                 *duplex = DUPLEX_FULL;
1272                 break;
1273
1274         default:
1275                 *speed = SPEED_INVALID;
1276                 *duplex = DUPLEX_INVALID;
1277                 break;
1278         };
1279 }
1280
1281 static void tg3_phy_copper_begin(struct tg3 *tp)
1282 {
1283         u32 new_adv;
1284         int i;
1285
1286         if (tp->link_config.phy_is_low_power) {
1287                 /* Entering low power mode.  Disable gigabit and
1288                  * 100baseT advertisements.
1289                  */
1290                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1291
1292                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1293                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1294                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1295                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1296
1297                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1298         } else if (tp->link_config.speed == SPEED_INVALID) {
1299                 tp->link_config.advertising =
1300                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1301                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1302                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1303                          ADVERTISED_Autoneg | ADVERTISED_MII);
1304
1305                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1306                         tp->link_config.advertising &=
1307                                 ~(ADVERTISED_1000baseT_Half |
1308                                   ADVERTISED_1000baseT_Full);
1309
1310                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1311                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1312                         new_adv |= ADVERTISE_10HALF;
1313                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1314                         new_adv |= ADVERTISE_10FULL;
1315                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1316                         new_adv |= ADVERTISE_100HALF;
1317                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1318                         new_adv |= ADVERTISE_100FULL;
1319                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1320
1321                 if (tp->link_config.advertising &
1322                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1323                         new_adv = 0;
1324                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1325                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1326                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1327                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1328                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1329                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1330                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1331                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1332                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1333                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1334                 } else {
1335                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1336                 }
1337         } else {
1338                 /* Asking for a specific link mode. */
1339                 if (tp->link_config.speed == SPEED_1000) {
1340                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1341                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1342
1343                         if (tp->link_config.duplex == DUPLEX_FULL)
1344                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1345                         else
1346                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1347                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1348                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1349                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1350                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1351                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1352                 } else {
1353                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1354
1355                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1356                         if (tp->link_config.speed == SPEED_100) {
1357                                 if (tp->link_config.duplex == DUPLEX_FULL)
1358                                         new_adv |= ADVERTISE_100FULL;
1359                                 else
1360                                         new_adv |= ADVERTISE_100HALF;
1361                         } else {
1362                                 if (tp->link_config.duplex == DUPLEX_FULL)
1363                                         new_adv |= ADVERTISE_10FULL;
1364                                 else
1365                                         new_adv |= ADVERTISE_10HALF;
1366                         }
1367                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1368                 }
1369         }
1370
1371         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1372             tp->link_config.speed != SPEED_INVALID) {
1373                 u32 bmcr, orig_bmcr;
1374
1375                 tp->link_config.active_speed = tp->link_config.speed;
1376                 tp->link_config.active_duplex = tp->link_config.duplex;
1377
1378                 bmcr = 0;
1379                 switch (tp->link_config.speed) {
1380                 default:
1381                 case SPEED_10:
1382                         break;
1383
1384                 case SPEED_100:
1385                         bmcr |= BMCR_SPEED100;
1386                         break;
1387
1388                 case SPEED_1000:
1389                         bmcr |= TG3_BMCR_SPEED1000;
1390                         break;
1391                 }
1392
1393                 if (tp->link_config.duplex == DUPLEX_FULL)
1394                         bmcr |= BMCR_FULLDPLX;
1395
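                     /* Editor's note (grounded in the code below): if the
                      * forced BMCR value differs from what the PHY currently
                      * has, the link is dropped first by putting the PHY
                      * into loopback, waiting (up to ~15 ms) for the
                      * link-status bit to clear, and only then writing the
                      * forced speed/duplex into BMCR.
                      */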
1396                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1397                     (bmcr != orig_bmcr)) {
1398                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1399                         for (i = 0; i < 1500; i++) {
1400                                 u32 tmp;
1401
1402                                 udelay(10);
1403                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1404                                     tg3_readphy(tp, MII_BMSR, &tmp))
1405                                         continue;
1406                                 if (!(tmp & BMSR_LSTATUS)) {
1407                                         udelay(40);
1408                                         break;
1409                                 }
1410                         }
1411                         tg3_writephy(tp, MII_BMCR, bmcr);
1412                         udelay(40);
1413                 }
1414         } else {
1415                 tg3_writephy(tp, MII_BMCR,
1416                              BMCR_ANENABLE | BMCR_ANRESTART);
1417         }
1418 }
1419
1420 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1421 {
1422         int err;
1423
1424         /* Turn off tap power management. */
1425         /* Set Extended packet length bit */
1426         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1427
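             /* Each DSP register is selected by writing its address to
              * MII_TG3_DSP_ADDRESS and then written through
              * MII_TG3_DSP_RW_PORT.  The values below are opaque PHY
              * fixups, presumably taken from Broadcom's reference code.
              */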
1428         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1429         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1430
1431         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1432         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1433
1434         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1435         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1436
1437         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1438         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1439
1440         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1441         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1442
1443         udelay(40);
1444
1445         return err;
1446 }
1447
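     /* Return 1 if the PHY is currently advertising every 10/100 mode (and,
      * unless the NIC is 10/100-only, both 1000 modes as well).  Used to
      * decide whether auto-negotiation must be restarted, e.g. after coming
      * out of low-power mode.
      */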
1448 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1449 {
1450         u32 adv_reg, all_mask;
1451
1452         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1453                 return 0;
1454
1455         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1456                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1457         if ((adv_reg & all_mask) != all_mask)
1458                 return 0;
1459         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1460                 u32 tg3_ctrl;
1461
1462                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1463                         return 0;
1464
1465                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1466                             MII_TG3_CTRL_ADV_1000_FULL);
1467                 if ((tg3_ctrl & all_mask) != all_mask)
1468                         return 0;
1469         }
1470         return 1;
1471 }
1472
1473 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1474 {
1475         int current_link_up;
1476         u32 bmsr, dummy;
1477         u16 current_speed;
1478         u8 current_duplex;
1479         int i, err;
1480
1481         tw32(MAC_EVENT, 0);
1482
1483         tw32_f(MAC_STATUS,
1484              (MAC_STATUS_SYNC_CHANGED |
1485               MAC_STATUS_CFG_CHANGED |
1486               MAC_STATUS_MI_COMPLETION |
1487               MAC_STATUS_LNKSTATE_CHANGED));
1488         udelay(40);
1489
1490         tp->mi_mode = MAC_MI_MODE_BASE;
1491         tw32_f(MAC_MI_MODE, tp->mi_mode);
1492         udelay(80);
1493
1494         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1495
1496         /* Some third-party PHYs need to be reset on link going
1497          * down.
1498          */
1499         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1500              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1501              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1502             netif_carrier_ok(tp->dev)) {
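                     /* The BMSR link-status bit is latched low, so read the
                      * register twice: the second read reflects the current
                      * link state.
                      */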
1503                 tg3_readphy(tp, MII_BMSR, &bmsr);
1504                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1505                     !(bmsr & BMSR_LSTATUS))
1506                         force_reset = 1;
1507         }
1508         if (force_reset)
1509                 tg3_phy_reset(tp);
1510
1511         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1512                 tg3_readphy(tp, MII_BMSR, &bmsr);
1513                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1514                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1515                         bmsr = 0;
1516
1517                 if (!(bmsr & BMSR_LSTATUS)) {
1518                         err = tg3_init_5401phy_dsp(tp);
1519                         if (err)
1520                                 return err;
1521
1522                         tg3_readphy(tp, MII_BMSR, &bmsr);
1523                         for (i = 0; i < 1000; i++) {
1524                                 udelay(10);
1525                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1526                                     (bmsr & BMSR_LSTATUS)) {
1527                                         udelay(40);
1528                                         break;
1529                                 }
1530                         }
1531
1532                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1533                             !(bmsr & BMSR_LSTATUS) &&
1534                             tp->link_config.active_speed == SPEED_1000) {
1535                                 err = tg3_phy_reset(tp);
1536                                 if (!err)
1537                                         err = tg3_init_5401phy_dsp(tp);
1538                                 if (err)
1539                                         return err;
1540                         }
1541                 }
1542         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1543                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1544                 /* 5701 {A0,B0} CRC bug workaround */
1545                 tg3_writephy(tp, 0x15, 0x0a75);
1546                 tg3_writephy(tp, 0x1c, 0x8c68);
1547                 tg3_writephy(tp, 0x1c, 0x8d68);
1548                 tg3_writephy(tp, 0x1c, 0x8c68);
1549         }
1550
1551         /* Clear pending interrupts... */
1552         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1553         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1554
1555         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1556                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1557         else
1558                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1559
1560         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1561             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1562                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1563                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1564                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1565                 else
1566                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1567         }
1568
1569         current_link_up = 0;
1570         current_speed = SPEED_INVALID;
1571         current_duplex = DUPLEX_INVALID;
1572
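             /* For PHYs flagged as capacitively coupled, make sure bit 10
              * of the 0x4007 AUX_CTRL shadow register is set; if it had to
              * be set here, skip straight to the relink path below.
              */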
1573         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1574                 u32 val;
1575
1576                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1577                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1578                 if (!(val & (1 << 10))) {
1579                         val |= (1 << 10);
1580                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1581                         goto relink;
1582                 }
1583         }
1584
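             /* Give the link roughly 4 ms (100 polls with a 40 us delay
              * each) to come up before sampling the AUX status register.
              */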
1585         bmsr = 0;
1586         for (i = 0; i < 100; i++) {
1587                 tg3_readphy(tp, MII_BMSR, &bmsr);
1588                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1589                     (bmsr & BMSR_LSTATUS))
1590                         break;
1591                 udelay(40);
1592         }
1593
1594         if (bmsr & BMSR_LSTATUS) {
1595                 u32 aux_stat, bmcr;
1596
1597                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1598                 for (i = 0; i < 2000; i++) {
1599                         udelay(10);
1600                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1601                             aux_stat)
1602                                 break;
1603                 }
1604
1605                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1606                                              &current_speed,
1607                                              &current_duplex);
1608
1609                 bmcr = 0;
1610                 for (i = 0; i < 200; i++) {
1611                         tg3_readphy(tp, MII_BMCR, &bmcr);
1612                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1613                                 continue;
1614                         if (bmcr && bmcr != 0x7fff)
1615                                 break;
1616                         udelay(10);
1617                 }
1618
1619                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1620                         if (bmcr & BMCR_ANENABLE) {
1621                                 current_link_up = 1;
1622
1623                                 /* Force autoneg restart if we are exiting
1624                                  * low power mode.
1625                                  */
1626                                 if (!tg3_copper_is_advertising_all(tp))
1627                                         current_link_up = 0;
1628                         } else {
1629                                 current_link_up = 0;
1630                         }
1631                 } else {
1632                         if (!(bmcr & BMCR_ANENABLE) &&
1633                             tp->link_config.speed == current_speed &&
1634                             tp->link_config.duplex == current_duplex) {
1635                                 current_link_up = 1;
1636                         } else {
1637                                 current_link_up = 0;
1638                         }
1639                 }
1640
1641                 tp->link_config.active_speed = current_speed;
1642                 tp->link_config.active_duplex = current_duplex;
1643         }
1644
1645         if (current_link_up == 1 &&
1646             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1647             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1648                 u32 local_adv, remote_adv;
1649
1650                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1651                         local_adv = 0;
1652                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1653
1654                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1655                         remote_adv = 0;
1656
1657                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1658
1659                 /* If we are not advertising full pause capability,
1660                  * something is wrong.  Bring the link down and reconfigure.
1661                  */
1662                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1663                         current_link_up = 0;
1664                 } else {
1665                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1666                 }
1667         }
1668 relink:
1669         if (current_link_up == 0) {
1670                 u32 tmp;
1671
1672                 tg3_phy_copper_begin(tp);
1673
1674                 tg3_readphy(tp, MII_BMSR, &tmp);
1675                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1676                     (tmp & BMSR_LSTATUS))
1677                         current_link_up = 1;
1678         }
1679
1680         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1681         if (current_link_up == 1) {
1682                 if (tp->link_config.active_speed == SPEED_100 ||
1683                     tp->link_config.active_speed == SPEED_10)
1684                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1685                 else
1686                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1687         } else
1688                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1689
1690         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1691         if (tp->link_config.active_duplex == DUPLEX_HALF)
1692                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1693
1694         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1695         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1696                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1697                     (current_link_up == 1 &&
1698                      tp->link_config.active_speed == SPEED_10))
1699                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1700         } else {
1701                 if (current_link_up == 1)
1702                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1703         }
1704
1705         /* ??? Without this setting Netgear GA302T PHY does not
1706          * ??? send/receive packets...
1707          */
1708         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1709             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1710                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1711                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1712                 udelay(80);
1713         }
1714
1715         tw32_f(MAC_MODE, tp->mac_mode);
1716         udelay(40);
1717
1718         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1719                 /* Polled via timer. */
1720                 tw32_f(MAC_EVENT, 0);
1721         } else {
1722                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1723         }
1724         udelay(40);
1725
1726         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1727             current_link_up == 1 &&
1728             tp->link_config.active_speed == SPEED_1000 &&
1729             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1730              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1731                 udelay(120);
1732                 tw32_f(MAC_STATUS,
1733                      (MAC_STATUS_SYNC_CHANGED |
1734                       MAC_STATUS_CFG_CHANGED));
1735                 udelay(40);
1736                 tg3_write_mem(tp,
1737                               NIC_SRAM_FIRMWARE_MBOX,
1738                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1739         }
1740
1741         if (current_link_up != netif_carrier_ok(tp->dev)) {
1742                 if (current_link_up)
1743                         netif_carrier_on(tp->dev);
1744                 else
1745                         netif_carrier_off(tp->dev);
1746                 tg3_link_report(tp);
1747         }
1748
1749         return 0;
1750 }
1751
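     /* State and link-partner information for the software 1000BASE-X
      * (IEEE 802.3 clause 37) auto-negotiation state machine implemented
      * in tg3_fiber_aneg_smachine() below.
      */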
1752 struct tg3_fiber_aneginfo {
1753         int state;
1754 #define ANEG_STATE_UNKNOWN              0
1755 #define ANEG_STATE_AN_ENABLE            1
1756 #define ANEG_STATE_RESTART_INIT         2
1757 #define ANEG_STATE_RESTART              3
1758 #define ANEG_STATE_DISABLE_LINK_OK      4
1759 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1760 #define ANEG_STATE_ABILITY_DETECT       6
1761 #define ANEG_STATE_ACK_DETECT_INIT      7
1762 #define ANEG_STATE_ACK_DETECT           8
1763 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1764 #define ANEG_STATE_COMPLETE_ACK         10
1765 #define ANEG_STATE_IDLE_DETECT_INIT     11
1766 #define ANEG_STATE_IDLE_DETECT          12
1767 #define ANEG_STATE_LINK_OK              13
1768 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1769 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1770
1771         u32 flags;
1772 #define MR_AN_ENABLE            0x00000001
1773 #define MR_RESTART_AN           0x00000002
1774 #define MR_AN_COMPLETE          0x00000004
1775 #define MR_PAGE_RX              0x00000008
1776 #define MR_NP_LOADED            0x00000010
1777 #define MR_TOGGLE_TX            0x00000020
1778 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1779 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1780 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1781 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1782 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1783 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1784 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1785 #define MR_TOGGLE_RX            0x00002000
1786 #define MR_NP_RX                0x00004000
1787
1788 #define MR_LINK_OK              0x80000000
1789
1790         unsigned long link_time, cur_time;
1791
1792         u32 ability_match_cfg;
1793         int ability_match_count;
1794
1795         char ability_match, idle_match, ack_match;
1796
1797         u32 txconfig, rxconfig;
1798 #define ANEG_CFG_NP             0x00000080
1799 #define ANEG_CFG_ACK            0x00000040
1800 #define ANEG_CFG_RF2            0x00000020
1801 #define ANEG_CFG_RF1            0x00000010
1802 #define ANEG_CFG_PS2            0x00000001
1803 #define ANEG_CFG_PS1            0x00008000
1804 #define ANEG_CFG_HD             0x00004000
1805 #define ANEG_CFG_FD             0x00002000
1806 #define ANEG_CFG_INVAL          0x00001f06
1807
1808 };
1809 #define ANEG_OK         0
1810 #define ANEG_DONE       1
1811 #define ANEG_TIMER_ENAB 2
1812 #define ANEG_FAILED     -1
1813
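     /* One state-machine tick is roughly 1 us (fiber_autoneg() delays 1 us
      * per poll), so a settle time of 10000 ticks is about 10 ms.
      */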
1814 #define ANEG_STATE_SETTLE_TIME  10000
1815
1816 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1817                                    struct tg3_fiber_aneginfo *ap)
1818 {
1819         unsigned long delta;
1820         u32 rx_cfg_reg;
1821         int ret;
1822
1823         if (ap->state == ANEG_STATE_UNKNOWN) {
1824                 ap->rxconfig = 0;
1825                 ap->link_time = 0;
1826                 ap->cur_time = 0;
1827                 ap->ability_match_cfg = 0;
1828                 ap->ability_match_count = 0;
1829                 ap->ability_match = 0;
1830                 ap->idle_match = 0;
1831                 ap->ack_match = 0;
1832         }
1833         ap->cur_time++;
1834
1835         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1836                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1837
1838                 if (rx_cfg_reg != ap->ability_match_cfg) {
1839                         ap->ability_match_cfg = rx_cfg_reg;
1840                         ap->ability_match = 0;
1841                         ap->ability_match_count = 0;
1842                 } else {
1843                         if (++ap->ability_match_count > 1) {
1844                                 ap->ability_match = 1;
1845                                 ap->ability_match_cfg = rx_cfg_reg;
1846                         }
1847                 }
1848                 if (rx_cfg_reg & ANEG_CFG_ACK)
1849                         ap->ack_match = 1;
1850                 else
1851                         ap->ack_match = 0;
1852
1853                 ap->idle_match = 0;
1854         } else {
1855                 ap->idle_match = 1;
1856                 ap->ability_match_cfg = 0;
1857                 ap->ability_match_count = 0;
1858                 ap->ability_match = 0;
1859                 ap->ack_match = 0;
1860
1861                 rx_cfg_reg = 0;
1862         }
1863
1864         ap->rxconfig = rx_cfg_reg;
1865         ret = ANEG_OK;
1866
1867         switch(ap->state) {
1868         case ANEG_STATE_UNKNOWN:
1869                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1870                         ap->state = ANEG_STATE_AN_ENABLE;
1871
1872                 /* fallthru */
1873         case ANEG_STATE_AN_ENABLE:
1874                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1875                 if (ap->flags & MR_AN_ENABLE) {
1876                         ap->link_time = 0;
1877                         ap->cur_time = 0;
1878                         ap->ability_match_cfg = 0;
1879                         ap->ability_match_count = 0;
1880                         ap->ability_match = 0;
1881                         ap->idle_match = 0;
1882                         ap->ack_match = 0;
1883
1884                         ap->state = ANEG_STATE_RESTART_INIT;
1885                 } else {
1886                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1887                 }
1888                 break;
1889
1890         case ANEG_STATE_RESTART_INIT:
1891                 ap->link_time = ap->cur_time;
1892                 ap->flags &= ~(MR_NP_LOADED);
1893                 ap->txconfig = 0;
1894                 tw32(MAC_TX_AUTO_NEG, 0);
1895                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1896                 tw32_f(MAC_MODE, tp->mac_mode);
1897                 udelay(40);
1898
1899                 ret = ANEG_TIMER_ENAB;
1900                 ap->state = ANEG_STATE_RESTART;
1901
1902                 /* fallthru */
1903         case ANEG_STATE_RESTART:
1904                 delta = ap->cur_time - ap->link_time;
1905                 if (delta > ANEG_STATE_SETTLE_TIME) {
1906                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1907                 } else {
1908                         ret = ANEG_TIMER_ENAB;
1909                 }
1910                 break;
1911
1912         case ANEG_STATE_DISABLE_LINK_OK:
1913                 ret = ANEG_DONE;
1914                 break;
1915
1916         case ANEG_STATE_ABILITY_DETECT_INIT:
1917                 ap->flags &= ~(MR_TOGGLE_TX);
1918                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1919                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1920                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1921                 tw32_f(MAC_MODE, tp->mac_mode);
1922                 udelay(40);
1923
1924                 ap->state = ANEG_STATE_ABILITY_DETECT;
1925                 break;
1926
1927         case ANEG_STATE_ABILITY_DETECT:
1928                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1929                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1930                 }
1931                 break;
1932
1933         case ANEG_STATE_ACK_DETECT_INIT:
1934                 ap->txconfig |= ANEG_CFG_ACK;
1935                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1936                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1937                 tw32_f(MAC_MODE, tp->mac_mode);
1938                 udelay(40);
1939
1940                 ap->state = ANEG_STATE_ACK_DETECT;
1941
1942                 /* fallthru */
1943         case ANEG_STATE_ACK_DETECT:
1944                 if (ap->ack_match != 0) {
1945                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1946                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1947                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1948                         } else {
1949                                 ap->state = ANEG_STATE_AN_ENABLE;
1950                         }
1951                 } else if (ap->ability_match != 0 &&
1952                            ap->rxconfig == 0) {
1953                         ap->state = ANEG_STATE_AN_ENABLE;
1954                 }
1955                 break;
1956
1957         case ANEG_STATE_COMPLETE_ACK_INIT:
1958                 if (ap->rxconfig & ANEG_CFG_INVAL) {
1959                         ret = ANEG_FAILED;
1960                         break;
1961                 }
1962                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1963                                MR_LP_ADV_HALF_DUPLEX |
1964                                MR_LP_ADV_SYM_PAUSE |
1965                                MR_LP_ADV_ASYM_PAUSE |
1966                                MR_LP_ADV_REMOTE_FAULT1 |
1967                                MR_LP_ADV_REMOTE_FAULT2 |
1968                                MR_LP_ADV_NEXT_PAGE |
1969                                MR_TOGGLE_RX |
1970                                MR_NP_RX);
1971                 if (ap->rxconfig & ANEG_CFG_FD)
1972                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1973                 if (ap->rxconfig & ANEG_CFG_HD)
1974                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1975                 if (ap->rxconfig & ANEG_CFG_PS1)
1976                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
1977                 if (ap->rxconfig & ANEG_CFG_PS2)
1978                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1979                 if (ap->rxconfig & ANEG_CFG_RF1)
1980                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1981                 if (ap->rxconfig & ANEG_CFG_RF2)
1982                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1983                 if (ap->rxconfig & ANEG_CFG_NP)
1984                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
1985
1986                 ap->link_time = ap->cur_time;
1987
1988                 ap->flags ^= (MR_TOGGLE_TX);
1989                 if (ap->rxconfig & 0x0008)
1990                         ap->flags |= MR_TOGGLE_RX;
1991                 if (ap->rxconfig & ANEG_CFG_NP)
1992                         ap->flags |= MR_NP_RX;
1993                 ap->flags |= MR_PAGE_RX;
1994
1995                 ap->state = ANEG_STATE_COMPLETE_ACK;
1996                 ret = ANEG_TIMER_ENAB;
1997                 break;
1998
1999         case ANEG_STATE_COMPLETE_ACK:
2000                 if (ap->ability_match != 0 &&
2001                     ap->rxconfig == 0) {
2002                         ap->state = ANEG_STATE_AN_ENABLE;
2003                         break;
2004                 }
2005                 delta = ap->cur_time - ap->link_time;
2006                 if (delta > ANEG_STATE_SETTLE_TIME) {
2007                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2008                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2009                         } else {
2010                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2011                                     !(ap->flags & MR_NP_RX)) {
2012                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2013                                 } else {
2014                                         ret = ANEG_FAILED;
2015                                 }
2016                         }
2017                 }
2018                 break;
2019
2020         case ANEG_STATE_IDLE_DETECT_INIT:
2021                 ap->link_time = ap->cur_time;
2022                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2023                 tw32_f(MAC_MODE, tp->mac_mode);
2024                 udelay(40);
2025
2026                 ap->state = ANEG_STATE_IDLE_DETECT;
2027                 ret = ANEG_TIMER_ENAB;
2028                 break;
2029
2030         case ANEG_STATE_IDLE_DETECT:
2031                 if (ap->ability_match != 0 &&
2032                     ap->rxconfig == 0) {
2033                         ap->state = ANEG_STATE_AN_ENABLE;
2034                         break;
2035                 }
2036                 delta = ap->cur_time - ap->link_time;
2037                 if (delta > ANEG_STATE_SETTLE_TIME) {
2038                         /* XXX another gem from the Broadcom driver :( */
2039                         ap->state = ANEG_STATE_LINK_OK;
2040                 }
2041                 break;
2042
2043         case ANEG_STATE_LINK_OK:
2044                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2045                 ret = ANEG_DONE;
2046                 break;
2047
2048         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2049                 /* ??? unimplemented */
2050                 break;
2051
2052         case ANEG_STATE_NEXT_PAGE_WAIT:
2053                 /* ??? unimplemented */
2054                 break;
2055
2056         default:
2057                 ret = ANEG_FAILED;
2058                 break;
2059         }
2060
2061         return ret;
2062 }
2063
2064 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2065 {
2066         int res = 0;
2067         struct tg3_fiber_aneginfo aninfo;
2068         int status = ANEG_FAILED;
2069         unsigned int tick;
2070         u32 tmp;
2071
2072         tw32_f(MAC_TX_AUTO_NEG, 0);
2073
2074         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2075         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2076         udelay(40);
2077
2078         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2079         udelay(40);
2080
2081         memset(&aninfo, 0, sizeof(aninfo));
2082         aninfo.flags |= MR_AN_ENABLE;
2083         aninfo.state = ANEG_STATE_UNKNOWN;
2084         aninfo.cur_time = 0;
2085         tick = 0;
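             /* Crank the state machine for up to ~195 ms (195000 polls,
              * about 1 us apart) or until it reports done or failed.
              */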
2086         while (++tick < 195000) {
2087                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2088                 if (status == ANEG_DONE || status == ANEG_FAILED)
2089                         break;
2090
2091                 udelay(1);
2092         }
2093
2094         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2095         tw32_f(MAC_MODE, tp->mac_mode);
2096         udelay(40);
2097
2098         *flags = aninfo.flags;
2099
2100         if (status == ANEG_DONE &&
2101             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2102                              MR_LP_ADV_FULL_DUPLEX)))
2103                 res = 1;
2104
2105         return res;
2106 }
2107
2108 static void tg3_init_bcm8002(struct tg3 *tp)
2109 {
2110         u32 mac_status = tr32(MAC_STATUS);
2111         int i;
2112
2113         /* Reset when initializing for the first time, or when we already have a link. */
2114         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2115             !(mac_status & MAC_STATUS_PCS_SYNCED))
2116                 return;
2117
2118         /* Set PLL lock range. */
2119         tg3_writephy(tp, 0x16, 0x8007);
2120
2121         /* SW reset */
2122         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2123
2124         /* Wait for reset to complete. */
2125         /* XXX schedule_timeout() ... */
2126         for (i = 0; i < 500; i++)
2127                 udelay(10);
2128
2129         /* Config mode; select PMA/Ch 1 regs. */
2130         tg3_writephy(tp, 0x10, 0x8411);
2131
2132         /* Enable auto-lock and comdet, select txclk for tx. */
2133         tg3_writephy(tp, 0x11, 0x0a10);
2134
2135         tg3_writephy(tp, 0x18, 0x00a0);
2136         tg3_writephy(tp, 0x16, 0x41ff);
2137
2138         /* Assert and deassert POR. */
2139         tg3_writephy(tp, 0x13, 0x0400);
2140         udelay(40);
2141         tg3_writephy(tp, 0x13, 0x0000);
2142
2143         tg3_writephy(tp, 0x11, 0x0a50);
2144         udelay(40);
2145         tg3_writephy(tp, 0x11, 0x0a10);
2146
2147         /* Wait for signal to stabilize */
2148         /* XXX schedule_timeout() ... */
2149         for (i = 0; i < 15000; i++)
2150                 udelay(10);
2151
2152         /* Deselect the channel register so we can read the PHYID
2153          * later.
2154          */
2155         tg3_writephy(tp, 0x10, 0x8011);
2156 }
2157
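     /* Bring the fiber link up using the chip's SG_DIG block, which
      * performs SERDES auto-negotiation in hardware; returns nonzero if
      * the link came up.
      */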
2158 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2159 {
2160         u32 sg_dig_ctrl, sg_dig_status;
2161         u32 serdes_cfg, expected_sg_dig_ctrl;
2162         int workaround, port_a;
2163         int current_link_up;
2164
2165         serdes_cfg = 0;
2166         expected_sg_dig_ctrl = 0;
2167         workaround = 0;
2168         port_a = 1;
2169         current_link_up = 0;
2170
2171         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2172             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2173                 workaround = 1;
2174                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2175                         port_a = 0;
2176
2177                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2178                 /* preserve bits 20-23 for voltage regulator */
2179                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2180         }
2181
2182         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2183
2184         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2185                 if (sg_dig_ctrl & (1 << 31)) {
2186                         if (workaround) {
2187                                 u32 val = serdes_cfg;
2188
2189                                 if (port_a)
2190                                         val |= 0xc010000;
2191                                 else
2192                                         val |= 0x4010000;
2193                                 tw32_f(MAC_SERDES_CFG, val);
2194                         }
2195                         tw32_f(SG_DIG_CTRL, 0x01388400);
2196                 }
2197                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2198                         tg3_setup_flow_control(tp, 0, 0);
2199                         current_link_up = 1;
2200                 }
2201                 goto out;
2202         }
2203
2204         /* Want auto-negotiation.  */
2205         expected_sg_dig_ctrl = 0x81388400;
2206
2207         /* Pause capability */
2208         expected_sg_dig_ctrl |= (1 << 11);
2209
2210         /* Asymmetric pause */
2211         expected_sg_dig_ctrl |= (1 << 12);
2212
2213         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2214                 if (workaround)
2215                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2216                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2217                 udelay(5);
2218                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2219
2220                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2221         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2222                                  MAC_STATUS_SIGNAL_DET)) {
2223                 int i;
2224
2225                 /* Give it time to negotiate (~200ms) */
2226                 for (i = 0; i < 40000; i++) {
2227                         sg_dig_status = tr32(SG_DIG_STATUS);
2228                         if (sg_dig_status & (0x3))
2229                                 break;
2230                         udelay(5);
2231                 }
2232                 mac_status = tr32(MAC_STATUS);
2233
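                     /* Bit 1 of SG_DIG_STATUS flags autoneg completion; the
                      * code below treats bits 19 and 20 as the link
                      * partner's pause and asymmetric-pause advertisements.
                      */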
2234                 if ((sg_dig_status & (1 << 1)) &&
2235                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2236                         u32 local_adv, remote_adv;
2237
2238                         local_adv = ADVERTISE_PAUSE_CAP;
2239                         remote_adv = 0;
2240                         if (sg_dig_status & (1 << 19))
2241                                 remote_adv |= LPA_PAUSE_CAP;
2242                         if (sg_dig_status & (1 << 20))
2243                                 remote_adv |= LPA_PAUSE_ASYM;
2244
2245                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2246                         current_link_up = 1;
2247                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2248                 } else if (!(sg_dig_status & (1 << 1))) {
2249                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2250                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2251                         else {
2252                                 if (workaround) {
2253                                         u32 val = serdes_cfg;
2254
2255                                         if (port_a)
2256                                                 val |= 0xc010000;
2257                                         else
2258                                                 val |= 0x4010000;
2259
2260                                         tw32_f(MAC_SERDES_CFG, val);
2261                                 }
2262
2263                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2264                                 udelay(40);
2265
2266                                 /* Link parallel detection: the link is up only
2267                                  * if we have PCS_SYNC and are not receiving
2268                                  * config code words. */
2269                                 mac_status = tr32(MAC_STATUS);
2270                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2271                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2272                                         tg3_setup_flow_control(tp, 0, 0);
2273                                         current_link_up = 1;
2274                                 }
2275                         }
2276                 }
2277         }
2278
2279 out:
2280         return current_link_up;
2281 }
2282
2283 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2284 {
2285         int current_link_up = 0;
2286
2287         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2288                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2289                 goto out;
2290         }
2291
2292         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2293                 u32 flags;
2294                 int i;
2295   
2296                 if (fiber_autoneg(tp, &flags)) {
2297                         u32 local_adv, remote_adv;
2298
2299                         local_adv = ADVERTISE_PAUSE_CAP;
2300                         remote_adv = 0;
2301                         if (flags & MR_LP_ADV_SYM_PAUSE)
2302                                 remote_adv |= LPA_PAUSE_CAP;
2303                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2304                                 remote_adv |= LPA_PAUSE_ASYM;
2305
2306                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2307
2308                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2309                         current_link_up = 1;
2310                 }
2311                 for (i = 0; i < 30; i++) {
2312                         udelay(20);
2313                         tw32_f(MAC_STATUS,
2314                                (MAC_STATUS_SYNC_CHANGED |
2315                                 MAC_STATUS_CFG_CHANGED));
2316                         udelay(40);
2317                         if ((tr32(MAC_STATUS) &
2318                              (MAC_STATUS_SYNC_CHANGED |
2319                               MAC_STATUS_CFG_CHANGED)) == 0)
2320                                 break;
2321                 }
2322
2323                 mac_status = tr32(MAC_STATUS);
2324                 if (current_link_up == 0 &&
2325                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2326                     !(mac_status & MAC_STATUS_RCVD_CFG))
2327                         current_link_up = 1;
2328         } else {
2329                 /* Forcing 1000FD link up. */
2330                 current_link_up = 1;
2331                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2332
2333                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2334                 udelay(40);
2335         }
2336
2337 out:
2338         return current_link_up;
2339 }
2340
2341 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2342 {
2343         u32 orig_pause_cfg;
2344         u16 orig_active_speed;
2345         u8 orig_active_duplex;
2346         u32 mac_status;
2347         int current_link_up;
2348         int i;
2349
2350         orig_pause_cfg =
2351                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2352                                   TG3_FLAG_TX_PAUSE));
2353         orig_active_speed = tp->link_config.active_speed;
2354         orig_active_duplex = tp->link_config.active_duplex;
2355
2356         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2357             netif_carrier_ok(tp->dev) &&
2358             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2359                 mac_status = tr32(MAC_STATUS);
2360                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2361                                MAC_STATUS_SIGNAL_DET |
2362                                MAC_STATUS_CFG_CHANGED |
2363                                MAC_STATUS_RCVD_CFG);
2364                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2365                                    MAC_STATUS_SIGNAL_DET)) {
2366                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2367                                             MAC_STATUS_CFG_CHANGED));
2368                         return 0;
2369                 }
2370         }
2371
2372         tw32_f(MAC_TX_AUTO_NEG, 0);
2373
2374         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2375         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2376         tw32_f(MAC_MODE, tp->mac_mode);
2377         udelay(40);
2378
2379         if (tp->phy_id == PHY_ID_BCM8002)
2380                 tg3_init_bcm8002(tp);
2381
2382         /* Enable link change event even when serdes polling.  */
2383         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2384         udelay(40);
2385
2386         current_link_up = 0;
2387         mac_status = tr32(MAC_STATUS);
2388
2389         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2390                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2391         else
2392                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2393
2394         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2395         tw32_f(MAC_MODE, tp->mac_mode);
2396         udelay(40);
2397
2398         tp->hw_status->status =
2399                 (SD_STATUS_UPDATED |
2400                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2401
2402         for (i = 0; i < 100; i++) {
2403                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2404                                     MAC_STATUS_CFG_CHANGED));
2405                 udelay(5);
2406                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2407                                          MAC_STATUS_CFG_CHANGED)) == 0)
2408                         break;
2409         }
2410
2411         mac_status = tr32(MAC_STATUS);
2412         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2413                 current_link_up = 0;
2414                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2415                         tw32_f(MAC_MODE, (tp->mac_mode |
2416                                           MAC_MODE_SEND_CONFIGS));
2417                         udelay(1);
2418                         tw32_f(MAC_MODE, tp->mac_mode);
2419                 }
2420         }
2421
2422         if (current_link_up == 1) {
2423                 tp->link_config.active_speed = SPEED_1000;
2424                 tp->link_config.active_duplex = DUPLEX_FULL;
2425                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2426                                     LED_CTRL_LNKLED_OVERRIDE |
2427                                     LED_CTRL_1000MBPS_ON));
2428         } else {
2429                 tp->link_config.active_speed = SPEED_INVALID;
2430                 tp->link_config.active_duplex = DUPLEX_INVALID;
2431                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2432                                     LED_CTRL_LNKLED_OVERRIDE |
2433                                     LED_CTRL_TRAFFIC_OVERRIDE));
2434         }
2435
2436         if (current_link_up != netif_carrier_ok(tp->dev)) {
2437                 if (current_link_up)
2438                         netif_carrier_on(tp->dev);
2439                 else
2440                         netif_carrier_off(tp->dev);
2441                 tg3_link_report(tp);
2442         } else {
2443                 u32 now_pause_cfg =
2444                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2445                                          TG3_FLAG_TX_PAUSE);
2446                 if (orig_pause_cfg != now_pause_cfg ||
2447                     orig_active_speed != tp->link_config.active_speed ||
2448                     orig_active_duplex != tp->link_config.active_duplex)
2449                         tg3_link_report(tp);
2450         }
2451
2452         return 0;
2453 }
2454
2455 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2456 {
2457         int err;
2458
2459         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2460                 err = tg3_setup_fiber_phy(tp, force_reset);
2461         } else {
2462                 err = tg3_setup_copper_phy(tp, force_reset);
2463         }
2464
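             /* 1000 Mb/s half duplex needs a much larger slot time than the
              * other modes, so program MAC_TX_LENGTHS to match the
              * negotiated speed/duplex.
              */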
2465         if (tp->link_config.active_speed == SPEED_1000 &&
2466             tp->link_config.active_duplex == DUPLEX_HALF)
2467                 tw32(MAC_TX_LENGTHS,
2468                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2469                       (6 << TX_LENGTHS_IPG_SHIFT) |
2470                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2471         else
2472                 tw32(MAC_TX_LENGTHS,
2473                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2474                       (6 << TX_LENGTHS_IPG_SHIFT) |
2475                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2476
2477         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2478                 if (netif_carrier_ok(tp->dev)) {
2479                         tw32(HOSTCC_STAT_COAL_TICKS,
2480                              DEFAULT_STAT_COAL_TICKS);
2481                 } else {
2482                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2483                 }
2484         }
2485
2486         return err;
2487 }
2488
2489 /* Tigon3 never reports partial packet sends.  So we do not
2490  * need special logic to handle SKBs that have not had all
2491  * of their frags sent yet, like SunGEM does.
2492  */
2493 static void tg3_tx(struct tg3 *tp)
2494 {
2495         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2496         u32 sw_idx = tp->tx_cons;
2497
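             /* Walk every descriptor between our consumer index and the
              * hardware's consumer index, unmapping and freeing each
              * completed skb: the linear head first, then its page frags.
              */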
2498         while (sw_idx != hw_idx) {
2499                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2500                 struct sk_buff *skb = ri->skb;
2501                 int i;
2502
2503                 if (unlikely(skb == NULL))
2504                         BUG();
2505
2506                 pci_unmap_single(tp->pdev,
2507                                  pci_unmap_addr(ri, mapping),
2508                                  skb_headlen(skb),
2509                                  PCI_DMA_TODEVICE);
2510
2511                 ri->skb = NULL;
2512
2513                 sw_idx = NEXT_TX(sw_idx);
2514
2515                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2516                         if (unlikely(sw_idx == hw_idx))
2517                                 BUG();
2518
2519                         ri = &tp->tx_buffers[sw_idx];
2520                         if (unlikely(ri->skb != NULL))
2521                                 BUG();
2522
2523                         pci_unmap_page(tp->pdev,
2524                                        pci_unmap_addr(ri, mapping),
2525                                        skb_shinfo(skb)->frags[i].size,
2526                                        PCI_DMA_TODEVICE);
2527
2528                         sw_idx = NEXT_TX(sw_idx);
2529                 }
2530
2531                 dev_kfree_skb_irq(skb);
2532         }
2533
2534         tp->tx_cons = sw_idx;
2535
2536         if (netif_queue_stopped(tp->dev) &&
2537             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2538                 netif_wake_queue(tp->dev);
2539 }
2540
2541 /* Returns size of skb allocated or < 0 on error.
2542  *
2543  * We only need to fill in the address because the other members
2544  * of the RX descriptor are invariant, see tg3_init_rings.
2545  *
2546  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2547  * posting buffers we only dirty the first cache line of the RX
2548  * descriptor (containing the address).  Whereas for the RX status
2549  * buffers the cpu only reads the last cacheline of the RX descriptor
2550  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2551  */
2552 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2553                             int src_idx, u32 dest_idx_unmasked)
2554 {
2555         struct tg3_rx_buffer_desc *desc;
2556         struct ring_info *map, *src_map;
2557         struct sk_buff *skb;
2558         dma_addr_t mapping;
2559         int skb_size, dest_idx;
2560
2561         src_map = NULL;
2562         switch (opaque_key) {
2563         case RXD_OPAQUE_RING_STD:
2564                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2565                 desc = &tp->rx_std[dest_idx];
2566                 map = &tp->rx_std_buffers[dest_idx];
2567                 if (src_idx >= 0)
2568                         src_map = &tp->rx_std_buffers[src_idx];
2569                 skb_size = RX_PKT_BUF_SZ;
2570                 break;
2571
2572         case RXD_OPAQUE_RING_JUMBO:
2573                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2574                 desc = &tp->rx_jumbo[dest_idx];
2575                 map = &tp->rx_jumbo_buffers[dest_idx];
2576                 if (src_idx >= 0)
2577                         src_map = &tp->rx_jumbo_buffers[src_idx];
2578                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2579                 break;
2580
2581         default:
2582                 return -EINVAL;
2583         }
2584
2585         /* Do not overwrite any of the map or rp information
2586          * until we are sure we can commit to a new buffer.
2587          *
2588          * Callers depend upon this behavior and assume that
2589          * we leave everything unchanged if we fail.
2590          */
2591         skb = dev_alloc_skb(skb_size);
2592         if (skb == NULL)
2593                 return -ENOMEM;
2594
2595         skb->dev = tp->dev;
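             /* tp->rx_offset is normally 2, which pads the 14-byte Ethernet
              * header so the IP header lands on a 4-byte boundary; it
              * differs only on 5701 cards running in PCI-X mode (see the
              * comment in tg3_rx()).
              */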
2596         skb_reserve(skb, tp->rx_offset);
2597
2598         mapping = pci_map_single(tp->pdev, skb->data,
2599                                  skb_size - tp->rx_offset,
2600                                  PCI_DMA_FROMDEVICE);
2601
2602         map->skb = skb;
2603         pci_unmap_addr_set(map, mapping, mapping);
2604
2605         if (src_map != NULL)
2606                 src_map->skb = NULL;
2607
2608         desc->addr_hi = ((u64)mapping >> 32);
2609         desc->addr_lo = ((u64)mapping & 0xffffffff);
2610
2611         return skb_size;
2612 }
2613
2614 /* We only need to move over in the address because the other
2615  * members of the RX descriptor are invariant.  See notes above
2616  * tg3_alloc_rx_skb for full details.
2617  */
2618 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2619                            int src_idx, u32 dest_idx_unmasked)
2620 {
2621         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2622         struct ring_info *src_map, *dest_map;
2623         int dest_idx;
2624
2625         switch (opaque_key) {
2626         case RXD_OPAQUE_RING_STD:
2627                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2628                 dest_desc = &tp->rx_std[dest_idx];
2629                 dest_map = &tp->rx_std_buffers[dest_idx];
2630                 src_desc = &tp->rx_std[src_idx];
2631                 src_map = &tp->rx_std_buffers[src_idx];
2632                 break;
2633
2634         case RXD_OPAQUE_RING_JUMBO:
2635                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2636                 dest_desc = &tp->rx_jumbo[dest_idx];
2637                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2638                 src_desc = &tp->rx_jumbo[src_idx];
2639                 src_map = &tp->rx_jumbo_buffers[src_idx];
2640                 break;
2641
2642         default:
2643                 return;
2644         }
2645
2646         dest_map->skb = src_map->skb;
2647         pci_unmap_addr_set(dest_map, mapping,
2648                            pci_unmap_addr(src_map, mapping));
2649         dest_desc->addr_hi = src_desc->addr_hi;
2650         dest_desc->addr_lo = src_desc->addr_lo;
2651
2652         src_map->skb = NULL;
2653 }
2654
2655 #if TG3_VLAN_TAG_USED
2656 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2657 {
2658         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2659 }
2660 #endif
2661
2662 /* The RX ring scheme is composed of multiple rings which post fresh
2663  * buffers to the chip, and one special ring the chip uses to report
2664  * status back to the host.
2665  *
2666  * The special ring reports the status of received packets to the
2667  * host.  The chip does not write into the original descriptor the
2668  * RX buffer was obtained from.  The chip simply takes the original
2669  * descriptor as provided by the host, updates the status and length
2670  * field, then writes this into the next status ring entry.
2671  *
2672  * Each ring the host uses to post buffers to the chip is described
2673  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet
2674  * arrives, it is first placed into on-chip RAM.  Once the packet's
2675  * length is known, the chip walks down the TG3_BDINFO entries to
2676  * select a ring: each TG3_BDINFO specifies a MAXLEN field, and the
2677  * first TG3_BDINFO whose MAXLEN covers the packet's length is chosen.
2678  *
2679  * The "separate ring for rx status" scheme may sound queer, but it makes
2680  * sense from a cache coherency perspective.  If only the host writes
2681  * to the buffer post rings, and only the chip writes to the rx status
2682  * rings, then cache lines never move beyond shared-modified state.
2683  * If both the host and chip were to write into the same ring, cache line
2684  * eviction could occur since both entities want it in an exclusive state.
2685  */
2686 static int tg3_rx(struct tg3 *tp, int budget)
2687 {
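             /* work_mask accumulates the opaque ring key of every buffer we
              * consume so that, at the end, only the producer rings we
              * actually drained get their mailboxes kicked for a refill.
              */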
2688         u32 work_mask;
2689         u32 rx_rcb_ptr = tp->rx_rcb_ptr;
2690         u16 hw_idx, sw_idx;
2691         int received;
2692
2693         hw_idx = tp->hw_status->idx[0].rx_producer;
2694         /*
2695          * We need to order the read of hw_idx and the read of
2696          * the opaque cookie.
2697          */
2698         rmb();
2699         sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2700         work_mask = 0;
2701         received = 0;
2702         while (sw_idx != hw_idx && budget > 0) {
2703                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2704                 unsigned int len;
2705                 struct sk_buff *skb;
2706                 dma_addr_t dma_addr;
2707                 u32 opaque_key, desc_idx, *post_ptr;
2708
2709                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2710                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2711                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2712                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2713                                                   mapping);
2714                         skb = tp->rx_std_buffers[desc_idx].skb;
2715                         post_ptr = &tp->rx_std_ptr;
2716                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2717                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2718                                                   mapping);
2719                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
2720                         post_ptr = &tp->rx_jumbo_ptr;
2721                 }
2722                 } else {
2724                 }
2725
2726                 work_mask |= opaque_key;
2727
2728                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2729                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2730                 drop_it:
2731                         tg3_recycle_rx(tp, opaque_key,
2732                                        desc_idx, *post_ptr);
2733                 drop_it_no_recycle:
2734                         /* Other statistics kept track of by card. */
2735                         tp->net_stats.rx_dropped++;
2736                         goto next_pkt;
2737                 }
2738
2739                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2740
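                     /* Copy-break: large packets are handed up in the
                      * original ring buffer (with a fresh buffer allocated
                      * to replace it), while small packets are copied into
                      * a new skb so the ring buffer can simply be recycled.
                      */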
2741                 /* rx_offset != 2 iff this is a 5701 card running
2742                  * in PCI-X mode [see tg3_get_invariants()].
2743                  */
2744                 if (len > RX_COPY_THRESHOLD && tp->rx_offset == 2) {
2746                         int skb_size;
2747
2748                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2749                                                     desc_idx, *post_ptr);
2750                         if (skb_size < 0)
2751                                 goto drop_it;
2752
2753                         pci_unmap_single(tp->pdev, dma_addr,
2754                                          skb_size - tp->rx_offset,
2755                                          PCI_DMA_FROMDEVICE);
2756
2757                         skb_put(skb, len);
2758                 } else {
2759                         struct sk_buff *copy_skb;
2760
2761                         tg3_recycle_rx(tp, opaque_key,
2762                                        desc_idx, *post_ptr);
2763
2764                         copy_skb = dev_alloc_skb(len + 2);
2765                         if (copy_skb == NULL)
2766                                 goto drop_it_no_recycle;
2767
2768                         copy_skb->dev = tp->dev;
2769                         skb_reserve(copy_skb, 2);
2770                         skb_put(copy_skb, len);
2771                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2772                         memcpy(copy_skb->data, skb->data, len);
2773                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2774
2775                         /* We'll reuse the original ring buffer. */
2776                         skb = copy_skb;
2777                 }
2778
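                /* RX checksum offload: a hardware TCP/UDP checksum value of
                 * 0xffff presumably means the packet verified, so the stack
                 * can skip its own checksum pass.
                 */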
2779                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2780                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2781                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2782                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
2783                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2784                 else
2785                         skb->ip_summed = CHECKSUM_NONE;
2786
2787                 skb->protocol = eth_type_trans(skb, tp->dev);
2788 #if TG3_VLAN_TAG_USED
2789                 if (tp->vlgrp != NULL &&
2790                     desc->type_flags & RXD_FLAG_VLAN) {
2791                         tg3_vlan_rx(tp, skb,
2792                                     desc->err_vlan & RXD_VLAN_MASK);
2793                 } else
2794 #endif
2795                         netif_receive_skb(skb);
2796
2797                 tp->dev->last_rx = jiffies;
2798                 received++;
2799                 budget--;
2800
2801 next_pkt:
2802                 (*post_ptr)++;
2803 next_pkt_nopost:
2804                 rx_rcb_ptr++;
2805                 sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2806         }
2807
2808         /* ACK the status ring. */
2809         tp->rx_rcb_ptr = rx_rcb_ptr;
2810         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
2811                      (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));
2812
2813         /* Refill RX ring(s). */
2814         if (work_mask & RXD_OPAQUE_RING_STD) {
2815                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2816                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2817                              sw_idx);
2818         }
2819         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2820                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2821                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2822                              sw_idx);
2823         }
2824         mmiowb();
2825
2826         return received;
2827 }
2828
2829 static int tg3_poll(struct net_device *netdev, int *budget)
2830 {
2831         struct tg3 *tp = netdev_priv(netdev);
2832         struct tg3_hw_status *sblk = tp->hw_status;
2833         unsigned long flags;
2834         int done;
2835
2836         spin_lock_irqsave(&tp->lock, flags);
2837
2838         /* handle link change and other phy events */
2839         if (!(tp->tg3_flags &
2840               (TG3_FLAG_USE_LINKCHG_REG |
2841                TG3_FLAG_POLL_SERDES))) {
2842                 if (sblk->status & SD_STATUS_LINK_CHG) {
2843                         sblk->status = SD_STATUS_UPDATED |
2844                                 (sblk->status & ~SD_STATUS_LINK_CHG);
2845                         tg3_setup_phy(tp, 0);
2846                 }
2847         }
2848
2849         /* run TX completion thread */
2850         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2851                 spin_lock(&tp->tx_lock);
2852                 tg3_tx(tp);
2853                 spin_unlock(&tp->tx_lock);
2854         }
2855
2856         spin_unlock_irqrestore(&tp->lock, flags);
2857
2858         /* run RX thread, within the bounds set by NAPI.
2859          * All RX "locking" is done by ensuring outside
2860          * code synchronizes with dev->poll()
2861          */
2862         done = 1;
2863         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2864                 int orig_budget = *budget;
2865                 int work_done;
2866
2867                 if (orig_budget > netdev->quota)
2868                         orig_budget = netdev->quota;
2869
2870                 work_done = tg3_rx(tp, orig_budget);
2871
2872                 *budget -= work_done;
2873                 netdev->quota -= work_done;
2874
2875                 if (work_done >= orig_budget)
2876                         done = 0;
2877         }
2878
2879         /* if no more work, tell net stack and NIC we're done */
2880         if (done) {
2881                 spin_lock_irqsave(&tp->lock, flags);
2882                 __netif_rx_complete(netdev);
2883                 tg3_restart_ints(tp);
2884                 spin_unlock_irqrestore(&tp->lock, flags);
2885         }
2886
2887         return (done ? 0 : 1);
2888 }
2889
2890 static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
2891 {
2892         struct tg3_hw_status *sblk = tp->hw_status;
2893         unsigned int work_exists = 0;
2894
2895         /* check for phy events */
2896         if (!(tp->tg3_flags &
2897               (TG3_FLAG_USE_LINKCHG_REG |
2898                TG3_FLAG_POLL_SERDES))) {
2899                 if (sblk->status & SD_STATUS_LINK_CHG)
2900                         work_exists = 1;
2901         }
2902         /* check for RX/TX work to do */
2903         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
2904             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
2905                 work_exists = 1;
2906
2907         return work_exists;
2908 }
2909
2910 /* MSI ISR - No need to check for interrupt sharing and no need to
2911  * flush status block and interrupt mailbox. PCI ordering rules
2912  * guarantee that MSI will arrive after the status block.
2913  */
2914 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
2915 {
2916         struct net_device *dev = dev_id;
2917         struct tg3 *tp = netdev_priv(dev);
2918         struct tg3_hw_status *sblk = tp->hw_status;
2919         unsigned long flags;
2920
2921         spin_lock_irqsave(&tp->lock, flags);
2922
2923         /*
2924          * Writing any value to intr-mbox-0 clears PCI INTA# and
2925          * chip-internal interrupt pending events.
2926          * Writing non-zero to intr-mbox-0 additionally tells the
2927          * NIC to stop sending us irqs, engaging "in-intr-handler"
2928          * event coalescing.
2929          */
2930         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
2931         sblk->status &= ~SD_STATUS_UPDATED;
2932
2933         if (likely(tg3_has_work(dev, tp)))
2934                 netif_rx_schedule(dev);         /* schedule NAPI poll */
2935         else {
2936                 /* no work, re-enable interrupts
2937                  */
2938                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2939                              0x00000000);
2940         }
2941
2942         spin_unlock_irqrestore(&tp->lock, flags);
2943
2944         return IRQ_RETVAL(1);
2945 }
2946
2947 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2948 {
2949         struct net_device *dev = dev_id;
2950         struct tg3 *tp = netdev_priv(dev);
2951         struct tg3_hw_status *sblk = tp->hw_status;
2952         unsigned long flags;
2953         unsigned int handled = 1;
2954
2955         spin_lock_irqsave(&tp->lock, flags);
2956
2957         /* In INTx mode, it is possible for the interrupt to arrive at
2958          * the CPU before the status block write that was posted prior
2959          * to the interrupt.  Reading the PCI State register will confirm
2960          * whether the interrupt is ours and will flush the status block.
2961          */
2962         if ((sblk->status & SD_STATUS_UPDATED) ||
2963             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
2964                 /*
2965                  * Writing any value to intr-mbox-0 clears PCI INTA# and
2966                  * chip-internal interrupt pending events.
2967                  * Writing non-zero to intr-mbox-0 additionally tells the
2968                  * NIC to stop sending us irqs, engaging "in-intr-handler"
2969                  * event coalescing.
2970                  */
2971                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2972                              0x00000001);
2973                 /*
2974                  * Flush PCI write.  This also guarantees that our
2975                  * status block has been flushed to host memory.
2976                  */
2977                 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2978                 sblk->status &= ~SD_STATUS_UPDATED;
2979
2980                 if (likely(tg3_has_work(dev, tp)))
2981                         netif_rx_schedule(dev);         /* schedule NAPI poll */
2982                 else {
2983                         /* no work, shared interrupt perhaps?  re-enable
2984                          * interrupts, and flush that PCI write
2985                          */
2986                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2987                                 0x00000000);
2988                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2989                 }
2990         } else {        /* shared interrupt */
2991                 handled = 0;
2992         }
2993
2994         spin_unlock_irqrestore(&tp->lock, flags);
2995
2996         return IRQ_RETVAL(handled);
2997 }
2998
2999 static int tg3_init_hw(struct tg3 *);
3000 static int tg3_halt(struct tg3 *);
3001
3002 #ifdef CONFIG_NET_POLL_CONTROLLER
3003 static void tg3_poll_controller(struct net_device *dev)
3004 {
3005         struct tg3 *tp = netdev_priv(dev);
3006
3007         tg3_interrupt(tp->pdev->irq, dev, NULL);
3008 }
3009 #endif
3010
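/* Deferred reset handler, run from the shared workqueue (scheduled e.g.
 * by tg3_tx_timeout() below): stop the interface, halt and re-initialize
 * the hardware under tp->lock and tp->tx_lock, then restart it.
 */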
3011 static void tg3_reset_task(void *_data)
3012 {
3013         struct tg3 *tp = _data;
3014         unsigned int restart_timer;
3015
3016         tg3_netif_stop(tp);
3017
3018         spin_lock_irq(&tp->lock);
3019         spin_lock(&tp->tx_lock);
3020
3021         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3022         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3023
3024         tg3_halt(tp);
3025         tg3_init_hw(tp);
3026
3027         tg3_netif_start(tp);
3028
3029         spin_unlock(&tp->tx_lock);
3030         spin_unlock_irq(&tp->lock);
3031
3032         if (restart_timer)
3033                 mod_timer(&tp->timer, jiffies + 1);
3034 }
3035
3036 static void tg3_tx_timeout(struct net_device *dev)
3037 {
3038         struct tg3 *tp = netdev_priv(dev);
3039
3040         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3041                dev->name);
3042
3043         schedule_work(&tp->reset_task);
3044 }
3045
3046 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3047
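/* Workaround for the 4GB-boundary DMA hardware bug: linearize the SKB
 * into a freshly allocated copy, map and queue that single buffer, and
 * unmap the ring entries already set up for the original head and
 * fragments.  Returns 0 on success, -1 if the copy could not be
 * allocated (the caller then drops the packet silently).
 */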
3048 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3049                                        u32 guilty_entry, int guilty_len,
3050                                        u32 last_plus_one, u32 *start, u32 mss)
3051 {
3052         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3053         dma_addr_t new_addr;
3054         u32 entry = *start;
3055         int i;
3056
3057         if (!new_skb) {
3058                 dev_kfree_skb(skb);
3059                 return -1;
3060         }
3061
3062         /* New SKB is guaranteed to be linear. */
3063         entry = *start;
3064         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3065                                   PCI_DMA_TODEVICE);
3066         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3067                     (skb->ip_summed == CHECKSUM_HW) ?
3068                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3069         *start = NEXT_TX(entry);
3070
3071         /* Now clean up the sw ring entries. */
3072         i = 0;
3073         while (entry != last_plus_one) {
3074                 int len;
3075
3076                 if (i == 0)
3077                         len = skb_headlen(skb);
3078                 else
3079                         len = skb_shinfo(skb)->frags[i-1].size;
3080                 pci_unmap_single(tp->pdev,
3081                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3082                                  len, PCI_DMA_TODEVICE);
3083                 if (i == 0) {
3084                         tp->tx_buffers[entry].skb = new_skb;
3085                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3086                 } else {
3087                         tp->tx_buffers[entry].skb = NULL;
3088                 }
3089                 entry = NEXT_TX(entry);
3090                 i++;
3091         }
3092
3093         dev_kfree_skb(skb);
3094
3095         return 0;
3096 }
3097
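/* Fill in one hardware TX descriptor: the 64-bit DMA address is split
 * into high/low words, length and flags share one word, and the VLAN
 * tag and TSO MSS are packed together.  The low bit of mss_and_is_end
 * marks the last descriptor of a frame (TXD_FLAG_END).
 */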
3098 static void tg3_set_txd(struct tg3 *tp, int entry,
3099                         dma_addr_t mapping, int len, u32 flags,
3100                         u32 mss_and_is_end)
3101 {
3102         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3103         int is_end = (mss_and_is_end & 0x1);
3104         u32 mss = (mss_and_is_end >> 1);
3105         u32 vlan_tag = 0;
3106
3107         if (is_end)
3108                 flags |= TXD_FLAG_END;
3109         if (flags & TXD_FLAG_VLAN) {
3110                 vlan_tag = flags >> 16;
3111                 flags &= 0xffff;
3112         }
3113         vlan_tag |= (mss << TXD_MSS_SHIFT);
3114
3115         txd->addr_hi = ((u64) mapping >> 32);
3116         txd->addr_lo = ((u64) mapping & 0xffffffff);
3117         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3118         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3119 }
3120
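/* Return true if a DMA mapping of 'len' bytes at 'mapping' would wrap
 * past a 4GB address boundary (the low 32 bits overflow), which the
 * hardware apparently cannot DMA across; such buffers are bounced via
 * tigon3_4gb_hwbug_workaround() above.  For example, a 1500-byte buffer
 * mapped at 0xfffffe00 wraps and must be bounced.
 */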
3121 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3122 {
3123         u32 base = (u32) mapping & 0xffffffff;
3124
3125         return ((base > 0xffffdcc0) &&
3126                 (base + len + 8 < base));
3127 }
3128
3129 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3130 {
3131         struct tg3 *tp = netdev_priv(dev);
3132         dma_addr_t mapping;
3133         unsigned int i;
3134         u32 len, entry, base_flags, mss;
3135         int would_hit_hwbug;
3136         unsigned long flags;
3137
3138         len = skb_headlen(skb);
3139
3140         /* No BH disabling for tx_lock here.  We are running in BH disabled
3141          * context and TX reclaim runs via tp->poll inside of a software
3142          * interrupt.  Rejoice!
3143          *
3144          * Actually, things are not so simple.  If we are to take a hw
3145          * IRQ here, we can deadlock, consider:
3146          *
3147          *       CPU1           CPU2
3148          *   tg3_start_xmit
3149          *   take tp->tx_lock
3150          *                      tg3_timer
3151          *                      take tp->lock
3152          *   tg3_interrupt
3153          *   spin on tp->lock
3154          *                      spin on tp->tx_lock
3155          *
3156          * So we really do need to disable interrupts when taking
3157          * tx_lock here.
3158          */
3159         local_irq_save(flags);
3160         if (!spin_trylock(&tp->tx_lock)) { 
3161                 local_irq_restore(flags);
3162                 return NETDEV_TX_LOCKED; 
3163         } 
3164
3165         /* This is a hard error, log it. */
3166         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3167                 netif_stop_queue(dev);
3168                 spin_unlock_irqrestore(&tp->tx_lock, flags);
3169                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3170                        dev->name);
3171                 return NETDEV_TX_BUSY;
3172         }
3173
3174         entry = tp->tx_prod;
3175         base_flags = 0;
3176         if (skb->ip_summed == CHECKSUM_HW)
3177                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3178 #if TG3_TSO_SUPPORT != 0
3179         mss = 0;
3180         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3181             (mss = skb_shinfo(skb)->tso_size) != 0) {
3182                 int tcp_opt_len, ip_tcp_len;
3183
3184                 if (skb_header_cloned(skb) &&
3185                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3186                         dev_kfree_skb(skb);
3187                         goto out_unlock;
3188                 }
3189
3190                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3191                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3192
3193                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3194                                TXD_FLAG_CPU_POST_DMA);
3195
3196                 skb->nh.iph->check = 0;
3197                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3198                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3199                         skb->h.th->check = 0;
3200                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3201                 }
3202                 else {
3203                         skb->h.th->check =
3204                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3205                                                    skb->nh.iph->daddr,
3206                                                    0, IPPROTO_TCP, 0);
3207                 }
3208
3209                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3210                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3211                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3212                                 int tsflags;
3213
3214                                 tsflags = ((skb->nh.iph->ihl - 5) +
3215                                            (tcp_opt_len >> 2));
3216                                 mss |= (tsflags << 11);
3217                         }
3218                 } else {
3219                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3220                                 int tsflags;
3221
3222                                 tsflags = ((skb->nh.iph->ihl - 5) +
3223                                            (tcp_opt_len >> 2));
3224                                 base_flags |= tsflags << 12;
3225                         }
3226                 }
3227         }
3228 #else
3229         mss = 0;
3230 #endif
3231 #if TG3_VLAN_TAG_USED
3232         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3233                 base_flags |= (TXD_FLAG_VLAN |
3234                                (vlan_tx_tag_get(skb) << 16));
3235 #endif
3236
3237         /* Queue skb data, a.k.a. the main skb fragment. */
3238         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3239
3240         tp->tx_buffers[entry].skb = skb;
3241         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3242
3243         would_hit_hwbug = 0;
3244
3245         if (tg3_4g_overflow_test(mapping, len))
3246                 would_hit_hwbug = entry + 1;
3247
3248         tg3_set_txd(tp, entry, mapping, len, base_flags,
3249                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3250
3251         entry = NEXT_TX(entry);
3252
3253         /* Now loop through additional data fragments, and queue them. */
3254         if (skb_shinfo(skb)->nr_frags > 0) {
3255                 unsigned int i, last;
3256
3257                 last = skb_shinfo(skb)->nr_frags - 1;
3258                 for (i = 0; i <= last; i++) {
3259                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3260
3261                         len = frag->size;
3262                         mapping = pci_map_page(tp->pdev,
3263                                                frag->page,
3264                                                frag->page_offset,
3265                                                len, PCI_DMA_TODEVICE);
3266
3267                         tp->tx_buffers[entry].skb = NULL;
3268                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3269
3270                         if (tg3_4g_overflow_test(mapping, len)) {
3271                                 /* Only one should match. */
3272                                 if (would_hit_hwbug)
3273                                         BUG();
3274                                 would_hit_hwbug = entry + 1;
3275                         }
3276
3277                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3278                                 tg3_set_txd(tp, entry, mapping, len,
3279                                             base_flags, (i == last)|(mss << 1));
3280                         else
3281                                 tg3_set_txd(tp, entry, mapping, len,
3282                                             base_flags, (i == last));
3283
3284                         entry = NEXT_TX(entry);
3285                 }
3286         }
3287
3288         if (would_hit_hwbug) {
3289                 u32 last_plus_one = entry;
3290                 u32 start;
3291                 unsigned int len = 0;
3292
3293                 would_hit_hwbug -= 1;
3294                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3295                 entry &= (TG3_TX_RING_SIZE - 1);
3296                 start = entry;
3297                 i = 0;
3298                 while (entry != last_plus_one) {
3299                         if (i == 0)
3300                                 len = skb_headlen(skb);
3301                         else
3302                                 len = skb_shinfo(skb)->frags[i-1].size;
3303
3304                         if (entry == would_hit_hwbug)
3305                                 break;
3306
3307                         i++;
3308                         entry = NEXT_TX(entry);
3309
3310                 }
3311
3312                 /* If the workaround fails due to memory/mapping
3313                  * failure, silently drop this packet.
3314                  */
3315                 if (tigon3_4gb_hwbug_workaround(tp, skb,
3316                                                 entry, len,
3317                                                 last_plus_one,
3318                                                 &start, mss))
3319                         goto out_unlock;
3320
3321                 entry = start;
3322         }
3323
3324         /* Packets are ready; update the Tx producer index locally and on the card. */
3325         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3326
3327         tp->tx_prod = entry;
3328         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3329                 netif_stop_queue(dev);
3330
3331 out_unlock:
3332         mmiowb();
3333         spin_unlock_irqrestore(&tp->tx_lock, flags);
3334
3335         dev->trans_start = jiffies;
3336
3337         return NETDEV_TX_OK;
3338 }
3339
3340 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3341                                int new_mtu)
3342 {
3343         dev->mtu = new_mtu;
3344
3345         if (new_mtu > ETH_DATA_LEN)
3346                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3347         else
3348                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
3349 }
3350
3351 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3352 {
3353         struct tg3 *tp = netdev_priv(dev);
3354
3355         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3356                 return -EINVAL;
3357
3358         if (!netif_running(dev)) {
3359                 /* We'll just catch it later when the
3360                  * device is up'd.
3361                  */
3362                 tg3_set_mtu(dev, tp, new_mtu);
3363                 return 0;
3364         }
3365
3366         tg3_netif_stop(tp);
3367         spin_lock_irq(&tp->lock);
3368         spin_lock(&tp->tx_lock);
3369
3370         tg3_halt(tp);
3371
3372         tg3_set_mtu(dev, tp, new_mtu);
3373
3374         tg3_init_hw(tp);
3375
3376         tg3_netif_start(tp);
3377
3378         spin_unlock(&tp->tx_lock);
3379         spin_unlock_irq(&tp->lock);
3380
3381         return 0;
3382 }
3383
3384 /* Free up pending packets in all rx/tx rings.
3385  *
3386  * The chip has been shut down and the driver detached from
3387  * the networking stack, so no interrupts or new tx packets will
3388  * end up in the driver.  tp->{tx,}lock is not held and we are not
3389  * in an interrupt context and thus may sleep.
3390  */
3391 static void tg3_free_rings(struct tg3 *tp)
3392 {
3393         struct ring_info *rxp;
3394         int i;
3395
3396         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3397                 rxp = &tp->rx_std_buffers[i];
3398
3399                 if (rxp->skb == NULL)
3400                         continue;
3401                 pci_unmap_single(tp->pdev,
3402                                  pci_unmap_addr(rxp, mapping),
3403                                  RX_PKT_BUF_SZ - tp->rx_offset,
3404                                  PCI_DMA_FROMDEVICE);
3405                 dev_kfree_skb_any(rxp->skb);
3406                 rxp->skb = NULL;
3407         }
3408
3409         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3410                 rxp = &tp->rx_jumbo_buffers[i];
3411
3412                 if (rxp->skb == NULL)
3413                         continue;
3414                 pci_unmap_single(tp->pdev,
3415                                  pci_unmap_addr(rxp, mapping),
3416                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3417                                  PCI_DMA_FROMDEVICE);
3418                 dev_kfree_skb_any(rxp->skb);
3419                 rxp->skb = NULL;
3420         }
3421
3422         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3423                 struct tx_ring_info *txp;
3424                 struct sk_buff *skb;
3425                 int j;
3426
3427                 txp = &tp->tx_buffers[i];
3428                 skb = txp->skb;
3429
3430                 if (skb == NULL) {
3431                         i++;
3432                         continue;
3433                 }
3434
3435                 pci_unmap_single(tp->pdev,
3436                                  pci_unmap_addr(txp, mapping),
3437                                  skb_headlen(skb),
3438                                  PCI_DMA_TODEVICE);
3439                 txp->skb = NULL;
3440
3441                 i++;
3442
3443                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3444                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3445                         pci_unmap_page(tp->pdev,
3446                                        pci_unmap_addr(txp, mapping),
3447                                        skb_shinfo(skb)->frags[j].size,
3448                                        PCI_DMA_TODEVICE);
3449                         i++;
3450                 }
3451
3452                 dev_kfree_skb_any(skb);
3453         }
3454 }
3455
3456 /* Initialize tx/rx rings for packet processing.
3457  *
3458  * The chip has been shut down and the driver detached from
3459  * the networking stack, so no interrupts or new tx packets will
3460  * end up in the driver.  tp->{tx,}lock are held and thus
3461  * we may not sleep.
3462  */
3463 static void tg3_init_rings(struct tg3 *tp)
3464 {
3465         u32 i;
3466
3467         /* Free up all the SKBs. */
3468         tg3_free_rings(tp);
3469
3470         /* Zero out all descriptors. */
3471         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3472         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3473         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3474         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3475
3476         /* Initialize invariants of the rings; we only set this
3477          * stuff once.  This works because the card does not
3478          * write into the rx buffer posting rings.
3479          */
3480         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3481                 struct tg3_rx_buffer_desc *rxd;
3482
3483                 rxd = &tp->rx_std[i];
3484                 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3485                         << RXD_LEN_SHIFT;
3486                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3487                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3488                                (i << RXD_OPAQUE_INDEX_SHIFT));
3489         }
3490
3491         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3492                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3493                         struct tg3_rx_buffer_desc *rxd;
3494
3495                         rxd = &tp->rx_jumbo[i];
3496                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3497                                 << RXD_LEN_SHIFT;
3498                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3499                                 RXD_FLAG_JUMBO;
3500                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3501                                (i << RXD_OPAQUE_INDEX_SHIFT));
3502                 }
3503         }
3504
3505         /* Now allocate fresh SKBs for each rx ring. */
3506         for (i = 0; i < tp->rx_pending; i++) {
3507                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3508                                      -1, i) < 0)
3509                         break;
3510         }
3511
3512         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3513                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3514                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3515                                              -1, i) < 0)
3516                                 break;
3517                 }
3518         }
3519 }
3520
3521 /*
3522  * Must not be invoked with interrupt sources disabled and
3523  * the hardware shut down.
3524  */
3525 static void tg3_free_consistent(struct tg3 *tp)
3526 {
3527         if (tp->rx_std_buffers) {
3528                 kfree(tp->rx_std_buffers);
3529                 tp->rx_std_buffers = NULL;
3530         }
3531         if (tp->rx_std) {
3532                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3533                                     tp->rx_std, tp->rx_std_mapping);
3534                 tp->rx_std = NULL;
3535         }
3536         if (tp->rx_jumbo) {
3537                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3538                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3539                 tp->rx_jumbo = NULL;
3540         }
3541         if (tp->rx_rcb) {
3542                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3543                                     tp->rx_rcb, tp->rx_rcb_mapping);
3544                 tp->rx_rcb = NULL;
3545         }
3546         if (tp->tx_ring) {
3547                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3548                         tp->tx_ring, tp->tx_desc_mapping);
3549                 tp->tx_ring = NULL;
3550         }
3551         if (tp->hw_status) {
3552                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3553                                     tp->hw_status, tp->status_mapping);
3554                 tp->hw_status = NULL;
3555         }
3556         if (tp->hw_stats) {
3557                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3558                                     tp->hw_stats, tp->stats_mapping);
3559                 tp->hw_stats = NULL;
3560         }
3561 }
3562
3563 /*
3564  * Must not be invoked with interrupt sources disabled and
3565  * the hardware shut down.  Can sleep.
3566  */
3567 static int tg3_alloc_consistent(struct tg3 *tp)
3568 {
3569         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3570                                       (TG3_RX_RING_SIZE +
3571                                        TG3_RX_JUMBO_RING_SIZE)) +
3572                                      (sizeof(struct tx_ring_info) *
3573                                       TG3_TX_RING_SIZE),
3574                                      GFP_KERNEL);
3575         if (!tp->rx_std_buffers)
3576                 return -ENOMEM;
3577
3578         memset(tp->rx_std_buffers, 0,
3579                (sizeof(struct ring_info) *
3580                 (TG3_RX_RING_SIZE +
3581                  TG3_RX_JUMBO_RING_SIZE)) +
3582                (sizeof(struct tx_ring_info) *
3583                 TG3_TX_RING_SIZE));
3584
3585         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3586         tp->tx_buffers = (struct tx_ring_info *)
3587                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3588
3589         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3590                                           &tp->rx_std_mapping);
3591         if (!tp->rx_std)
3592                 goto err_out;
3593
3594         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3595                                             &tp->rx_jumbo_mapping);
3596
3597         if (!tp->rx_jumbo)
3598                 goto err_out;
3599
3600         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3601                                           &tp->rx_rcb_mapping);
3602         if (!tp->rx_rcb)
3603                 goto err_out;
3604
3605         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3606                                            &tp->tx_desc_mapping);
3607         if (!tp->tx_ring)
3608                 goto err_out;
3609
3610         tp->hw_status = pci_alloc_consistent(tp->pdev,
3611                                              TG3_HW_STATUS_SIZE,
3612                                              &tp->status_mapping);
3613         if (!tp->hw_status)
3614                 goto err_out;
3615
3616         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3617                                             sizeof(struct tg3_hw_stats),
3618                                             &tp->stats_mapping);
3619         if (!tp->hw_stats)
3620                 goto err_out;
3621
3622         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3623         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3624
3625         return 0;
3626
3627 err_out:
3628         tg3_free_consistent(tp);
3629         return -ENOMEM;
3630 }
3631
3632 #define MAX_WAIT_CNT 1000
3633
3634 /* To stop a block, clear the enable bit and poll till it
3635  * clears.  tp->lock is held.
3636  */
3637 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
3638 {
3639         unsigned int i;
3640         u32 val;
3641
3642         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
3643                 switch (ofs) {
3644                 case RCVLSC_MODE:
3645                 case DMAC_MODE:
3646                 case MBFREE_MODE:
3647                 case BUFMGR_MODE:
3648                 case MEMARB_MODE:
3649                         /* We can't enable/disable these bits of the
3650                          * 5705/5750, just say success.
3651                          */
3652                         return 0;
3653
3654                 default:
3655                         break;
3656                 }
3657         }
3658
3659         val = tr32(ofs);
3660         val &= ~enable_bit;
3661         tw32_f(ofs, val);
3662
3663         for (i = 0; i < MAX_WAIT_CNT; i++) {
3664                 udelay(100);
3665                 val = tr32(ofs);
3666                 if ((val & enable_bit) == 0)
3667                         break;
3668         }
3669
3670         if (i == MAX_WAIT_CNT) {
3671                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3672                        "ofs=%lx enable_bit=%x\n",
3673                        ofs, enable_bit);
3674                 return -ENODEV;
3675         }
3676
3677         return 0;
3678 }
3679
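/* Quiesce the chip: disable interrupts and the RX MAC, stop each RX and
 * TX engine block in turn via tg3_stop_block(), reset the FTQ, and
 * finally clear the host status and statistics blocks.
 */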
3680 /* tp->lock is held. */
3681 static int tg3_abort_hw(struct tg3 *tp)
3682 {
3683         int i, err;
3684
3685         tg3_disable_ints(tp);
3686
3687         tp->rx_mode &= ~RX_MODE_ENABLE;
3688         tw32_f(MAC_RX_MODE, tp->rx_mode);
3689         udelay(10);
3690
3691         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
3692         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
3693         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
3694         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
3695         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
3696         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
3697
3698         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
3699         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
3700         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
3701         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
3702         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
3703         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
3704         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
3705         if (err)
3706                 goto out;
3707
3708         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3709         tw32_f(MAC_MODE, tp->mac_mode);
3710         udelay(40);
3711
3712         tp->tx_mode &= ~TX_MODE_ENABLE;
3713         tw32_f(MAC_TX_MODE, tp->tx_mode);
3714
3715         for (i = 0; i < MAX_WAIT_CNT; i++) {
3716                 udelay(100);
3717                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3718                         break;
3719         }
3720         if (i >= MAX_WAIT_CNT) {
3721                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3722                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3723                        tp->dev->name, tr32(MAC_TX_MODE));
3724                 return -ENODEV;
3725         }
3726
3727         err  = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
3728         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
3729         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
3730
3731         tw32(FTQ_RESET, 0xffffffff);
3732         tw32(FTQ_RESET, 0x00000000);
3733
3734         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
3735         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
3736         if (err)
3737                 goto out;
3738
3739         if (tp->hw_status)
3740                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3741         if (tp->hw_stats)
3742                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3743
3744 out:
3745         return err;
3746 }
3747
3748 /* tp->lock is held. */
3749 static int tg3_nvram_lock(struct tg3 *tp)
3750 {
3751         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3752                 int i;
3753
3754                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3755                 for (i = 0; i < 8000; i++) {
3756                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3757                                 break;
3758                         udelay(20);
3759                 }
3760                 if (i == 8000)
3761                         return -ENODEV;
3762         }
3763         return 0;
3764 }
3765
3766 /* tp->lock is held. */
3767 static void tg3_nvram_unlock(struct tg3 *tp)
3768 {
3769         if (tp->tg3_flags & TG3_FLAG_NVRAM)
3770                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3771 }
3772
3773 /* tp->lock is held. */
3774 static void tg3_enable_nvram_access(struct tg3 *tp)
3775 {
3776         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
3777             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
3778                 u32 nvaccess = tr32(NVRAM_ACCESS);
3779
3780                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3781         }
3782 }
3783
3784 /* tp->lock is held. */
3785 static void tg3_disable_nvram_access(struct tg3 *tp)
3786 {
3787         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
3788             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
3789                 u32 nvaccess = tr32(NVRAM_ACCESS);
3790
3791                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3792         }
3793 }
3794
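/* The tg3_write_sig_*() helpers below implement the driver/ASF firmware
 * handshake: a magic value is posted to the firmware mailbox before a
 * reset, and the driver state (start/unload/suspend) is reported via
 * NIC_SRAM_FW_DRV_STATE_MBOX before and after it.
 */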
3795 /* tp->lock is held. */
3796 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3797 {
3798         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3799                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3800                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3801
3802         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3803                 switch (kind) {
3804                 case RESET_KIND_INIT:
3805                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3806                                       DRV_STATE_START);
3807                         break;
3808
3809                 case RESET_KIND_SHUTDOWN:
3810                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3811                                       DRV_STATE_UNLOAD);
3812                         break;
3813
3814                 case RESET_KIND_SUSPEND:
3815                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3816                                       DRV_STATE_SUSPEND);
3817                         break;
3818
3819                 default:
3820                         break;
3821                 }
3822         }
3823 }
3824
3825 /* tp->lock is held. */
3826 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3827 {
3828         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3829                 switch (kind) {
3830                 case RESET_KIND_INIT:
3831                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3832                                       DRV_STATE_START_DONE);
3833                         break;
3834
3835                 case RESET_KIND_SHUTDOWN:
3836                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3837                                       DRV_STATE_UNLOAD_DONE);
3838                         break;
3839
3840                 default:
3841                         break;
3842                 }
3843         }
3844 }
3845
3846 /* tp->lock is held. */
3847 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3848 {
3849         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3850                 switch (kind) {
3851                 case RESET_KIND_INIT:
3852                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3853                                       DRV_STATE_START);
3854                         break;
3855
3856                 case RESET_KIND_SHUTDOWN:
3857                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3858                                       DRV_STATE_UNLOAD);
3859                         break;
3860
3861                 case RESET_KIND_SUSPEND:
3862                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3863                                       DRV_STATE_SUSPEND);
3864                         break;
3865
3866                 default:
3867                         break;
3868                 }
3869         }
3870 }
3871
3872 static void tg3_stop_fw(struct tg3 *);
3873
3874 /* tp->lock is held. */
3875 static int tg3_chip_reset(struct tg3 *tp)
3876 {
3877         u32 val;
3878         u32 flags_save;
3879         int i;
3880
3881         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3882                 tg3_nvram_lock(tp);
3883
3884         /*
3885          * We must avoid the readl() that normally takes place.
3886          * It locks up machines, causes machine checks, and other
3887          * fun things.  So we temporarily disable the 5701
3888          * hardware workaround while we do the reset.
3889          */
3890         flags_save = tp->tg3_flags;
3891         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3892
3893         /* do the reset */
3894         val = GRC_MISC_CFG_CORECLK_RESET;
3895
3896         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3897                 if (tr32(0x7e2c) == 0x60) {
3898                         tw32(0x7e2c, 0x20);
3899                 }
3900                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3901                         tw32(GRC_MISC_CFG, (1 << 29));
3902                         val |= (1 << 29);
3903                 }
3904         }
3905
3906         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
3907                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3908         tw32(GRC_MISC_CFG, val);
3909
3910         /* restore 5701 hardware bug workaround flag */
3911         tp->tg3_flags = flags_save;
3912
3913         /* Unfortunately, we have to delay before the PCI read back.
3914          * Some 575X chips will not even respond to a PCI cfg access
3915          * when the reset command is given to the chip.
3916          *
3917          * How do these hardware designers expect things to work
3918          * properly if the PCI write is posted for a long period
3919          * of time?  It is always necessary to have some method by
3920          * which a register read back can occur to push out the
3921          * write that does the reset.
3922          *
3923          * For most tg3 variants the trick below was working.
3924          * Ho hum...
3925          */
3926         udelay(120);
3927
3928         /* Flush PCI posted writes.  The normal MMIO registers
3929          * are inaccessible at this time so this is the only
3930          * way to make this reliably (actually, this is no longer
3931          * way to do this reliably (actually, this is no longer
3932          * register read/write but this upset some 5701 variants.
3933          */
3934         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
3935
3936         udelay(120);
3937
3938         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3939                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
3940                         int i;
3941                         u32 cfg_val;
3942
3943                         /* Wait for link training to complete.  */
3944                         for (i = 0; i < 5000; i++)
3945                                 udelay(100);
3946
3947                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
3948                         pci_write_config_dword(tp->pdev, 0xc4,
3949                                                cfg_val | (1 << 15));
3950                 }
3951                 /* Set PCIE max payload size and clear error status.  */
3952                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
3953         }
3954
3955         /* Re-enable indirect register accesses. */
3956         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3957                                tp->misc_host_ctrl);
3958
3959         /* Set MAX PCI retry to zero. */
3960         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3961         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3962             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3963                 val |= PCISTATE_RETRY_SAME_DMA;
3964         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3965
3966         pci_restore_state(tp->pdev);
3967
3968         /* Make sure PCI-X relaxed ordering bit is clear. */
3969         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3970         val &= ~PCIX_CAPS_RELAXED_ORDERING;
3971         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3972
3973         tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3974
3975         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
3976                 tg3_stop_fw(tp);
3977                 tw32(0x5000, 0x400);
3978         }
3979
3980         tw32(GRC_MODE, tp->grc_mode);
3981
3982         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
3983                 u32 val = tr32(0xc4);
3984
3985                 tw32(0xc4, val | (1 << 15));
3986         }
3987
3988         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
3989             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3990                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
3991                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
3992                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
3993                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
3994         }
3995
3996         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3997                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
3998                 tw32_f(MAC_MODE, tp->mac_mode);
3999         } else
4000                 tw32_f(MAC_MODE, 0);
4001         udelay(40);
4002
4003         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4004                 /* Wait for firmware initialization to complete. */
4005                 for (i = 0; i < 100000; i++) {
4006                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4007                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4008                                 break;
4009                         udelay(10);
4010                 }
4011                 if (i >= 100000) {
4012                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4013                                "firmware will not restart magic=%08x\n",
4014                                tp->dev->name, val);
4015                         return -ENODEV;
4016                 }
4017         }
4018
4019         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4020             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4021                 u32 val = tr32(0x7c00);
4022
4023                 tw32(0x7c00, val | (1 << 25));
4024         }
4025
4026         /* Reprobe ASF enable state.  */
4027         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4028         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4029         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4030         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4031                 u32 nic_cfg;
4032
4033                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4034                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4035                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4036                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4037                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4038                 }
4039         }
4040
4041         return 0;
4042 }
4043
4044 /* tp->lock is held. */
4045 static void tg3_stop_fw(struct tg3 *tp)
4046 {
4047         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4048                 u32 val;
4049                 int i;
4050
4051                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4052                 val = tr32(GRC_RX_CPU_EVENT);
4053                 val |= (1 << 14);
4054                 tw32(GRC_RX_CPU_EVENT, val);
4055
4056                 /* Wait for RX cpu to ACK the event.  */
4057                 for (i = 0; i < 100; i++) {
4058                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4059                                 break;
4060                         udelay(1);
4061                 }
4062         }
4063 }
4064
4065 /* tp->lock is held. */
4066 static int tg3_halt(struct tg3 *tp)
4067 {
4068         int err;
4069
4070         tg3_stop_fw(tp);
4071
4072         tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
4073
4074         tg3_abort_hw(tp);
4075         err = tg3_chip_reset(tp);
4076
4077         tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
4078         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4079
4080         if (err)
4081                 return err;
4082
4083         return 0;
4084 }
4085
4086 #define TG3_FW_RELEASE_MAJOR    0x0
4087 #define TG3_FW_RELASE_MINOR     0x0
4088 #define TG3_FW_RELEASE_FIX      0x0
4089 #define TG3_FW_START_ADDR       0x08000000
4090 #define TG3_FW_TEXT_ADDR        0x08000000
4091 #define TG3_FW_TEXT_LEN         0x9c0
4092 #define TG3_FW_RODATA_ADDR      0x080009c0
4093 #define TG3_FW_RODATA_LEN       0x60
4094 #define TG3_FW_DATA_ADDR        0x08000a40
4095 #define TG3_FW_DATA_LEN         0x20
4096 #define TG3_FW_SBSS_ADDR        0x08000a60
4097 #define TG3_FW_SBSS_LEN         0xc
4098 #define TG3_FW_BSS_ADDR         0x08000a70
4099 #define TG3_FW_BSS_LEN          0x10
4100
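/* Firmware image for the NIC's on-chip CPU, stored as raw 32-bit words.
 * The TG3_FW_*_ADDR/_LEN constants above give the load address and
 * length of each segment (text, rodata, data, sbss, bss).
 */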
4101 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4102         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4103         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4104         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4105         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4106         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4107         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4108         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4109         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4110         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4111         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4112         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4113         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4114         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4115         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4116         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4117         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4118         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4119         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4120         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4121         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4122         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4123         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4124         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4125         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4126         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4127         0, 0, 0, 0, 0, 0,
4128         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4129         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4130         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4131         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4132         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4133         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4134         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4135         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4136         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4137         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4138         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4139         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4140         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4141         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4142         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4143         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4144         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4145         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4146         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4147         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4148         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4149         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4150         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4151         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4152         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4153         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4154         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4155         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4156         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4157         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4158         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4159         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4160         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4161         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4162         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4163         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4164         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4165         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4166         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4167         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4168         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4169         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4170         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4171         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4172         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4173         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4174         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4175         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4176         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4177         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4178         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4179         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4180         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4181         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4182         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4183         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4184         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4185         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4186         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4187         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4188         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4189         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4190         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4191         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4192         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4193 };
4194
4195 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4196         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4197         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4198         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4199         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4200         0x00000000
4201 };
4202
4203 #if 0 /* All zeros, don't eat up space with it. */
4204 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4205         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4206         0x00000000, 0x00000000, 0x00000000, 0x00000000
4207 };
4208 #endif
4209
4210 #define RX_CPU_SCRATCH_BASE     0x30000
4211 #define RX_CPU_SCRATCH_SIZE     0x04000
4212 #define TX_CPU_SCRATCH_BASE     0x34000
4213 #define TX_CPU_SCRATCH_SIZE     0x04000
4214
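/* Halting an on-chip CPU is a simple handshake: clear the CPU state word,
 * request CPU_MODE_HALT, and poll CPU_MODE until the halt bit is seen to
 * stick.  The RX CPU additionally gets one final flushed halt write and a
 * short delay, as the loop below shows.
 */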
4215 /* tp->lock is held. */
4216 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4217 {
4218         int i;
4219
4220         if (offset == TX_CPU_BASE &&
4221             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4222                 BUG();
4223
4224         if (offset == RX_CPU_BASE) {
4225                 for (i = 0; i < 10000; i++) {
4226                         tw32(offset + CPU_STATE, 0xffffffff);
4227                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4228                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4229                                 break;
4230                 }
4231
4232                 tw32(offset + CPU_STATE, 0xffffffff);
4233                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4234                 udelay(10);
4235         } else {
4236                 for (i = 0; i < 10000; i++) {
4237                         tw32(offset + CPU_STATE, 0xffffffff);
4238                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4239                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4240                                 break;
4241                 }
4242         }
4243
4244         if (i >= 10000) {
4245                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
4246                        "%s CPU\n",
4247                        tp->dev->name,
4248                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4249                 return -ENODEV;
4250         }
4251         return 0;
4252 }
4253
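/* One firmware image, described as three sections (text, rodata, data).
 * Each *_base is the image's link-time address; only its low 16 bits are
 * used as an offset into the target CPU's scratch RAM when loading.  A
 * NULL *_data pointer means the section is zero-filled instead of copied.
 */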
4254 struct fw_info {
4255         unsigned int text_base;
4256         unsigned int text_len;
4257         u32 *text_data;
4258         unsigned int rodata_base;
4259         unsigned int rodata_len;
4260         u32 *rodata_data;
4261         unsigned int data_base;
4262         unsigned int data_len;
4263         u32 *data_data;
4264 };
4265
4266 /* tp->lock is held. */
4267 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4268                                  int cpu_scratch_size, struct fw_info *info)
4269 {
4270         int err, i;
4271         u32 orig_tg3_flags = tp->tg3_flags;
4272         void (*write_op)(struct tg3 *, u32, u32);
4273
4274         if (cpu_base == TX_CPU_BASE &&
4275             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4276                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4277                        "TX CPU firmware on %s, which is a 5705-class chip.\n",
4278                        tp->dev->name);
4279                 return -EINVAL;
4280         }
4281
4282         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4283                 write_op = tg3_write_mem;
4284         else
4285                 write_op = tg3_write_indirect_reg32;
4286
4287         /* Force use of PCI config space for indirect register
4288          * write calls.
4289          */
4290         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4291
4292         err = tg3_halt_cpu(tp, cpu_base);
4293         if (err)
4294                 goto out;
4295
4296         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4297                 write_op(tp, cpu_scratch_base + i, 0);
4298         tw32(cpu_base + CPU_STATE, 0xffffffff);
4299         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4300         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4301                 write_op(tp, (cpu_scratch_base +
4302                               (info->text_base & 0xffff) +
4303                               (i * sizeof(u32))),
4304                          (info->text_data ?
4305                           info->text_data[i] : 0));
4306         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4307                 write_op(tp, (cpu_scratch_base +
4308                               (info->rodata_base & 0xffff) +
4309                               (i * sizeof(u32))),
4310                          (info->rodata_data ?
4311                           info->rodata_data[i] : 0));
4312         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4313                 write_op(tp, (cpu_scratch_base +
4314                               (info->data_base & 0xffff) +
4315                               (i * sizeof(u32))),
4316                          (info->data_data ?
4317                           info->data_data[i] : 0));
4318
4319         err = 0;
4320
4321 out:
4322         tp->tg3_flags = orig_tg3_flags;
4323         return err;
4324 }
4325
4326 /* tp->lock is held. */
4327 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4328 {
4329         struct fw_info info;
4330         int err, i;
4331
4332         info.text_base = TG3_FW_TEXT_ADDR;
4333         info.text_len = TG3_FW_TEXT_LEN;
4334         info.text_data = &tg3FwText[0];
4335         info.rodata_base = TG3_FW_RODATA_ADDR;
4336         info.rodata_len = TG3_FW_RODATA_LEN;
4337         info.rodata_data = &tg3FwRodata[0];
4338         info.data_base = TG3_FW_DATA_ADDR;
4339         info.data_len = TG3_FW_DATA_LEN;
4340         info.data_data = NULL;
4341
4342         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4343                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4344                                     &info);
4345         if (err)
4346                 return err;
4347
4348         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4349                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4350                                     &info);
4351         if (err)
4352                 return err;
4353
4354         /* Now start up only the RX CPU. */
4355         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4356         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4357
4358         for (i = 0; i < 5; i++) {
4359                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4360                         break;
4361                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4362                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4363                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4364                 udelay(1000);
4365         }
4366         if (i >= 5) {
4367                 printk(KERN_ERR PFX "tg3_load_firmware: %s failed "
4368                        "to set RX CPU PC, is %08x, should be %08x\n",
4369                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4370                        TG3_FW_TEXT_ADDR);
4371                 return -ENODEV;
4372         }
4373         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4374         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4375
4376         return 0;
4377 }
4378
4379 #if TG3_TSO_SUPPORT != 0
4380
4381 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4382 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4383 #define TG3_TSO_FW_RELEASE_FIX          0x0
4384 #define TG3_TSO_FW_START_ADDR           0x08000000
4385 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4386 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4387 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4388 #define TG3_TSO_FW_RODATA_LEN           0x60
4389 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4390 #define TG3_TSO_FW_DATA_LEN             0x30
4391 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4392 #define TG3_TSO_FW_SBSS_LEN             0x2c
4393 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4394 #define TG3_TSO_FW_BSS_LEN              0x894
4395
4396 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4397         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4398         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4399         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4400         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4401         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4402         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4403         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4404         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4405         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4406         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4407         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4408         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4409         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4410         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4411         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4412         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4413         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4414         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4415         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4416         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4417         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4418         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4419         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4420         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4421         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4422         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4423         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4424         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4425         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4426         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4427         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4428         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4429         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4430         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4431         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4432         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4433         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4434         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4435         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4436         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4437         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4438         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4439         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4440         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4441         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4442         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4443         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4444         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4445         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4446         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4447         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4448         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4449         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4450         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4451         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4452         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4453         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4454         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4455         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4456         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4457         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4458         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4459         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4460         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4461         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4462         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4463         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4464         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4465         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4466         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4467         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4468         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4469         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4470         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4471         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4472         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4473         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4474         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4475         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4476         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4477         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4478         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4479         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4480         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4481         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4482         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4483         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4484         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4485         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4486         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4487         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4488         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4489         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4490         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4491         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4492         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4493         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4494         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4495         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4496         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4497         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4498         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4499         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4500         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4501         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4502         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4503         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4504         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4505         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4506         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4507         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4508         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4509         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4510         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4511         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4512         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4513         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4514         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4515         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4516         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4517         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4518         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4519         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4520         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4521         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4522         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4523         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4524         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4525         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4526         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4527         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4528         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4529         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4530         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4531         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4532         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4533         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4534         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4535         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4536         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4537         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4538         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4539         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4540         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4541         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4542         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4543         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4544         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4545         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4546         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4547         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4548         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4549         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4550         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4551         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4552         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4553         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4554         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4555         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4556         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4557         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4558         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4559         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4560         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4561         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4562         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4563         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4564         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4565         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4566         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4567         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4568         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4569         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4570         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4571         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4572         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4573         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4574         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4575         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4576         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4577         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4578         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4579         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4580         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4581         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4582         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4583         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4584         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4585         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4586         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4587         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4588         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4589         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4590         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4591         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4592         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4593         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4594         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4595         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4596         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4597         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4598         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4599         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4600         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4601         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4602         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4603         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4604         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4605         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4606         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4607         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4608         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4609         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4610         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4611         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4612         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4613         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4614         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4615         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4616         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4617         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4618         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4619         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4620         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4621         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4622         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4623         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4624         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4625         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4626         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4627         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4628         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4629         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4630         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4631         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4632         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4633         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4634         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4635         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4636         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4637         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4638         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4639         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4640         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4641         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4642         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4643         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4644         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4645         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4646         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4647         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4648         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4649         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4650         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4651         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4652         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4653         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4654         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4655         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4656         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4657         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4658         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4659         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4660         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4661         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4662         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4663         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4664         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4665         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4666         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4667         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4668         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4669         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4670         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4671         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4672         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4673         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4674         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4675         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4676         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4677         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4678         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4679         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4680         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4681 };
4682
4683 static u32 tg3TsoFwRodata[] = {
4684         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4685         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4686         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4687         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4688         0x00000000,
4689 };
4690
4691 static u32 tg3TsoFwData[] = {
4692         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4693         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4694         0x00000000,
4695 };
4696
4697 /* 5705 needs a special version of the TSO firmware.  */
4698 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
4699 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
4700 #define TG3_TSO5_FW_RELEASE_FIX         0x0
4701 #define TG3_TSO5_FW_START_ADDR          0x00010000
4702 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
4703 #define TG3_TSO5_FW_TEXT_LEN            0xe90
4704 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
4705 #define TG3_TSO5_FW_RODATA_LEN          0x50
4706 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
4707 #define TG3_TSO5_FW_DATA_LEN            0x20
4708 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
4709 #define TG3_TSO5_FW_SBSS_LEN            0x28
4710 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
4711 #define TG3_TSO5_FW_BSS_LEN             0x88
4712
4713 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4714         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4715         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4716         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4717         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4718         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4719         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4720         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4721         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4722         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4723         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4724         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4725         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4726         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4727         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4728         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4729         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4730         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4731         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4732         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4733         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4734         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4735         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4736         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4737         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4738         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4739         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4740         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4741         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4742         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4743         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4744         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4745         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4746         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4747         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4748         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4749         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4750         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4751         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4752         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4753         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4754         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4755         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4756         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4757         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4758         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4759         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4760         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4761         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4762         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4763         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4764         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4765         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4766         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4767         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4768         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4769         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4770         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4771         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4772         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4773         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4774         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4775         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4776         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4777         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4778         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4779         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4780         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4781         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4782         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4783         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4784         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4785         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4786         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4787         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4788         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4789         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4790         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4791         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4792         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4793         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4794         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4795         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4796         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4797         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4798         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4799         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4800         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4801         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4802         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4803         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4804         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4805         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4806         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4807         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4808         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4809         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4810         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4811         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4812         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4813         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4814         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4815         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4816         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4817         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4818         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4819         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4820         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4821         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4822         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4823         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4824         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4825         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4826         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4827         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4828         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4829         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4830         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4831         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4832         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4833         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4834         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4835         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4836         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4837         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4838         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4839         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4840         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4841         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4842         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4843         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4844         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4845         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4846         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4847         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4848         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4849         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4850         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4851         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4852         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4853         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4854         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4855         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4856         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4857         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4858         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4859         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4860         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4861         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4862         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4863         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4864         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4865         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4866         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4867         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4868         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4869         0x00000000, 0x00000000, 0x00000000,
4870 };
4871
4872 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4873         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4874         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4875         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4876         0x00000000, 0x00000000, 0x00000000,
4877 };
4878
4879 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
4880         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
4881         0x00000000, 0x00000000, 0x00000000,
4882 };
4883
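/* On the 5705 the TSO firmware runs on the RX CPU (there is no usable TX
 * CPU on that chip) and is staged in the mbuf pool SRAM region; all other
 * TSO-capable chips load the regular image into the TX CPU scratch RAM.
 */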
4884 /* tp->lock is held. */
4885 static int tg3_load_tso_firmware(struct tg3 *tp)
4886 {
4887         struct fw_info info;
4888         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
4889         int err, i;
4890
4891         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4892                 return 0;
4893
4894         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4895                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4896                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4897                 info.text_data = &tg3Tso5FwText[0];
4898                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4899                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4900                 info.rodata_data = &tg3Tso5FwRodata[0];
4901                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4902                 info.data_len = TG3_TSO5_FW_DATA_LEN;
4903                 info.data_data = &tg3Tso5FwData[0];
4904                 cpu_base = RX_CPU_BASE;
4905                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
4906                 cpu_scratch_size = (info.text_len +
4907                                     info.rodata_len +
4908                                     info.data_len +
4909                                     TG3_TSO5_FW_SBSS_LEN +
4910                                     TG3_TSO5_FW_BSS_LEN);
4911         } else {
4912                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
4913                 info.text_len = TG3_TSO_FW_TEXT_LEN;
4914                 info.text_data = &tg3TsoFwText[0];
4915                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
4916                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
4917                 info.rodata_data = &tg3TsoFwRodata[0];
4918                 info.data_base = TG3_TSO_FW_DATA_ADDR;
4919                 info.data_len = TG3_TSO_FW_DATA_LEN;
4920                 info.data_data = &tg3TsoFwData[0];
4921                 cpu_base = TX_CPU_BASE;
4922                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
4923                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
4924         }
4925
4926         err = tg3_load_firmware_cpu(tp, cpu_base,
4927                                     cpu_scratch_base, cpu_scratch_size,
4928                                     &info);
4929         if (err)
4930                 return err;
4931
4932         /* Now start up the CPU. */
4933         tw32(cpu_base + CPU_STATE, 0xffffffff);
4934         tw32_f(cpu_base + CPU_PC,    info.text_base);
4935
4936         for (i = 0; i < 5; i++) {
4937                 if (tr32(cpu_base + CPU_PC) == info.text_base)
4938                         break;
4939                 tw32(cpu_base + CPU_STATE, 0xffffffff);
4940                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
4941                 tw32_f(cpu_base + CPU_PC,    info.text_base);
4942                 udelay(1000);
4943         }
4944         if (i >= 5) {
4945                 printk(KERN_ERR PFX "tg3_load_tso_firmware: %s failed "
4946                        "to set CPU PC, is %08x, should be %08x\n",
4947                        tp->dev->name, tr32(cpu_base + CPU_PC),
4948                        info.text_base);
4949                 return -ENODEV;
4950         }
4951         tw32(cpu_base + CPU_STATE, 0xffffffff);
4952         tw32_f(cpu_base + CPU_MODE,  0x00000000);
4953         return 0;
4954 }
4955
4956 #endif /* TG3_TSO_SUPPORT != 0 */
4957
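/* The MAC stores the station address as a register pair per slot:
 * ADDR_HIGH holds bytes 0-1 and ADDR_LOW holds bytes 2-5.  As an
 * illustration (not a real assignment), 00:10:18:01:02:03 would be
 * written as high 0x00000010 and low 0x18010203.  The address is
 * mirrored into all four MAC_ADDR slots, and on 5703/5704 into the
 * twelve extended slots as well.
 */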
4958 /* tp->lock is held. */
4959 static void __tg3_set_mac_addr(struct tg3 *tp)
4960 {
4961         u32 addr_high, addr_low;
4962         int i;
4963
4964         addr_high = ((tp->dev->dev_addr[0] << 8) |
4965                      tp->dev->dev_addr[1]);
4966         addr_low = ((tp->dev->dev_addr[2] << 24) |
4967                     (tp->dev->dev_addr[3] << 16) |
4968                     (tp->dev->dev_addr[4] <<  8) |
4969                     (tp->dev->dev_addr[5] <<  0));
4970         for (i = 0; i < 4; i++) {
4971                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4972                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
4973         }
4974
4975         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4976             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
4977                 for (i = 0; i < 12; i++) {
4978                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
4979                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
4980                 }
4981         }
4982
4983         addr_high = (tp->dev->dev_addr[0] +
4984                      tp->dev->dev_addr[1] +
4985                      tp->dev->dev_addr[2] +
4986                      tp->dev->dev_addr[3] +
4987                      tp->dev->dev_addr[4] +
4988                      tp->dev->dev_addr[5]) &
4989                 TX_BACKOFF_SEED_MASK;
4990         tw32(MAC_TX_BACKOFF_SEED, addr_high);
4991 }
4992
4993 static int tg3_set_mac_addr(struct net_device *dev, void *p)
4994 {
4995         struct tg3 *tp = netdev_priv(dev);
4996         struct sockaddr *addr = p;
4997
4998         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4999
5000         spin_lock_irq(&tp->lock);
5001         __tg3_set_mac_addr(tp);
5002         spin_unlock_irq(&tp->lock);
5003
5004         return 0;
5005 }
5006
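/* A bdinfo block in NIC SRAM has a small fixed layout: a 64-bit host DMA
 * address (high word first), a maxlen/flags word (typically the ring
 * length in the upper 16 bits with flag bits below), and, on pre-5705
 * chips only, a NIC-local ring address.
 */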
5007 /* tp->lock is held. */
5008 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5009                            dma_addr_t mapping, u32 maxlen_flags,
5010                            u32 nic_addr)
5011 {
5012         tg3_write_mem(tp,
5013                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5014                       ((u64) mapping >> 32));
5015         tg3_write_mem(tp,
5016                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5017                       ((u64) mapping & 0xffffffff));
5018         tg3_write_mem(tp,
5019                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5020                        maxlen_flags);
5021
5022         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5023                 tg3_write_mem(tp,
5024                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5025                               nic_addr);
5026 }
5027
5028 static void __tg3_set_rx_mode(struct net_device *);
5029
5030 /* tp->lock is held. */
5031 static int tg3_reset_hw(struct tg3 *tp)
5032 {
5033         u32 val, rdmac_mode;
5034         int i, err, limit;
5035
5036         tg3_disable_ints(tp);
5037
5038         tg3_stop_fw(tp);
5039
5040         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5041
5042         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5043                 err = tg3_abort_hw(tp);
5044                 if (err)
5045                         return err;
5046         }
5047
5048         err = tg3_chip_reset(tp);
5049         if (err)
5050                 return err;
5051
5052         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5053
5054         /* This works around an issue with Athlon chipsets on
5055          * B3 tigon3 silicon.  This bit has no effect on any
5056          * other revision.  But do not set this on PCI Express
5057          * chips.
5058          */
5059         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5060                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5061         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5062
5063         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5064             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5065                 val = tr32(TG3PCI_PCISTATE);
5066                 val |= PCISTATE_RETRY_SAME_DMA;
5067                 tw32(TG3PCI_PCISTATE, val);
5068         }
5069
5070         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5071                 /* Enable some hw fixes.  */
5072                 val = tr32(TG3PCI_MSI_DATA);
5073                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5074                 tw32(TG3PCI_MSI_DATA, val);
5075         }
5076
5077         /* Descriptor ring init may access the NIC SRAM
5078          * area to set up the TX descriptors, so we can
5079          * only do this after the hardware has been
5080          * successfully reset.
5081          */
5082         tg3_init_rings(tp);
5083
5084         /* This value is determined during the probe time DMA
5085          * engine test, tg3_test_dma.
5086          */
5087         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5088
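        /* Send BDs are kept in host memory (GRC_MODE_HOST_SENDBDS); the
         * NO_*_PHDR_CSUM bits mirror the driver's pseudo-header checksum
         * flags.
         */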
5089         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5090                           GRC_MODE_4X_NIC_SEND_RINGS |
5091                           GRC_MODE_NO_TX_PHDR_CSUM |
5092                           GRC_MODE_NO_RX_PHDR_CSUM);
5093         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5094         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5095                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5096         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5097                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5098
5099         tw32(GRC_MODE,
5100              tp->grc_mode |
5101              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5102
5103         /* Setup the timer prescaler register.  Clock is always 66 MHz. */
5104         val = tr32(GRC_MISC_CFG);
5105         val &= ~0xff;
5106         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5107         tw32(GRC_MISC_CFG, val);
5108
5109         /* Initialize MBUF/DESC pool. */
5110         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5111                 /* Do nothing.  */
5112         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5113                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5114                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5115                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5116                 else
5117                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5118                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5119                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5120         }
5121 #if TG3_TSO_SUPPORT != 0
5122         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5123                 int fw_len;
5124
5125                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5126                           TG3_TSO5_FW_RODATA_LEN +
5127                           TG3_TSO5_FW_DATA_LEN +
5128                           TG3_TSO5_FW_SBSS_LEN +
5129                           TG3_TSO5_FW_BSS_LEN);
5130                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5131                 tw32(BUFMGR_MB_POOL_ADDR,
5132                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5133                 tw32(BUFMGR_MB_POOL_SIZE,
5134                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5135         }
5136 #endif
5137
5138         if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
5139                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5140                      tp->bufmgr_config.mbuf_read_dma_low_water);
5141                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5142                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5143                 tw32(BUFMGR_MB_HIGH_WATER,
5144                      tp->bufmgr_config.mbuf_high_water);
5145         } else {
5146                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5147                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5148                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5149                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5150                 tw32(BUFMGR_MB_HIGH_WATER,
5151                      tp->bufmgr_config.mbuf_high_water_jumbo);
5152         }
5153         tw32(BUFMGR_DMA_LOW_WATER,
5154              tp->bufmgr_config.dma_low_water);
5155         tw32(BUFMGR_DMA_HIGH_WATER,
5156              tp->bufmgr_config.dma_high_water);
5157
5158         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5159         for (i = 0; i < 2000; i++) {
5160                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5161                         break;
5162                 udelay(10);
5163         }
5164         if (i >= 2000) {
5165                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5166                        tp->dev->name);
5167                 return -ENODEV;
5168         }
5169
5170         /* Setup replenish threshold. */
5171         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5172
5173         /* Initialize TG3_BDINFO's at:
5174          *  RCVDBDI_STD_BD:     standard eth size rx ring
5175          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5176          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5177          *
5178          * like so:
5179          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5180          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5181          *                              ring attribute flags
5182          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5183          *
5184          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5185          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5186          *
5187          * The size of each ring is fixed in the firmware, but the location is
5188          * configurable.
5189          */
5190         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5191              ((u64) tp->rx_std_mapping >> 32));
5192         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5193              ((u64) tp->rx_std_mapping & 0xffffffff));
5194         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5195              NIC_SRAM_RX_BUFFER_DESC);
5196
5197         /* Don't even try to program the JUMBO/MINI buffer descriptor
5198          * configs on 5705.
5199          */
5200         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5201                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5202                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5203         } else {
5204                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5205                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5206
5207                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5208                      BDINFO_FLAGS_DISABLED);
5209
5210                 /* Setup replenish threshold. */
5211                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5212
5213                 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
5214                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5215                              ((u64) tp->rx_jumbo_mapping >> 32));
5216                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5217                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5218                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5219                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5220                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5221                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5222                 } else {
5223                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5224                              BDINFO_FLAGS_DISABLED);
5225                 }
5226
5227         }
5228
5229         /* There is only one send ring on 5705/5750, no need to explicitly
5230          * disable the others.
5231          */
5232         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5233                 /* Clear out send RCB ring in SRAM. */
5234                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5235                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5236                                       BDINFO_FLAGS_DISABLED);
5237         }
5238
5239         tp->tx_prod = 0;
5240         tp->tx_cons = 0;
5241         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5242         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5243
5244         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5245                        tp->tx_desc_mapping,
5246                        (TG3_TX_RING_SIZE <<
5247                         BDINFO_FLAGS_MAXLEN_SHIFT),
5248                        NIC_SRAM_TX_BUFFER_DESC);
5249
5250         /* There is only one receive return ring on 5705/5750, no need
5251          * to explicitly disable the others.
5252          */
5253         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5254                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5255                      i += TG3_BDINFO_SIZE) {
5256                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5257                                       BDINFO_FLAGS_DISABLED);
5258                 }
5259         }
5260
5261         tp->rx_rcb_ptr = 0;
5262         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5263
5264         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5265                        tp->rx_rcb_mapping,
5266                        (TG3_RX_RCB_RING_SIZE(tp) <<
5267                         BDINFO_FLAGS_MAXLEN_SHIFT),
5268                        0);
5269
5270         tp->rx_std_ptr = tp->rx_pending;
5271         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5272                      tp->rx_std_ptr);
5273
5274         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5275                                                 tp->rx_jumbo_pending : 0;
5276         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5277                      tp->rx_jumbo_ptr);
5278
5279         /* Initialize MAC address and backoff seed. */
5280         __tg3_set_mac_addr(tp);
5281
5282         /* MTU + ethernet header (ETH_HLEN) + FCS (4) + optional VLAN tag (4) */
5283         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5284
5285         /* The slot time is changed by tg3_setup_phy if we
5286          * run at gigabit with half duplex.
5287          */
5288         tw32(MAC_TX_LENGTHS,
5289              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5290              (6 << TX_LENGTHS_IPG_SHIFT) |
5291              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5292
5293         /* Receive rules. */
5294         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5295         tw32(RCVLPC_CONFIG, 0x0181);
5296
5297         /* Calculate the RDMAC_MODE setting early; we need it to
5298          * determine the RCVLPC_STATS_ENABLE mask below.
5299          */
5300         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5301                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5302                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5303                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5304                       RDMAC_MODE_LNGREAD_ENAB);
5305         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5306                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5307
5308         /* If statement applies to 5705 and 5750 PCI devices only */
5309         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5310              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5311             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5312                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5313                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5314                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5315                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5316                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5317                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5318                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5319                 }
5320         }
5321
5322         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5323                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5324
5325 #if TG3_TSO_SUPPORT != 0
5326         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5327                 rdmac_mode |= (1 << 27);
5328 #endif
5329
5330         /* Receive/send statistics. */
5331         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5332             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5333                 val = tr32(RCVLPC_STATS_ENABLE);
5334                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5335                 tw32(RCVLPC_STATS_ENABLE, val);
5336         } else {
5337                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5338         }
5339         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5340         tw32(SNDDATAI_STATSENAB, 0xffffff);
5341         tw32(SNDDATAI_STATSCTRL,
5342              (SNDDATAI_SCTRL_ENABLE |
5343               SNDDATAI_SCTRL_FASTUPD));
5344
5345         /* Setup host coalescing engine. */
5346         tw32(HOSTCC_MODE, 0);
5347         for (i = 0; i < 2000; i++) {
5348                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5349                         break;
5350                 udelay(10);
5351         }
5352
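        /* Program the default coalescing parameters: with RXMAX_FRAMES
         * set to 1 the chip interrupts for every received frame, and
         * the TX side uses the small LOW_* defaults.
         */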
5353         tw32(HOSTCC_RXCOL_TICKS, 0);
5354         tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
5355         tw32(HOSTCC_RXMAX_FRAMES, 1);
5356         tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
5357         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5358                 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
5359                 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
5360         }
5361         tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
5362         tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
5363
5364         /* set status block DMA address */
5365         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5366              ((u64) tp->status_mapping >> 32));
5367         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5368              ((u64) tp->status_mapping & 0xffffffff));
5369
5370         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5371                 /* Status/statistics block address.  See tg3_timer,
5372                  * the tg3_periodic_fetch_stats call there, and
5373                  * tg3_get_stats to see how this works for 5705/5750 chips.
5374                  */
5375                 tw32(HOSTCC_STAT_COAL_TICKS,
5376                      DEFAULT_STAT_COAL_TICKS);
5377                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5378                      ((u64) tp->stats_mapping >> 32));
5379                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5380                      ((u64) tp->stats_mapping & 0xffffffff));
5381                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5382                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5383         }
5384
5385         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5386
5387         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5388         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5389         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5390                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5391
5392         /* Clear statistics/status block in chip, and status block in ram. */
5393         for (i = NIC_SRAM_STATS_BLK;
5394              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5395              i += sizeof(u32)) {
5396                 tg3_write_mem(tp, i, 0);
5397                 udelay(40);
5398         }
5399         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5400
5401         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5402                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5403         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5404         udelay(40);
5405
5406         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5407          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5408          * register to preserve the GPIO settings for LOMs. The GPIOs,
5409          * whether used as inputs or outputs, are set by boot code after
5410          * reset.
5411          */
5412         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5413                 u32 gpio_mask;
5414
5415                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5416                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5417
5418                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5419                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5420                                      GRC_LCLCTRL_GPIO_OUTPUT3;
5421
5422                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5423
5424                 /* GPIO1 must be driven high for eeprom write protect */
5425                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5426                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5427         }
5428         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5429         udelay(100);
5430
5431         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5432         tr32(MAILBOX_INTERRUPT_0);
5433
5434         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5435                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5436                 udelay(40);
5437         }
5438
5439         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5440                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5441                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5442                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5443                WDMAC_MODE_LNGREAD_ENAB);
5444
5445         /* If statement applies to 5705 and 5750 PCI devices only */
5446         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5447              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5448             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5449                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5450                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5451                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5452                         /* nothing */
5453                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5454                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5455                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5456                         val |= WDMAC_MODE_RX_ACCEL;
5457                 }
5458         }
5459
5460         tw32_f(WDMAC_MODE, val);
5461         udelay(40);
5462
5463         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5464                 val = tr32(TG3PCI_X_CAPS);
5465                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5466                         val &= ~PCIX_CAPS_BURST_MASK;
5467                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5468                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5469                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5470                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5471                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5472                                 val |= (tp->split_mode_max_reqs <<
5473                                         PCIX_CAPS_SPLIT_SHIFT);
5474                 }
5475                 tw32(TG3PCI_X_CAPS, val);
5476         }
5477
5478         tw32_f(RDMAC_MODE, rdmac_mode);
5479         udelay(40);
5480
5481         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5482         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5483                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5484         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5485         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5486         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5487         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5488         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5489 #if TG3_TSO_SUPPORT != 0
5490         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5491                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5492 #endif
5493         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5494         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5495
5496         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5497                 err = tg3_load_5701_a0_firmware_fix(tp);
5498                 if (err)
5499                         return err;
5500         }
5501
5502 #if TG3_TSO_SUPPORT != 0
5503         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5504                 err = tg3_load_tso_firmware(tp);
5505                 if (err)
5506                         return err;
5507         }
5508 #endif
5509
5510         tp->tx_mode = TX_MODE_ENABLE;
5511         tw32_f(MAC_TX_MODE, tp->tx_mode);
5512         udelay(100);
5513
5514         tp->rx_mode = RX_MODE_ENABLE;
5515         tw32_f(MAC_RX_MODE, tp->rx_mode);
5516         udelay(10);
5517
5518         if (tp->link_config.phy_is_low_power) {
5519                 tp->link_config.phy_is_low_power = 0;
5520                 tp->link_config.speed = tp->link_config.orig_speed;
5521                 tp->link_config.duplex = tp->link_config.orig_duplex;
5522                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5523         }
5524
5525         tp->mi_mode = MAC_MI_MODE_BASE;
5526         tw32_f(MAC_MI_MODE, tp->mi_mode);
5527         udelay(80);
5528
5529         tw32(MAC_LED_CTRL, tp->led_ctrl);
5530
5531         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5532         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5533                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5534                 udelay(10);
5535         }
5536         tw32_f(MAC_RX_MODE, tp->rx_mode);
5537         udelay(10);
5538
5539         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5540                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5541                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5542                         /* Set drive transmission level to 1.2V  */
5543                         /* only if the signal pre-emphasis bit is not set  */
5544                         val = tr32(MAC_SERDES_CFG);
5545                         val &= 0xfffff000;
5546                         val |= 0x880;
5547                         tw32(MAC_SERDES_CFG, val);
5548                 }
5549                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5550                         tw32(MAC_SERDES_CFG, 0x616000);
5551         }
5552
5553         /* Prevent chip from dropping frames when flow control
5554          * is enabled.
5555          */
5556         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5557
5558         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5559             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5560                 /* Use hardware link auto-negotiation */
5561                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5562         }
5563
5564         err = tg3_setup_phy(tp, 1);
5565         if (err)
5566                 return err;
5567
5568         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5569                 u32 tmp;
5570
5571                 /* Clear CRC stats. */
5572                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5573                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
5574                         tg3_readphy(tp, 0x14, &tmp);
5575                 }
5576         }
5577
5578         __tg3_set_rx_mode(tp->dev);
5579
5580         /* Initialize receive rules. */
5581         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5582         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5583         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5584         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5585
5586         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5587                 limit = 8;
5588         else
5589                 limit = 16;
5590         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5591                 limit -= 4;
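        /* Fall through the switch to clear every unused rule/value pair
         * from limit-1 down to 4.  Rules 0-3 are handled above, and when
         * ASF is enabled the top four rules are left for the firmware.
         */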
5592         switch (limit) {
5593         case 16:
5594                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5595         case 15:
5596                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5597         case 14:
5598                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5599         case 13:
5600                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5601         case 12:
5602                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5603         case 11:
5604                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5605         case 10:
5606                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5607         case 9:
5608                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5609         case 8:
5610                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5611         case 7:
5612                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5613         case 6:
5614                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5615         case 5:
5616                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5617         case 4:
5618                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
5619         case 3:
5620                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
5621         case 2:
5622         case 1:
5623
5624         default:
5625                 break;
5626         }
5627
5628         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5629
5630         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5631                 tg3_enable_ints(tp);
5632
5633         return 0;
5634 }
5635
5636 /* Called at device open time to get the chip ready for
5637  * packet processing.  Invoked with tp->lock held.
5638  */
5639 static int tg3_init_hw(struct tg3 *tp)
5640 {
5641         int err;
5642
5643         /* Force the chip into D0. */
5644         err = tg3_set_power_state(tp, 0);
5645         if (err)
5646                 goto out;
5647
5648         tg3_switch_clocks(tp);
5649
5650         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5651
5652         err = tg3_reset_hw(tp);
5653
5654 out:
5655         return err;
5656 }
5657
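/* Accumulate a 32-bit hardware counter into a 64-bit software counter,
 * using wraparound of the low word to detect and propagate the carry.
 */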
5658 #define TG3_STAT_ADD32(PSTAT, REG) \
5659 do {    u32 __val = tr32(REG); \
5660         (PSTAT)->low += __val; \
5661         if ((PSTAT)->low < __val) \
5662                 (PSTAT)->high += 1; \
5663 } while (0)
5664
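/* 5705/5750 chips do not DMA a statistics block to host memory, so
 * tg3_timer calls this to accumulate the MAC counters by hand.
 */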
5665 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5666 {
5667         struct tg3_hw_stats *sp = tp->hw_stats;
5668
5669         if (!netif_carrier_ok(tp->dev))
5670                 return;
5671
5672         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5673         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5674         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5675         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5676         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5677         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5678         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5679         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5680         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5681         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5682         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5683         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5684         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5685
5686         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5687         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5688         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5689         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5690         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5691         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5692         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5693         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5694         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5695         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5696         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5697         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5698         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5699         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
5700 }
5701
5702 static void tg3_timer(unsigned long __opaque)
5703 {
5704         struct tg3 *tp = (struct tg3 *) __opaque;
5705         unsigned long flags;
5706
5707         spin_lock_irqsave(&tp->lock, flags);
5708         spin_lock(&tp->tx_lock);
5709
5710         /* All of this garbage is because, when using non-tagged IRQ
5711          * status, the mailbox/status_block protocol the chip uses
5712          * with the CPU is race prone.
5713          */
5714         if (tp->hw_status->status & SD_STATUS_UPDATED) {
5715                 tw32(GRC_LOCAL_CTRL,
5716                      tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5717         } else {
5718                 tw32(HOSTCC_MODE, tp->coalesce_mode |
5719                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5720         }
5721
5722         if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5723                 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5724                 spin_unlock(&tp->tx_lock);
5725                 spin_unlock_irqrestore(&tp->lock, flags);
5726                 schedule_work(&tp->reset_task);
5727                 return;
5728         }
5729
5730         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5731                 tg3_periodic_fetch_stats(tp);
5732
5733         /* This part only runs once per second. */
5734         if (!--tp->timer_counter) {
5735                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5736                         u32 mac_stat;
5737                         int phy_event;
5738
5739                         mac_stat = tr32(MAC_STATUS);
5740
5741                         phy_event = 0;
5742                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5743                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5744                                         phy_event = 1;
5745                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5746                                 phy_event = 1;
5747
5748                         if (phy_event)
5749                                 tg3_setup_phy(tp, 0);
5750                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5751                         u32 mac_stat = tr32(MAC_STATUS);
5752                         int need_setup = 0;
5753
5754                         if (netif_carrier_ok(tp->dev) &&
5755                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5756                                 need_setup = 1;
5757                         }
5758                         if (!netif_carrier_ok(tp->dev) &&
5759                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
5760                                          MAC_STATUS_SIGNAL_DET))) {
5761                                 need_setup = 1;
5762                         }
5763                         if (need_setup) {
5764                                 tw32_f(MAC_MODE,
5765                                      (tp->mac_mode &
5766                                       ~MAC_MODE_PORT_MODE_MASK));
5767                                 udelay(40);
5768                                 tw32_f(MAC_MODE, tp->mac_mode);
5769                                 udelay(40);
5770                                 tg3_setup_phy(tp, 0);
5771                         }
5772                 }
5773
5774                 tp->timer_counter = tp->timer_multiplier;
5775         }
5776
5777         /* Heartbeat is only sent once every 120 seconds.  */
5778         if (!--tp->asf_counter) {
5779                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5780                         u32 val;
5781
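                        /* Post the NICDRV_ALIVE command in the firmware
                         * mailbox and raise an RX CPU event so the ASF
                         * firmware knows the driver is still alive.
                         */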
5782                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5783                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5784                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5785                         val = tr32(GRC_RX_CPU_EVENT);
5786                         val |= (1 << 14);
5787                         tw32(GRC_RX_CPU_EVENT, val);
5788                 }
5789                 tp->asf_counter = tp->asf_multiplier;
5790         }
5791
5792         spin_unlock(&tp->tx_lock);
5793         spin_unlock_irqrestore(&tp->lock, flags);
5794
5795         tp->timer.expires = jiffies + tp->timer_offset;
5796         add_timer(&tp->timer);
5797 }
5798
5799 static int tg3_open(struct net_device *dev)
5800 {
5801         struct tg3 *tp = netdev_priv(dev);
5802         int err;
5803
5804         spin_lock_irq(&tp->lock);
5805         spin_lock(&tp->tx_lock);
5806
5807         tg3_disable_ints(tp);
5808         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5809
5810         spin_unlock(&tp->tx_lock);
5811         spin_unlock_irq(&tp->lock);
5812
5813         /* The placement of this call is tied
5814          * to the setup and use of Host TX descriptors.
5815          */
5816         err = tg3_alloc_consistent(tp);
5817         if (err)
5818                 return err;
5819
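        /* Try MSI only on 5750-class chips; the early 5750 A and B
         * steppings are skipped, presumably because of MSI errata on
         * those revisions.
         */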
5820         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5821             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
5822             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
5823                 if (pci_enable_msi(tp->pdev) == 0) {
5824                         u32 msi_mode;
5825
5826                         msi_mode = tr32(MSGINT_MODE);
5827                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
5828                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
5829                 }
5830         }
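        /* An MSI vector is never shared, so the MSI handler is
         * registered without SA_SHIRQ; legacy INTx may be shared and
         * keeps it.
         */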
5831         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
5832                 err = request_irq(tp->pdev->irq, tg3_msi,
5833                                   0, dev->name, dev);
5834         else
5835                 err = request_irq(tp->pdev->irq, tg3_interrupt,
5836                                   SA_SHIRQ, dev->name, dev);
5837
5838         if (err) {
5839                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5840                         pci_disable_msi(tp->pdev);
5841                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
5842                 }
5843                 tg3_free_consistent(tp);
5844                 return err;
5845         }
5846
5847         spin_lock_irq(&tp->lock);
5848         spin_lock(&tp->tx_lock);
5849
5850         err = tg3_init_hw(tp);
5851         if (err) {
5852                 tg3_halt(tp);
5853                 tg3_free_rings(tp);
5854         } else {
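                /* The timer fires every HZ/10 jiffies (ten times a
                 * second); timer_counter divides that down to the
                 * once-per-second work and asf_counter to the 120
                 * second ASF heartbeat.
                 */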
5855                 tp->timer_offset = HZ / 10;
5856                 tp->timer_counter = tp->timer_multiplier = 10;
5857                 tp->asf_counter = tp->asf_multiplier = (10 * 120);
5858
5859                 init_timer(&tp->timer);
5860                 tp->timer.expires = jiffies + tp->timer_offset;
5861                 tp->timer.data = (unsigned long) tp;
5862                 tp->timer.function = tg3_timer;
5863                 add_timer(&tp->timer);
5864
5865                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
5866         }
5867
5868         spin_unlock(&tp->tx_lock);
5869         spin_unlock_irq(&tp->lock);
5870
5871         if (err) {
5872                 free_irq(tp->pdev->irq, dev);
5873                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5874                         pci_disable_msi(tp->pdev);
5875                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
5876                 }
5877                 tg3_free_consistent(tp);
5878                 return err;
5879         }
5880
5881         spin_lock_irq(&tp->lock);
5882         spin_lock(&tp->tx_lock);
5883
5884         tg3_enable_ints(tp);
5885
5886         spin_unlock(&tp->tx_lock);
5887         spin_unlock_irq(&tp->lock);
5888
5889         netif_start_queue(dev);
5890
5891         return 0;
5892 }
5893
5894 #if 0
5895 /*static*/ void tg3_dump_state(struct tg3 *tp)
5896 {
5897         u32 val32, val32_2, val32_3, val32_4, val32_5;
5898         u16 val16;
5899         int i;
5900
5901         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
5902         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
5903         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
5904                val16, val32);
5905
5906         /* MAC block */
5907         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
5908                tr32(MAC_MODE), tr32(MAC_STATUS));
5909         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
5910                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
5911         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
5912                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
5913         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
5914                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
5915
5916         /* Send data initiator control block */
5917         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
5918                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
5919         printk("       SNDDATAI_STATSCTRL[%08x]\n",
5920                tr32(SNDDATAI_STATSCTRL));
5921
5922         /* Send data completion control block */
5923         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
5924
5925         /* Send BD ring selector block */
5926         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
5927                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
5928
5929         /* Send BD initiator control block */
5930         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
5931                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
5932
5933         /* Send BD completion control block */
5934         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
5935
5936         /* Receive list placement control block */
5937         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
5938                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
5939         printk("       RCVLPC_STATSCTRL[%08x]\n",
5940                tr32(RCVLPC_STATSCTRL));
5941
5942         /* Receive data and receive BD initiator control block */
5943         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
5944                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
5945
5946         /* Receive data completion control block */
5947         printk("DEBUG: RCVDCC_MODE[%08x]\n",
5948                tr32(RCVDCC_MODE));
5949
5950         /* Receive BD initiator control block */
5951         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
5952                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
5953
5954         /* Receive BD completion control block */
5955         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
5956                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
5957
5958         /* Receive list selector control block */
5959         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
5960                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
5961
5962         /* Mbuf cluster free block */
5963         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
5964                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
5965
5966         /* Host coalescing control block */
5967         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
5968                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
5969         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
5970                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5971                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5972         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
5973                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5974                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5975         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
5976                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
5977         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
5978                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
5979
5980         /* Memory arbiter control block */
5981         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
5982                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
5983
5984         /* Buffer manager control block */
5985         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
5986                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
5987         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
5988                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
5989         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
5990                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
5991                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
5992                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
5993
5994         /* Read DMA control block */
5995         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
5996                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
5997
5998         /* Write DMA control block */
5999         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6000                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6001
6002         /* DMA completion block */
6003         printk("DEBUG: DMAC_MODE[%08x]\n",
6004                tr32(DMAC_MODE));
6005
6006         /* GRC block */
6007         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6008                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6009         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6010                tr32(GRC_LOCAL_CTRL));
6011
6012         /* TG3_BDINFOs */
6013         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6014                tr32(RCVDBDI_JUMBO_BD + 0x0),
6015                tr32(RCVDBDI_JUMBO_BD + 0x4),
6016                tr32(RCVDBDI_JUMBO_BD + 0x8),
6017                tr32(RCVDBDI_JUMBO_BD + 0xc));
6018         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6019                tr32(RCVDBDI_STD_BD + 0x0),
6020                tr32(RCVDBDI_STD_BD + 0x4),
6021                tr32(RCVDBDI_STD_BD + 0x8),
6022                tr32(RCVDBDI_STD_BD + 0xc));
6023         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6024                tr32(RCVDBDI_MINI_BD + 0x0),
6025                tr32(RCVDBDI_MINI_BD + 0x4),
6026                tr32(RCVDBDI_MINI_BD + 0x8),
6027                tr32(RCVDBDI_MINI_BD + 0xc));
6028
6029         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6030         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6031         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6032         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6033         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6034                val32, val32_2, val32_3, val32_4);
6035
6036         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6037         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6038         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6039         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6040         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6041                val32, val32_2, val32_3, val32_4);
6042
6043         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6044         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6045         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6046         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6047         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6048         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6049                val32, val32_2, val32_3, val32_4, val32_5);
6050
6051         /* SW status block */
6052         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6053                tp->hw_status->status,
6054                tp->hw_status->status_tag,
6055                tp->hw_status->rx_jumbo_consumer,
6056                tp->hw_status->rx_consumer,
6057                tp->hw_status->rx_mini_consumer,
6058                tp->hw_status->idx[0].rx_producer,
6059                tp->hw_status->idx[0].tx_consumer);
6060
6061         /* SW statistics block */
6062         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6063                ((u32 *)tp->hw_stats)[0],
6064                ((u32 *)tp->hw_stats)[1],
6065                ((u32 *)tp->hw_stats)[2],
6066                ((u32 *)tp->hw_stats)[3]);
6067
6068         /* Mailboxes */
6069         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6070                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6071                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6072                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6073                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6074
6075         /* NIC side send descriptors. */
6076         for (i = 0; i < 6; i++) {
6077                 unsigned long txd;
6078
6079                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6080                         + (i * sizeof(struct tg3_tx_buffer_desc));
6081                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6082                        i,
6083                        readl(txd + 0x0), readl(txd + 0x4),
6084                        readl(txd + 0x8), readl(txd + 0xc));
6085         }
6086
6087         /* NIC side RX descriptors. */
6088         for (i = 0; i < 6; i++) {
6089                 unsigned long rxd;
6090
6091                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6092                         + (i * sizeof(struct tg3_rx_buffer_desc));
6093                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6094                        i,
6095                        readl(rxd + 0x0), readl(rxd + 0x4),
6096                        readl(rxd + 0x8), readl(rxd + 0xc));
6097                 rxd += (4 * sizeof(u32));
6098                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6099                        i,
6100                        readl(rxd + 0x0), readl(rxd + 0x4),
6101                        readl(rxd + 0x8), readl(rxd + 0xc));
6102         }
6103
6104         for (i = 0; i < 6; i++) {
6105                 unsigned long rxd;
6106
6107                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6108                         + (i * sizeof(struct tg3_rx_buffer_desc));
6109                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6110                        i,
6111                        readl(rxd + 0x0), readl(rxd + 0x4),
6112                        readl(rxd + 0x8), readl(rxd + 0xc));
6113                 rxd += (4 * sizeof(u32));
6114                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6115                        i,
6116                        readl(rxd + 0x0), readl(rxd + 0x4),
6117                        readl(rxd + 0x8), readl(rxd + 0xc));
6118         }
6119 }
6120 #endif
6121
6122 static struct net_device_stats *tg3_get_stats(struct net_device *);
6123 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6124
6125 static int tg3_close(struct net_device *dev)
6126 {
6127         struct tg3 *tp = netdev_priv(dev);
6128
6129         netif_stop_queue(dev);
6130
6131         del_timer_sync(&tp->timer);
6132
6133         spin_lock_irq(&tp->lock);
6134         spin_lock(&tp->tx_lock);
6135 #if 0
6136         tg3_dump_state(tp);
6137 #endif
6138
6139         tg3_disable_ints(tp);
6140
6141         tg3_halt(tp);
6142         tg3_free_rings(tp);
6143         tp->tg3_flags &=
6144                 ~(TG3_FLAG_INIT_COMPLETE |
6145                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6146         netif_carrier_off(tp->dev);
6147
6148         spin_unlock(&tp->tx_lock);
6149         spin_unlock_irq(&tp->lock);
6150
6151         free_irq(tp->pdev->irq, dev);
6152         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6153                 pci_disable_msi(tp->pdev);
6154                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6155         }
6156
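        /* Snapshot the counters before the DMA memory is released;
         * tg3_get_stats() and tg3_get_estats() report from these saved
         * copies once hw_stats is gone.
         */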
6157         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6158                sizeof(tp->net_stats_prev));
6159         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6160                sizeof(tp->estats_prev));
6161
6162         tg3_free_consistent(tp);
6163
6164         return 0;
6165 }
6166
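/* Read a 64-bit hardware statistic into an unsigned long.  On 32-bit
 * hosts only the low 32 bits fit, so the high word is dropped.
 */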
6167 static inline unsigned long get_stat64(tg3_stat64_t *val)
6168 {
6169         unsigned long ret;
6170
6171 #if (BITS_PER_LONG == 32)
6172         ret = val->low;
6173 #else
6174         ret = ((u64)val->high << 32) | ((u64)val->low);
6175 #endif
6176         return ret;
6177 }
6178
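/* On 5700/5701 boards with a copper PHY the CRC error count is read
 * from the PHY and accumulated in software instead of using the MAC's
 * rx_fcs_errors statistic.
 */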
6179 static unsigned long calc_crc_errors(struct tg3 *tp)
6180 {
6181         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6182
6183         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6184             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6185              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6186                 unsigned long flags;
6187                 u32 val;
6188
6189                 spin_lock_irqsave(&tp->lock, flags);
6190                 if (!tg3_readphy(tp, 0x1e, &val)) {
6191                         tg3_writephy(tp, 0x1e, val | 0x8000);
6192                         tg3_readphy(tp, 0x14, &val);
6193                 } else
6194                         val = 0;
6195                 spin_unlock_irqrestore(&tp->lock, flags);
6196
6197                 tp->phy_crc_errors += val;
6198
6199                 return tp->phy_crc_errors;
6200         }
6201
6202         return get_stat64(&hw_stats->rx_fcs_errors);
6203 }
6204
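/* Fold one 64-bit hardware counter into the ethtool stats, adding the
 * value saved at the last close so counters survive down/up cycles.
 */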
6205 #define ESTAT_ADD(member) \
6206         estats->member =        old_estats->member + \
6207                                 get_stat64(&hw_stats->member)
6208
6209 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6210 {
6211         struct tg3_ethtool_stats *estats = &tp->estats;
6212         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6213         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6214
6215         if (!hw_stats)
6216                 return old_estats;
6217
6218         ESTAT_ADD(rx_octets);
6219         ESTAT_ADD(rx_fragments);
6220         ESTAT_ADD(rx_ucast_packets);
6221         ESTAT_ADD(rx_mcast_packets);
6222         ESTAT_ADD(rx_bcast_packets);
6223         ESTAT_ADD(rx_fcs_errors);
6224         ESTAT_ADD(rx_align_errors);
6225         ESTAT_ADD(rx_xon_pause_rcvd);
6226         ESTAT_ADD(rx_xoff_pause_rcvd);
6227         ESTAT_ADD(rx_mac_ctrl_rcvd);
6228         ESTAT_ADD(rx_xoff_entered);
6229         ESTAT_ADD(rx_frame_too_long_errors);
6230         ESTAT_ADD(rx_jabbers);
6231         ESTAT_ADD(rx_undersize_packets);
6232         ESTAT_ADD(rx_in_length_errors);
6233         ESTAT_ADD(rx_out_length_errors);
6234         ESTAT_ADD(rx_64_or_less_octet_packets);
6235         ESTAT_ADD(rx_65_to_127_octet_packets);
6236         ESTAT_ADD(rx_128_to_255_octet_packets);
6237         ESTAT_ADD(rx_256_to_511_octet_packets);
6238         ESTAT_ADD(rx_512_to_1023_octet_packets);
6239         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6240         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6241         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6242         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6243         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6244
6245         ESTAT_ADD(tx_octets);
6246         ESTAT_ADD(tx_collisions);
6247         ESTAT_ADD(tx_xon_sent);
6248         ESTAT_ADD(tx_xoff_sent);
6249         ESTAT_ADD(tx_flow_control);
6250         ESTAT_ADD(tx_mac_errors);
6251         ESTAT_ADD(tx_single_collisions);
6252         ESTAT_ADD(tx_mult_collisions);
6253         ESTAT_ADD(tx_deferred);
6254         ESTAT_ADD(tx_excessive_collisions);
6255         ESTAT_ADD(tx_late_collisions);
6256         ESTAT_ADD(tx_collide_2times);
6257         ESTAT_ADD(tx_collide_3times);
6258         ESTAT_ADD(tx_collide_4times);
6259         ESTAT_ADD(tx_collide_5times);
6260         ESTAT_ADD(tx_collide_6times);
6261         ESTAT_ADD(tx_collide_7times);
6262         ESTAT_ADD(tx_collide_8times);
6263         ESTAT_ADD(tx_collide_9times);
6264         ESTAT_ADD(tx_collide_10times);
6265         ESTAT_ADD(tx_collide_11times);
6266         ESTAT_ADD(tx_collide_12times);
6267         ESTAT_ADD(tx_collide_13times);
6268         ESTAT_ADD(tx_collide_14times);
6269         ESTAT_ADD(tx_collide_15times);
6270         ESTAT_ADD(tx_ucast_packets);
6271         ESTAT_ADD(tx_mcast_packets);
6272         ESTAT_ADD(tx_bcast_packets);
6273         ESTAT_ADD(tx_carrier_sense_errors);
6274         ESTAT_ADD(tx_discards);
6275         ESTAT_ADD(tx_errors);
6276
6277         ESTAT_ADD(dma_writeq_full);
6278         ESTAT_ADD(dma_write_prioq_full);
6279         ESTAT_ADD(rxbds_empty);
6280         ESTAT_ADD(rx_discards);
6281         ESTAT_ADD(rx_errors);
6282         ESTAT_ADD(rx_threshold_hit);
6283
6284         ESTAT_ADD(dma_readq_full);
6285         ESTAT_ADD(dma_read_prioq_full);
6286         ESTAT_ADD(tx_comp_queue_full);
6287
6288         ESTAT_ADD(ring_set_send_prod_index);
6289         ESTAT_ADD(ring_status_update);
6290         ESTAT_ADD(nic_irqs);
6291         ESTAT_ADD(nic_avoided_irqs);
6292         ESTAT_ADD(nic_tx_threshold_hit);
6293
6294         return estats;
6295 }
6296
6297 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6298 {
6299         struct tg3 *tp = netdev_priv(dev);
6300         struct net_device_stats *stats = &tp->net_stats;
6301         struct net_device_stats *old_stats = &tp->net_stats_prev;
6302         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6303
6304         if (!hw_stats)
6305                 return old_stats;
6306
6307         stats->rx_packets = old_stats->rx_packets +
6308                 get_stat64(&hw_stats->rx_ucast_packets) +
6309                 get_stat64(&hw_stats->rx_mcast_packets) +
6310                 get_stat64(&hw_stats->rx_bcast_packets);
6311
6312         stats->tx_packets = old_stats->tx_packets +
6313                 get_stat64(&hw_stats->tx_ucast_packets) +
6314                 get_stat64(&hw_stats->tx_mcast_packets) +
6315                 get_stat64(&hw_stats->tx_bcast_packets);
6316
6317         stats->rx_bytes = old_stats->rx_bytes +
6318                 get_stat64(&hw_stats->rx_octets);
6319         stats->tx_bytes = old_stats->tx_bytes +
6320                 get_stat64(&hw_stats->tx_octets);
6321
6322         stats->rx_errors = old_stats->rx_errors +
6323                 get_stat64(&hw_stats->rx_errors) +
6324                 get_stat64(&hw_stats->rx_discards);
6325         stats->tx_errors = old_stats->tx_errors +
6326                 get_stat64(&hw_stats->tx_errors) +
6327                 get_stat64(&hw_stats->tx_mac_errors) +
6328                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6329                 get_stat64(&hw_stats->tx_discards);
6330
6331         stats->multicast = old_stats->multicast +
6332                 get_stat64(&hw_stats->rx_mcast_packets);
6333         stats->collisions = old_stats->collisions +
6334                 get_stat64(&hw_stats->tx_collisions);
6335
6336         stats->rx_length_errors = old_stats->rx_length_errors +
6337                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6338                 get_stat64(&hw_stats->rx_undersize_packets);
6339
6340         stats->rx_over_errors = old_stats->rx_over_errors +
6341                 get_stat64(&hw_stats->rxbds_empty);
6342         stats->rx_frame_errors = old_stats->rx_frame_errors +
6343                 get_stat64(&hw_stats->rx_align_errors);
6344         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6345                 get_stat64(&hw_stats->tx_discards);
6346         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6347                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6348
6349         stats->rx_crc_errors = old_stats->rx_crc_errors +
6350                 calc_crc_errors(tp);
6351
6352         return stats;
6353 }
6354
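/* Bit-serial CRC-32 over 'buf' using the reflected polynomial
 * 0xedb88320 (the Ethernet FCS algorithm); the result is used to
 * derive the multicast hash filter bit below.
 */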
6355 static inline u32 calc_crc(unsigned char *buf, int len)
6356 {
6357         u32 reg;
6358         u32 tmp;
6359         int j, k;
6360
6361         reg = 0xffffffff;
6362
6363         for (j = 0; j < len; j++) {
6364                 reg ^= buf[j];
6365
6366                 for (k = 0; k < 8; k++) {
6367                         tmp = reg & 0x01;
6368
6369                         reg >>= 1;
6370
6371                         if (tmp) {
6372                                 reg ^= 0xedb88320;
6373                         }
6374                 }
6375         }
6376
6377         return ~reg;
6378 }
6379
6380 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6381 {
6382         /* accept or reject all multicast frames */
6383         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6384         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6385         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6386         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6387 }
6388
6389 static void __tg3_set_rx_mode(struct net_device *dev)
6390 {
6391         struct tg3 *tp = netdev_priv(dev);
6392         u32 rx_mode;
6393
6394         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6395                                   RX_MODE_KEEP_VLAN_TAG);
6396
6397         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6398          * flag clear.
6399          */
6400 #if TG3_VLAN_TAG_USED
6401         if (!tp->vlgrp &&
6402             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6403                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6404 #else
6405         /* By definition, VLAN is always disabled in this
6406          * case.
6407          */
6408         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6409                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6410 #endif
6411
6412         if (dev->flags & IFF_PROMISC) {
6413                 /* Promiscuous mode. */
6414                 rx_mode |= RX_MODE_PROMISC;
6415         } else if (dev->flags & IFF_ALLMULTI) {
6416                 /* Accept all multicast. */
6417                 tg3_set_multi (tp, 1);
6418         } else if (dev->mc_count < 1) {
6419                 /* Reject all multicast. */
6420                 tg3_set_multi (tp, 0);
6421         } else {
6422                 /* Accept one or more multicast(s). */
6423                 struct dev_mc_list *mclist;
6424                 unsigned int i;
6425                 u32 mc_filter[4] = { 0, };
6426                 u32 regidx;
6427                 u32 bit;
6428                 u32 crc;
6429
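                /* Hash each address into the 128-bit multicast filter:
                 * the low 7 bits of the inverted CRC select one of the
                 * four 32-bit MAC_HASH registers and a bit within it.
                 */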
6430                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6431                      i++, mclist = mclist->next) {
6432
6433                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
6434                         bit = ~crc & 0x7f;
6435                         regidx = (bit & 0x60) >> 5;
6436                         bit &= 0x1f;
6437                         mc_filter[regidx] |= (1 << bit);
6438                 }
6439
6440                 tw32(MAC_HASH_REG_0, mc_filter[0]);
6441                 tw32(MAC_HASH_REG_1, mc_filter[1]);
6442                 tw32(MAC_HASH_REG_2, mc_filter[2]);
6443                 tw32(MAC_HASH_REG_3, mc_filter[3]);
6444         }
6445
6446         if (rx_mode != tp->rx_mode) {
6447                 tp->rx_mode = rx_mode;
6448                 tw32_f(MAC_RX_MODE, rx_mode);
6449                 udelay(10);
6450         }
6451 }
6452
6453 static void tg3_set_rx_mode(struct net_device *dev)
6454 {
6455         struct tg3 *tp = netdev_priv(dev);
6456
6457         spin_lock_irq(&tp->lock);
6458         spin_lock(&tp->tx_lock);
6459         __tg3_set_rx_mode(dev);
6460         spin_unlock(&tp->tx_lock);
6461         spin_unlock_irq(&tp->lock);
6462 }
6463
6464 #define TG3_REGDUMP_LEN         (32 * 1024)
6465
6466 static int tg3_get_regs_len(struct net_device *dev)
6467 {
6468         return TG3_REGDUMP_LEN;
6469 }
6470
6471 static void tg3_get_regs(struct net_device *dev,
6472                 struct ethtool_regs *regs, void *_p)
6473 {
6474         u32 *p = _p;
6475         struct tg3 *tp = netdev_priv(dev);
6476         u8 *orig_p = _p;
6477         int i;
6478
6479         regs->version = 0;
6480
6481         memset(p, 0, TG3_REGDUMP_LEN);
6482
6483         spin_lock_irq(&tp->lock);
6484         spin_lock(&tp->tx_lock);
6485
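/* Register dump helpers: copy 'len' bytes of register space starting at
 * 'base' into the output buffer at the same offset, so the dump layout
 * mirrors the chip's register map.
 */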
6486 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
6487 #define GET_REG32_LOOP(base,len)                \
6488 do {    p = (u32 *)(orig_p + (base));           \
6489         for (i = 0; i < len; i += 4)            \
6490                 __GET_REG32((base) + i);        \
6491 } while (0)
6492 #define GET_REG32_1(reg)                        \
6493 do {    p = (u32 *)(orig_p + (reg));            \
6494         __GET_REG32((reg));                     \
6495 } while (0)
6496
6497         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6498         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6499         GET_REG32_LOOP(MAC_MODE, 0x4f0);
6500         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6501         GET_REG32_1(SNDDATAC_MODE);
6502         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6503         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6504         GET_REG32_1(SNDBDC_MODE);
6505         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6506         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6507         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6508         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6509         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6510         GET_REG32_1(RCVDCC_MODE);
6511         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6512         GET_REG32_LOOP(RCVCC_MODE, 0x14);
6513         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6514         GET_REG32_1(MBFREE_MODE);
6515         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6516         GET_REG32_LOOP(MEMARB_MODE, 0x10);
6517         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6518         GET_REG32_LOOP(RDMAC_MODE, 0x08);
6519         GET_REG32_LOOP(WDMAC_MODE, 0x08);
6520         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6521         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6522         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6523         GET_REG32_LOOP(FTQ_RESET, 0x120);
6524         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6525         GET_REG32_1(DMAC_MODE);
6526         GET_REG32_LOOP(GRC_MODE, 0x4c);
6527         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6528                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6529
6530 #undef __GET_REG32
6531 #undef GET_REG32_LOOP
6532 #undef GET_REG32_1
6533
6534         spin_unlock(&tp->tx_lock);
6535         spin_unlock_irq(&tp->lock);
6536 }
6537
6538 static int tg3_get_eeprom_len(struct net_device *dev)
6539 {
6540         struct tg3 *tp = netdev_priv(dev);
6541
6542         return tp->nvram_size;
6543 }
6544
6545 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
6546
6547 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6548 {
6549         struct tg3 *tp = netdev_priv(dev);
6550         int ret;
6551         u8  *pd;
6552         u32 i, offset, len, val, b_offset, b_count;
6553
6554         offset = eeprom->offset;
6555         len = eeprom->len;
6556         eeprom->len = 0;
6557
6558         eeprom->magic = TG3_EEPROM_MAGIC;
6559
6560         if (offset & 3) {
6561                 /* adjustments to start on required 4 byte boundary */
6562                 b_offset = offset & 3;
6563                 b_count = 4 - b_offset;
6564                 if (b_count > len) {
6565                         /* i.e. offset=1 len=2 */
6566                         b_count = len;
6567                 }
6568                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
6569                 if (ret)
6570                         return ret;
6571                 val = cpu_to_le32(val);
6572                 memcpy(data, ((char*)&val) + b_offset, b_count);
6573                 len -= b_count;
6574                 offset += b_count;
6575                 eeprom->len += b_count;
6576         }
6577
6578         /* read bytes up to the last 4 byte boundary */
6579         pd = &data[eeprom->len];
6580         for (i = 0; i < (len - (len & 3)); i += 4) {
6581                 ret = tg3_nvram_read(tp, offset + i, &val);
6582                 if (ret) {
6583                         eeprom->len += i;
6584                         return ret;
6585                 }
6586                 val = cpu_to_le32(val);
6587                 memcpy(pd + i, &val, 4);
6588         }
6589         eeprom->len += i;
6590
6591         if (len & 3) {
6592                 /* read last bytes not ending on 4 byte boundary */
6593                 pd = &data[eeprom->len];
6594                 b_count = len & 3;
6595                 b_offset = offset + len - b_count;
6596                 ret = tg3_nvram_read(tp, b_offset, &val);
6597                 if (ret)
6598                         return ret;
6599                 val = cpu_to_le32(val);
6600                 memcpy(pd, ((char*)&val), b_count);
6601                 eeprom->len += b_count;
6602         }
6603         return 0;
6604 }
6605
6606 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
6607
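/* NVRAM writes must cover whole 4-byte-aligned words; if the request is
 * not aligned, read the partial words at either end and merge everything
 * into a temporary buffer before writing.
 */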
6608 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6609 {
6610         struct tg3 *tp = netdev_priv(dev);
6611         int ret;
6612         u32 offset, len, b_offset, odd_len, start, end;
6613         u8 *buf;
6614
6615         if (eeprom->magic != TG3_EEPROM_MAGIC)
6616                 return -EINVAL;
6617
6618         offset = eeprom->offset;
6619         len = eeprom->len;
6620
6621         if ((b_offset = (offset & 3))) {
6622                 /* adjustments to start on required 4 byte boundary */
6623                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
6624                 if (ret)
6625                         return ret;
6626                 start = cpu_to_le32(start);
6627                 len += b_offset;
6628                 offset &= ~3;
6629                 if (len < 4)
6630                         len = 4;
6631         }
6632
6633         odd_len = 0;
6634         if (len & 3) {
6635                 /* adjustments to end on required 4 byte boundary */
6636                 odd_len = 1;
6637                 len = (len + 3) & ~3;
6638                 ret = tg3_nvram_read(tp, offset+len-4, &end);
6639                 if (ret)
6640                         return ret;
6641                 end = cpu_to_le32(end);
6642         }
6643
6644         buf = data;
6645         if (b_offset || odd_len) {
6646                 buf = kmalloc(len, GFP_KERNEL);
6647                 if (buf == NULL)
6648                         return -ENOMEM;
6649                 if (b_offset)
6650                         memcpy(buf, &start, 4);
6651                 if (odd_len)
6652                         memcpy(buf+len-4, &end, 4);
6653                 memcpy(buf + b_offset, data, eeprom->len);
6654         }
6655
6656         ret = tg3_nvram_write_block(tp, offset, len, buf);
6657
6658         if (buf != data)
6659                 kfree(buf);
6660
6661         return ret;
6662 }
6663
6664 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6665 {
6666         struct tg3 *tp = netdev_priv(dev);
6667   
6668         cmd->supported = (SUPPORTED_Autoneg);
6669
6670         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6671                 cmd->supported |= (SUPPORTED_1000baseT_Half |
6672                                    SUPPORTED_1000baseT_Full);
6673
6674         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
6675                 cmd->supported |= (SUPPORTED_100baseT_Half |
6676                                   SUPPORTED_100baseT_Full |
6677                                   SUPPORTED_10baseT_Half |
6678                                   SUPPORTED_10baseT_Full |
6679                                   SUPPORTED_MII);
6680         else
6681                 cmd->supported |= SUPPORTED_FIBRE;
6682   
6683         cmd->advertising = tp->link_config.advertising;
6684         if (netif_running(dev)) {
6685                 cmd->speed = tp->link_config.active_speed;
6686                 cmd->duplex = tp->link_config.active_duplex;
6687         }
6688         cmd->port = 0;
6689         cmd->phy_address = PHY_ADDR;
6690         cmd->transceiver = 0;
6691         cmd->autoneg = tp->link_config.autoneg;
6692         cmd->maxtxpkt = 0;
6693         cmd->maxrxpkt = 0;
6694         return 0;
6695 }
6696   
6697 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6698 {
6699         struct tg3 *tp = netdev_priv(dev);
6700   
6701         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6702                 /* These are the only valid advertisement bits allowed.  */
6703                 if (cmd->autoneg == AUTONEG_ENABLE &&
6704                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6705                                           ADVERTISED_1000baseT_Full |
6706                                           ADVERTISED_Autoneg |
6707                                           ADVERTISED_FIBRE)))
6708                         return -EINVAL;
6709         }
6710
6711         spin_lock_irq(&tp->lock);
6712         spin_lock(&tp->tx_lock);
6713
6714         tp->link_config.autoneg = cmd->autoneg;
6715         if (cmd->autoneg == AUTONEG_ENABLE) {
6716                 tp->link_config.advertising = cmd->advertising;
6717                 tp->link_config.speed = SPEED_INVALID;
6718                 tp->link_config.duplex = DUPLEX_INVALID;
6719         } else {
6720                 tp->link_config.advertising = 0;
6721                 tp->link_config.speed = cmd->speed;
6722                 tp->link_config.duplex = cmd->duplex;
6723         }
6724   
6725         if (netif_running(dev))
6726                 tg3_setup_phy(tp, 1);
6727
6728         spin_unlock(&tp->tx_lock);
6729         spin_unlock_irq(&tp->lock);
6730   
6731         return 0;
6732 }
6733   
6734 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6735 {
6736         struct tg3 *tp = netdev_priv(dev);
6737   
6738         strcpy(info->driver, DRV_MODULE_NAME);
6739         strcpy(info->version, DRV_MODULE_VERSION);
6740         strcpy(info->bus_info, pci_name(tp->pdev));
6741 }
6742   
6743 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6744 {
6745         struct tg3 *tp = netdev_priv(dev);
6746   
6747         wol->supported = WAKE_MAGIC;
6748         wol->wolopts = 0;
6749         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6750                 wol->wolopts = WAKE_MAGIC;
6751         memset(&wol->sopass, 0, sizeof(wol->sopass));
6752 }
6753   
6754 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6755 {
6756         struct tg3 *tp = netdev_priv(dev);
6757   
6758         if (wol->wolopts & ~WAKE_MAGIC)
6759                 return -EINVAL;
6760         if ((wol->wolopts & WAKE_MAGIC) &&
6761             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
6762             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
6763                 return -EINVAL;
6764   
6765         spin_lock_irq(&tp->lock);
6766         if (wol->wolopts & WAKE_MAGIC)
6767                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
6768         else
6769                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6770         spin_unlock_irq(&tp->lock);
6771   
6772         return 0;
6773 }
6774   
6775 static u32 tg3_get_msglevel(struct net_device *dev)
6776 {
6777         struct tg3 *tp = netdev_priv(dev);
6778         return tp->msg_enable;
6779 }
6780   
6781 static void tg3_set_msglevel(struct net_device *dev, u32 value)
6782 {
6783         struct tg3 *tp = netdev_priv(dev);
6784         tp->msg_enable = value;
6785 }
6786   
6787 #if TG3_TSO_SUPPORT != 0
6788 static int tg3_set_tso(struct net_device *dev, u32 value)
6789 {
6790         struct tg3 *tp = netdev_priv(dev);
6791
6792         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6793                 if (value)
6794                         return -EINVAL;
6795                 return 0;
6796         }
6797         return ethtool_op_set_tso(dev, value);
6798 }
6799 #endif
6800   
6801 static int tg3_nway_reset(struct net_device *dev)
6802 {
6803         struct tg3 *tp = netdev_priv(dev);
6804         u32 bmcr;
6805         int r;
6806   
6807         if (!netif_running(dev))
6808                 return -EAGAIN;
6809
6810         spin_lock_irq(&tp->lock);
6811         r = -EINVAL;
6812         tg3_readphy(tp, MII_BMCR, &bmcr);
6813         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
6814             (bmcr & BMCR_ANENABLE)) {
6815                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
6816                 r = 0;
6817         }
6818         spin_unlock_irq(&tp->lock);
6819   
6820         return r;
6821 }
6822   
6823 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6824 {
6825         struct tg3 *tp = netdev_priv(dev);
6826   
6827         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
6828         ering->rx_mini_max_pending = 0;
6829         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
6830
6831         ering->rx_pending = tp->rx_pending;
6832         ering->rx_mini_pending = 0;
6833         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
6834         ering->tx_pending = tp->tx_pending;
6835 }
6836   
6837 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6838 {
6839         struct tg3 *tp = netdev_priv(dev);
6840   
6841         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
6842             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
6843             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
6844                 return -EINVAL;
6845   
6846         if (netif_running(dev))
6847                 tg3_netif_stop(tp);
6848
6849         spin_lock_irq(&tp->lock);
6850         spin_lock(&tp->tx_lock);
6851   
6852         tp->rx_pending = ering->rx_pending;
6853
6854         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
6855             tp->rx_pending > 63)
6856                 tp->rx_pending = 63;
6857         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
6858         tp->tx_pending = ering->tx_pending;
6859
6860         if (netif_running(dev)) {
6861                 tg3_halt(tp);
6862                 tg3_init_hw(tp);
6863                 tg3_netif_start(tp);
6864         }
6865
6866         spin_unlock(&tp->tx_lock);
6867         spin_unlock_irq(&tp->lock);
6868   
6869         return 0;
6870 }
6871   
6872 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6873 {
6874         struct tg3 *tp = netdev_priv(dev);
6875   
6876         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
6877         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
6878         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
6879 }
6880   
6881 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6882 {
6883         struct tg3 *tp = netdev_priv(dev);
6884   
6885         if (netif_running(dev))
6886                 tg3_netif_stop(tp);
6887
6888         spin_lock_irq(&tp->lock);
6889         spin_lock(&tp->tx_lock);
6890         if (epause->autoneg)
6891                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
6892         else
6893                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
6894         if (epause->rx_pause)
6895                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
6896         else
6897                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
6898         if (epause->tx_pause)
6899                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
6900         else
6901                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
6902
6903         if (netif_running(dev)) {
6904                 tg3_halt(tp);
6905                 tg3_init_hw(tp);
6906                 tg3_netif_start(tp);
6907         }
6908         spin_unlock(&tp->tx_lock);
6909         spin_unlock_irq(&tp->lock);
6910   
6911         return 0;
6912 }
6913   
6914 static u32 tg3_get_rx_csum(struct net_device *dev)
6915 {
6916         struct tg3 *tp = netdev_priv(dev);
6917         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
6918 }
6919   
6920 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
6921 {
6922         struct tg3 *tp = netdev_priv(dev);
6923   
6924         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6925                 if (data != 0)
6926                         return -EINVAL;
6927                 return 0;
6928         }
6929   
6930         spin_lock_irq(&tp->lock);
6931         if (data)
6932                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
6933         else
6934                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
6935         spin_unlock_irq(&tp->lock);
6936   
6937         return 0;
6938 }
6939   
6940 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
6941 {
6942         struct tg3 *tp = netdev_priv(dev);
6943   
6944         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6945                 if (data != 0)
6946                         return -EINVAL;
6947                 return 0;
6948         }
6949   
6950         if (data)
6951                 dev->features |= NETIF_F_IP_CSUM;
6952         else
6953                 dev->features &= ~NETIF_F_IP_CSUM;
6954
6955         return 0;
6956 }
6957
6958 static int tg3_get_stats_count (struct net_device *dev)
6959 {
6960         return TG3_NUM_STATS;
6961 }
6962
6963 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
6964 {
6965         switch (stringset) {
6966         case ETH_SS_STATS:
6967                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
6968                 break;
6969         default:
6970                 WARN_ON(1);     /* we need a WARN() */
6971                 break;
6972         }
6973 }
6974
6975 static void tg3_get_ethtool_stats (struct net_device *dev,
6976                                    struct ethtool_stats *estats, u64 *tmp_stats)
6977 {
6978         struct tg3 *tp = netdev_priv(dev);
6979         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
6980 }
6981
6982 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6983 {
6984         struct mii_ioctl_data *data = if_mii(ifr);
6985         struct tg3 *tp = netdev_priv(dev);
6986         int err;
6987
6988         switch(cmd) {
6989         case SIOCGMIIPHY:
6990                 data->phy_id = PHY_ADDR;
6991
6992                 /* fallthru */
6993         case SIOCGMIIREG: {
6994                 u32 mii_regval;
6995
6996                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
6997                         break;                  /* We have no PHY */
6998
6999                 spin_lock_irq(&tp->lock);
7000                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
7001                 spin_unlock_irq(&tp->lock);
7002
7003                 data->val_out = mii_regval;
7004
7005                 return err;
7006         }
7007
7008         case SIOCSMIIREG:
7009                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7010                         break;                  /* We have no PHY */
7011
7012                 if (!capable(CAP_NET_ADMIN))
7013                         return -EPERM;
7014
7015                 spin_lock_irq(&tp->lock);
7016                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
7017                 spin_unlock_irq(&tp->lock);
7018
7019                 return err;
7020
7021         default:
7022                 /* do nothing */
7023                 break;
7024         }
7025         return -EOPNOTSUPP;
7026 }
7027
7028 #if TG3_VLAN_TAG_USED
7029 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
7030 {
7031         struct tg3 *tp = netdev_priv(dev);
7032
7033         spin_lock_irq(&tp->lock);
7034         spin_lock(&tp->tx_lock);
7035
7036         tp->vlgrp = grp;
7037
7038         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
7039         __tg3_set_rx_mode(dev);
7040
7041         spin_unlock(&tp->tx_lock);
7042         spin_unlock_irq(&tp->lock);
7043 }
7044
7045 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
7046 {
7047         struct tg3 *tp = netdev_priv(dev);
7048
7049         spin_lock_irq(&tp->lock);
7050         spin_lock(&tp->tx_lock);
7051         if (tp->vlgrp)
7052                 tp->vlgrp->vlan_devices[vid] = NULL;
7053         spin_unlock(&tp->tx_lock);
7054         spin_unlock_irq(&tp->lock);
7055 }
7056 #endif
7057
7058 static struct ethtool_ops tg3_ethtool_ops = {
7059         .get_settings           = tg3_get_settings,
7060         .set_settings           = tg3_set_settings,
7061         .get_drvinfo            = tg3_get_drvinfo,
7062         .get_regs_len           = tg3_get_regs_len,
7063         .get_regs               = tg3_get_regs,
7064         .get_wol                = tg3_get_wol,
7065         .set_wol                = tg3_set_wol,
7066         .get_msglevel           = tg3_get_msglevel,
7067         .set_msglevel           = tg3_set_msglevel,
7068         .nway_reset             = tg3_nway_reset,
7069         .get_link               = ethtool_op_get_link,
7070         .get_eeprom_len         = tg3_get_eeprom_len,
7071         .get_eeprom             = tg3_get_eeprom,
7072         .set_eeprom             = tg3_set_eeprom,
7073         .get_ringparam          = tg3_get_ringparam,
7074         .set_ringparam          = tg3_set_ringparam,
7075         .get_pauseparam         = tg3_get_pauseparam,
7076         .set_pauseparam         = tg3_set_pauseparam,
7077         .get_rx_csum            = tg3_get_rx_csum,
7078         .set_rx_csum            = tg3_set_rx_csum,
7079         .get_tx_csum            = ethtool_op_get_tx_csum,
7080         .set_tx_csum            = tg3_set_tx_csum,
7081         .get_sg                 = ethtool_op_get_sg,
7082         .set_sg                 = ethtool_op_set_sg,
7083 #if TG3_TSO_SUPPORT != 0
7084         .get_tso                = ethtool_op_get_tso,
7085         .set_tso                = tg3_set_tso,
7086 #endif
7087         .get_strings            = tg3_get_strings,
7088         .get_stats_count        = tg3_get_stats_count,
7089         .get_ethtool_stats      = tg3_get_ethtool_stats,
7090 };
7091
7092 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
7093 {
7094         u32 cursize, val;
7095
7096         tp->nvram_size = EEPROM_CHIP_SIZE;
7097
7098         if (tg3_nvram_read(tp, 0, &val) != 0)
7099                 return;
7100
7101         if (swab32(val) != TG3_EEPROM_MAGIC)
7102                 return;
7103
7104         /*
7105          * Size the chip by reading offsets at increasing powers of two.
7106          * When we encounter our validation signature, we know the addressing
7107          * has wrapped around, and thus have our chip size.
7108          */
7109         cursize = 0x800;
7110
7111         while (cursize < tp->nvram_size) {
7112                 if (tg3_nvram_read(tp, cursize, &val) != 0)
7113                         return;
7114
7115                 if (swab32(val) == TG3_EEPROM_MAGIC)
7116                         break;
7117
7118                 cursize <<= 1;
7119         }
7120
7121         tp->nvram_size = cursize;
7122 }
7123                 
7124 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
7125 {
7126         u32 val;
7127
7128         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
7129                 if (val != 0) {
7130                         tp->nvram_size = (val >> 16) * 1024;
7131                         return;
7132                 }
7133         }
7134         tp->nvram_size = 0x20000;
7135 }
7136
7137 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
7138 {
7139         u32 nvcfg1;
7140
7141         nvcfg1 = tr32(NVRAM_CFG1);
7142         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
7143                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
7144         }
7145         else {
7146                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
7147                 tw32(NVRAM_CFG1, nvcfg1);
7148         }
7149
7150         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7151                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
7152                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
7153                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7154                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7155                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7156                                 break;
7157                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
7158                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7159                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
7160                                 break;
7161                         case FLASH_VENDOR_ATMEL_EEPROM:
7162                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7163                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
7164                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7165                                 break;
7166                         case FLASH_VENDOR_ST:
7167                                 tp->nvram_jedecnum = JEDEC_ST;
7168                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
7169                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7170                                 break;
7171                         case FLASH_VENDOR_SAIFUN:
7172                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
7173                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
7174                                 break;
7175                         case FLASH_VENDOR_SST_SMALL:
7176                         case FLASH_VENDOR_SST_LARGE:
7177                                 tp->nvram_jedecnum = JEDEC_SST;
7178                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
7179                                 break;
7180                 }
7181         }
7182         else {
7183                 tp->nvram_jedecnum = JEDEC_ATMEL;
7184                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7185                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7186         }
7187 }
7188
7189 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
7190 {
7191         u32 nvcfg1;
7192
7193         nvcfg1 = tr32(NVRAM_CFG1);
7194
7195         /* NVRAM protection for TPM */
7196         if (nvcfg1 & (1 << 27))
7197                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
7198
7199         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
7200                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
7201                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
7202                         tp->nvram_jedecnum = JEDEC_ATMEL;
7203                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7204                         break;
7205                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
7206                         tp->nvram_jedecnum = JEDEC_ATMEL;
7207                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7208                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
7209                         break;
7210                 case FLASH_5752VENDOR_ST_M45PE10:
7211                 case FLASH_5752VENDOR_ST_M45PE20:
7212                 case FLASH_5752VENDOR_ST_M45PE40:
7213                         tp->nvram_jedecnum = JEDEC_ST;
7214                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7215                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
7216                         break;
7217         }
7218
7219         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
7220                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
7221                         case FLASH_5752PAGE_SIZE_256:
7222                                 tp->nvram_pagesize = 256;
7223                                 break;
7224                         case FLASH_5752PAGE_SIZE_512:
7225                                 tp->nvram_pagesize = 512;
7226                                 break;
7227                         case FLASH_5752PAGE_SIZE_1K:
7228                                 tp->nvram_pagesize = 1024;
7229                                 break;
7230                         case FLASH_5752PAGE_SIZE_2K:
7231                                 tp->nvram_pagesize = 2048;
7232                                 break;
7233                         case FLASH_5752PAGE_SIZE_4K:
7234                                 tp->nvram_pagesize = 4096;
7235                                 break;
7236                         case FLASH_5752PAGE_SIZE_264:
7237                                 tp->nvram_pagesize = 264;
7238                                 break;
7239                 }
7240         }
7241         else {
7242                 /* For eeprom, set pagesize to maximum eeprom size */
7243                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
7244
7245                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
7246                 tw32(NVRAM_CFG1, nvcfg1);
7247         }
7248 }
7249
7250 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
7251 static void __devinit tg3_nvram_init(struct tg3 *tp)
7252 {
7253         int j;
7254
7255         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
7256                 return;
7257
7258         tw32_f(GRC_EEPROM_ADDR,
7259              (EEPROM_ADDR_FSM_RESET |
7260               (EEPROM_DEFAULT_CLOCK_PERIOD <<
7261                EEPROM_ADDR_CLKPERD_SHIFT)));
7262
7263         /* XXX schedule_timeout() ... */
7264         for (j = 0; j < 100; j++)
7265                 udelay(10);
7266
7267         /* Enable seeprom accesses. */
7268         tw32_f(GRC_LOCAL_CTRL,
7269              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
7270         udelay(100);
7271
7272         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
7273             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
7274                 tp->tg3_flags |= TG3_FLAG_NVRAM;
7275
7276                 tg3_enable_nvram_access(tp);
7277
7278                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7279                         tg3_get_5752_nvram_info(tp);
7280                 else
7281                         tg3_get_nvram_info(tp);
7282
7283                 tg3_get_nvram_size(tp);
7284
7285                 tg3_disable_nvram_access(tp);
7286
7287         } else {
7288                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
7289
7290                 tg3_get_eeprom_size(tp);
7291         }
7292 }
7293
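/* Read one 32-bit word through the legacy serial-EEPROM interface:
 * program the address, start the cycle, then poll for
 * EEPROM_ADDR_COMPLETE before fetching GRC_EEPROM_DATA.
 */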
7294 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
7295                                         u32 offset, u32 *val)
7296 {
7297         u32 tmp;
7298         int i;
7299
7300         if (offset > EEPROM_ADDR_ADDR_MASK ||
7301             (offset % 4) != 0)
7302                 return -EINVAL;
7303
7304         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
7305                                         EEPROM_ADDR_DEVID_MASK |
7306                                         EEPROM_ADDR_READ);
7307         tw32(GRC_EEPROM_ADDR,
7308              tmp |
7309              (0 << EEPROM_ADDR_DEVID_SHIFT) |
7310              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
7311               EEPROM_ADDR_ADDR_MASK) |
7312              EEPROM_ADDR_READ | EEPROM_ADDR_START);
7313
7314         for (i = 0; i < 10000; i++) {
7315                 tmp = tr32(GRC_EEPROM_ADDR);
7316
7317                 if (tmp & EEPROM_ADDR_COMPLETE)
7318                         break;
7319                 udelay(100);
7320         }
7321         if (!(tmp & EEPROM_ADDR_COMPLETE))
7322                 return -EBUSY;
7323
7324         *val = tr32(GRC_EEPROM_DATA);
7325         return 0;
7326 }
7327
7328 #define NVRAM_CMD_TIMEOUT 10000
7329
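/* Issue a command to the NVRAM controller and poll for NVRAM_CMD_DONE,
 * giving up after NVRAM_CMD_TIMEOUT polls of 10us each.
 */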
7330 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
7331 {
7332         int i;
7333
7334         tw32(NVRAM_CMD, nvram_cmd);
7335         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
7336                 udelay(10);
7337                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
7338                         udelay(10);
7339                         break;
7340                 }
7341         }
7342         if (i == NVRAM_CMD_TIMEOUT) {
7343                 return -EBUSY;
7344         }
7345         return 0;
7346 }
7347
7348 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
7349 {
7350         int ret;
7351
7352         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7353                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
7354                 return -EINVAL;
7355         }
7356
7357         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
7358                 return tg3_nvram_read_using_eeprom(tp, offset, val);
7359
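        /* Buffered Atmel (AT45-style) flash is addressed by page index
         * and byte within the page rather than by a flat byte offset,
         * so re-encode the offset accordingly.
         */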
7360         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
7361                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7362                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7363
7364                 offset = ((offset / tp->nvram_pagesize) <<
7365                           ATMEL_AT45DB0X1B_PAGE_POS) +
7366                         (offset % tp->nvram_pagesize);
7367         }
7368
7369         if (offset > NVRAM_ADDR_MSK)
7370                 return -EINVAL;
7371
7372         tg3_nvram_lock(tp);
7373
7374         tg3_enable_nvram_access(tp);
7375
7376         tw32(NVRAM_ADDR, offset);
7377         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
7378                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
7379
7380         if (ret == 0)
7381                 *val = swab32(tr32(NVRAM_RDDATA));
7382
7383         tg3_nvram_unlock(tp);
7384
7385         tg3_disable_nvram_access(tp);
7386
7387         return ret;
7388 }
7389
7390 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
7391                                     u32 offset, u32 len, u8 *buf)
7392 {
7393         int i, j, rc = 0;
7394         u32 val;
7395
7396         for (i = 0; i < len; i += 4) {
7397                 u32 addr, data;
7398
7399                 addr = offset + i;
7400
7401                 memcpy(&data, buf + i, 4);
7402
7403                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
7404
7405                 val = tr32(GRC_EEPROM_ADDR);
7406                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
7407
7408                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
7409                         EEPROM_ADDR_READ);
7410                 tw32(GRC_EEPROM_ADDR, val |
7411                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
7412                         (addr & EEPROM_ADDR_ADDR_MASK) |
7413                         EEPROM_ADDR_START |
7414                         EEPROM_ADDR_WRITE);
7415                 
7416                 for (j = 0; j < 10000; j++) {
7417                         val = tr32(GRC_EEPROM_ADDR);
7418
7419                         if (val & EEPROM_ADDR_COMPLETE)
7420                                 break;
7421                         udelay(100);
7422                 }
7423                 if (!(val & EEPROM_ADDR_COMPLETE)) {
7424                         rc = -EBUSY;
7425                         break;
7426                 }
7427         }
7428
7429         return rc;
7430 }
7431
7432 /* offset and length are dword aligned */
7433 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
7434                 u8 *buf)
7435 {
7436         int ret = 0;
7437         u32 pagesize = tp->nvram_pagesize;
7438         u32 pagemask = pagesize - 1;
7439         u32 nvram_cmd;
7440         u8 *tmp;
7441
7442         tmp = kmalloc(pagesize, GFP_KERNEL);
7443         if (tmp == NULL)
7444                 return -ENOMEM;
7445
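        /* Unbuffered flash can only be written a full page at a time:
         * read the page containing the target range, merge in the new
         * data, erase the page, then rewrite it word by word.
         */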
7446         while (len) {
7447                 int j;
7448                 u32 phy_addr, page_off, size;
7449
7450                 phy_addr = offset & ~pagemask;
7451         
7452                 for (j = 0; j < pagesize; j += 4) {
7453                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
7454                                                 (u32 *) (tmp + j))))
7455                                 break;
7456                 }
7457                 if (ret)
7458                         break;
7459
7460                 page_off = offset & pagemask;
7461                 size = pagesize;
7462                 if (len < size)
7463                         size = len;
7464
7465                 len -= size;
7466
7467                 memcpy(tmp + page_off, buf, size);
7468
7469                 offset = offset + (pagesize - page_off);
7470
7471                 tg3_enable_nvram_access(tp);
7472
7473                 /*
7474                  * Before we can erase the flash page, we need
7475                  * to issue a special "write enable" command.
7476                  */
7477                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7478
7479                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7480                         break;
7481
7482                 /* Erase the target page */
7483                 tw32(NVRAM_ADDR, phy_addr);
7484
7485                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
7486                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
7487
7488                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7489                         break;
7490
7491                 /* Issue another write enable to start the write. */
7492                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7493
7494                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7495                         break;
7496
7497                 for (j = 0; j < pagesize; j += 4) {
7498                         u32 data;
7499
7500                         data = *((u32 *) (tmp + j));
7501                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
7502
7503                         tw32(NVRAM_ADDR, phy_addr + j);
7504
7505                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
7506                                 NVRAM_CMD_WR;
7507
7508                         if (j == 0)
7509                                 nvram_cmd |= NVRAM_CMD_FIRST;
7510                         else if (j == (pagesize - 4))
7511                                 nvram_cmd |= NVRAM_CMD_LAST;
7512
7513                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7514                                 break;
7515                 }
7516                 if (ret)
7517                         break;
7518         }
7519
7520         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7521         tg3_nvram_exec_cmd(tp, nvram_cmd);
7522
7523         kfree(tmp);
7524
7525         return ret;
7526 }
7527
7528 /* offset and length are dword aligned */
7529 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
7530                 u8 *buf)
7531 {
7532         int i, ret = 0;
7533
7534         for (i = 0; i < len; i += 4, offset += 4) {
7535                 u32 data, page_off, phy_addr, nvram_cmd;
7536
7537                 memcpy(&data, buf + i, 4);
7538                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
7539
7540                 page_off = offset % tp->nvram_pagesize;
7541
7542                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7543                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7544
7545                         phy_addr = ((offset / tp->nvram_pagesize) <<
7546                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
7547                 }
7548                 else {
7549                         phy_addr = offset;
7550                 }
7551
7552                 tw32(NVRAM_ADDR, phy_addr);
7553
7554                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
7555
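                /* Mark the first and last word of each flash page (and of
                 * the whole transfer) with NVRAM_CMD_FIRST/NVRAM_CMD_LAST.
                 */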
7556                 if ((page_off == 0) || (i == 0))
7557                         nvram_cmd |= NVRAM_CMD_FIRST;
7558                 else if (page_off == (tp->nvram_pagesize - 4))
7559                         nvram_cmd |= NVRAM_CMD_LAST;
7560
7561                 if (i == (len - 4))
7562                         nvram_cmd |= NVRAM_CMD_LAST;
7563
7564                 if ((tp->nvram_jedecnum == JEDEC_ST) &&
7565                         (nvram_cmd & NVRAM_CMD_FIRST)) {
7566
7567                         if ((ret = tg3_nvram_exec_cmd(tp,
7568                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
7569                                 NVRAM_CMD_DONE)))
7570
7571                                 break;
7572                 }
7573                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7574                         /* We always do complete word writes to eeprom. */
7575                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
7576                 }
7577
7578                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7579                         break;
7580         }
7581         return ret;
7582 }
7583
7584 /* offset and length are dword aligned */
7585 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
7586 {
7587         int ret;
7588
7589         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7590                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
7591                 return -EINVAL;
7592         }
7593
7594         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7595                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
7596                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
7597                 udelay(40);
7598         }
7599
7600         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
7601                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
7602         }
7603         else {
7604                 u32 grc_mode;
7605
7606                 tg3_nvram_lock(tp);
7607
7608                 tg3_enable_nvram_access(tp);
7609                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
7610                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
7611                         tw32(NVRAM_WRITE1, 0x406);
7612
7613                 grc_mode = tr32(GRC_MODE);
7614                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
7615
7616                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
7617                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7618
7619                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
7620                                 buf);
7621                 }
7622                 else {
7623                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
7624                                 buf);
7625                 }
7626
7627                 grc_mode = tr32(GRC_MODE);
7628                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
7629
7630                 tg3_disable_nvram_access(tp);
7631                 tg3_nvram_unlock(tp);
7632         }
7633
7634         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7635                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7636                 udelay(40);
7637         }
7638
7639         return ret;
7640 }
7641
7642 struct subsys_tbl_ent {
7643         u16 subsys_vendor, subsys_devid;
7644         u32 phy_id;
7645 };
7646
7647 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
7648         /* Broadcom boards. */
7649         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
7650         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
7651         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
7652         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
7653         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
7654         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
7655         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
7656         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
7657         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
7658         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
7659         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
7660
7661         /* 3com boards. */
7662         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
7663         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
7664         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
7665         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
7666         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
7667
7668         /* DELL boards. */
7669         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
7670         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
7671         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
7672         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
7673
7674         /* Compaq boards. */
7675         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
7676         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
7677         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
7678         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
7679         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
7680
7681         /* IBM boards. */
7682         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
7683 };
7684
7685 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
7686 {
7687         int i;
7688
7689         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
7690                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
7691                      tp->pdev->subsystem_vendor) &&
7692                     (subsys_id_to_phy_id[i].subsys_devid ==
7693                      tp->pdev->subsystem_device))
7694                         return &subsys_id_to_phy_id[i];
7695         }
7696         return NULL;
7697 }
7698
7699 /* Since this function may be called in D3-hot power state during
7700  * tg3_init_one(), only config cycles are allowed.
7701  */
7702 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
7703 {
7704         u32 val;
7705
7706         /* Make sure register accesses (indirect or otherwise)
7707          * will function correctly.
7708          */
7709         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7710                                tp->misc_host_ctrl);
7711
7712         tp->phy_id = PHY_ID_INVALID;
7713         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7714
7715         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7716         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7717                 u32 nic_cfg, led_cfg;
7718                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
7719                 int eeprom_phy_serdes = 0;
7720
7721                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7722                 tp->nic_sram_data_cfg = nic_cfg;
7723
7724                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
7725                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
7726                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7727                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7728                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
7729                     (ver > 0) && (ver < 0x100))
7730                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
7731
7732                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
7733                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
7734                         eeprom_phy_serdes = 1;
7735
7736                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
7737                 if (nic_phy_id != 0) {
7738                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
7739                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
7740
7741                         eeprom_phy_id  = (id1 >> 16) << 10;
7742                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
7743                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
7744                 } else
7745                         eeprom_phy_id = 0;
7746
7747                 tp->phy_id = eeprom_phy_id;
7748                 if (eeprom_phy_serdes)
7749                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7750
7751                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7752                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
7753                                     SHASTA_EXT_LED_MODE_MASK);
7754                 else
7755                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
7756
7757                 switch (led_cfg) {
7758                 default:
7759                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
7760                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7761                         break;
7762
7763                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
7764                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7765                         break;
7766
7767                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
7768                         tp->led_ctrl = LED_CTRL_MODE_MAC;
7769                         break;
7770
7771                 case SHASTA_EXT_LED_SHARED:
7772                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
7773                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7774                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
7775                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7776                                                  LED_CTRL_MODE_PHY_2);
7777                         break;
7778
7779                 case SHASTA_EXT_LED_MAC:
7780                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
7781                         break;
7782
7783                 case SHASTA_EXT_LED_COMBO:
7784                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
7785                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
7786                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7787                                                  LED_CTRL_MODE_PHY_2);
7788                         break;
7789
7790                 }
7791
7792                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7793                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
7794                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
7795                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7796
7797                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7798                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7799                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
7800                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
7801
7802                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7803                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7804                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7805                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7806                 }
7807                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
7808                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
7809
7810                 if (cfg2 & (1 << 17))
7811                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
7812
7813                 /* SerDes signal pre-emphasis in register 0x590 is set by
7814                  * the bootcode if bit 18 is set. */
7815                 if (cfg2 & (1 << 18))
7816                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
7817         }
7818 }
7819
7820 static int __devinit tg3_phy_probe(struct tg3 *tp)
7821 {
7822         u32 hw_phy_id_1, hw_phy_id_2;
7823         u32 hw_phy_id, hw_phy_id_masked;
7824         int err;
7825
7826         /* Reading the PHY ID register can conflict with ASF
7827          * firmware access to the PHY hardware.
7828          */
7829         err = 0;
7830         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7831                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
7832         } else {
7833                 /* Now read the physical PHY_ID from the chip and verify
7834                  * that it is sane.  If it doesn't look good, we fall back
7835                  * to the PHY_ID found in the eeprom area and, failing
7836                  * that, to the hard-coded subsystem table.
7837                  */
7838                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
7839                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
7840
7841                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
7842                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
7843                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
7844
7845                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
7846         }
7847
7848         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
7849                 tp->phy_id = hw_phy_id;
7850                 if (hw_phy_id_masked == PHY_ID_BCM8002)
7851                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7852         } else {
7853                 if (tp->phy_id != PHY_ID_INVALID) {
7854                         /* Do nothing, phy ID already set up in
7855                          * tg3_get_eeprom_hw_cfg().
7856                          */
7857                 } else {
7858                         struct subsys_tbl_ent *p;
7859
7860                         /* No eeprom signature?  Try the hardcoded
7861                          * subsys device table.
7862                          */
7863                         p = lookup_by_subsys(tp);
7864                         if (!p)
7865                                 return -ENODEV;
7866
7867                         tp->phy_id = p->phy_id;
7868                         if (!tp->phy_id ||
7869                             tp->phy_id == PHY_ID_BCM8002)
7870                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7871                 }
7872         }
7873
7874         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7875             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
7876                 u32 bmsr, adv_reg, tg3_ctrl;
7877
7878                 tg3_readphy(tp, MII_BMSR, &bmsr);
7879                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
7880                     (bmsr & BMSR_LSTATUS))
7881                         goto skip_phy_reset;
7882                     
7883                 err = tg3_phy_reset(tp);
7884                 if (err)
7885                         return err;
7886
7887                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
7888                            ADVERTISE_100HALF | ADVERTISE_100FULL |
7889                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
7890                 tg3_ctrl = 0;
7891                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
7892                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
7893                                     MII_TG3_CTRL_ADV_1000_FULL);
7894                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7895                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
7896                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
7897                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
7898                 }
7899
7900                 if (!tg3_copper_is_advertising_all(tp)) {
7901                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7902
7903                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7904                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7905
7906                         tg3_writephy(tp, MII_BMCR,
7907                                      BMCR_ANENABLE | BMCR_ANRESTART);
7908                 }
7909                 tg3_phy_set_wirespeed(tp);
7910
7911                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7912                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7913                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7914         }
7915
7916 skip_phy_reset:
7917         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
7918                 err = tg3_init_5401phy_dsp(tp);
7919                 if (err)
7920                         return err;
7921         }
7922
7923         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
7924                 err = tg3_init_5401phy_dsp(tp);
7925         }
7926
7927         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7928                 tp->link_config.advertising =
7929                         (ADVERTISED_1000baseT_Half |
7930                          ADVERTISED_1000baseT_Full |
7931                          ADVERTISED_Autoneg |
7932                          ADVERTISED_FIBRE);
7933         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
7934                 tp->link_config.advertising &=
7935                         ~(ADVERTISED_1000baseT_Half |
7936                           ADVERTISED_1000baseT_Full);
7937
7938         return err;
7939 }
7940
7941 static void __devinit tg3_read_partno(struct tg3 *tp)
7942 {
7943         unsigned char vpd_data[256];
7944         int i;
7945
7946         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7947                 /* Sun decided not to put the necessary bits in the
7948                  * NVRAM of their onboard tg3 parts :(
7949                  */
7950                 strcpy(tp->board_part_number, "Sun 570X");
7951                 return;
7952         }
7953
7954         for (i = 0; i < 256; i += 4) {
7955                 u32 tmp;
7956
7957                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
7958                         goto out_not_found;
7959
7960                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
7961                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
7962                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
7963                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
7964         }
7965
7966         /* Now parse and find the part number. */
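             /* Layout assumed by the parser below (per the PCI VPD format):
              * each resource starts with a tag byte -- 0x82 identifier
              * string, 0x90 read-only VPD-R, 0x91 read/write VPD-W --
              * followed by a 16-bit little-endian length.  Within VPD-R,
              * every field is a two-character keyword ("PN" here), a
              * one-byte length, then that many data bytes.
              */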
7967         for (i = 0; i < 256; ) {
7968                 unsigned char val = vpd_data[i];
7969                 int block_end;
7970
7971                 if (val == 0x82 || val == 0x91) {
7972                         i = (i + 3 +
7973                              (vpd_data[i + 1] +
7974                               (vpd_data[i + 2] << 8)));
7975                         continue;
7976                 }
7977
7978                 if (val != 0x90)
7979                         goto out_not_found;
7980
7981                 block_end = (i + 3 +
7982                              (vpd_data[i + 1] +
7983                               (vpd_data[i + 2] << 8)));
7984                 i += 3;
7985                 while (i < block_end) {
7986                         if (vpd_data[i + 0] == 'P' &&
7987                             vpd_data[i + 1] == 'N') {
7988                                 int partno_len = vpd_data[i + 2];
7989
7990                                 if (partno_len > 24)
7991                                         goto out_not_found;
7992
7993                                 memcpy(tp->board_part_number,
7994                                        &vpd_data[i + 3],
7995                                        partno_len);
7996
7997                                 /* Success. */
7998                                 return;
7999                         }

                             /* Keyword did not match: advance past this entry
                              * (2-byte keyword + 1-byte length + data) so the
                              * scan cannot loop forever if "PN" is absent.
                              */
                             i += 3 + vpd_data[i + 2];
8000                 }
8001
8002                 /* Part number not found. */
8003                 goto out_not_found;
8004         }
8005
8006 out_not_found:
8007         strcpy(tp->board_part_number, "none");
8008 }
8009
8010 #ifdef CONFIG_SPARC64
8011 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
8012 {
8013         struct pci_dev *pdev = tp->pdev;
8014         struct pcidev_cookie *pcp = pdev->sysdata;
8015
8016         if (pcp != NULL) {
8017                 int node = pcp->prom_node;
8018                 u32 venid;
8019                 int err;
8020
8021                 err = prom_getproperty(node, "subsystem-vendor-id",
8022                                        (char *) &venid, sizeof(venid));
8023                 if (err == 0 || err == -1)
8024                         return 0;
8025                 if (venid == PCI_VENDOR_ID_SUN)
8026                         return 1;
8027         }
8028         return 0;
8029 }
8030 #endif
8031
8032 static int __devinit tg3_get_invariants(struct tg3 *tp)
8033 {
8034         static struct pci_device_id write_reorder_chipsets[] = {
8035                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8036                              PCI_DEVICE_ID_INTEL_82801AA_8) },
8037                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8038                              PCI_DEVICE_ID_INTEL_82801AB_8) },
8039                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8040                              PCI_DEVICE_ID_INTEL_82801BA_11) },
8041                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8042                              PCI_DEVICE_ID_INTEL_82801BA_6) },
8043                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
8044                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
8045                 { },
8046         };
8047         u32 misc_ctrl_reg;
8048         u32 cacheline_sz_reg;
8049         u32 pci_state_reg, grc_misc_cfg;
8050         u32 val;
8051         u16 pci_cmd;
8052         int err;
8053
8054 #ifdef CONFIG_SPARC64
8055         if (tg3_is_sun_570X(tp))
8056                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
8057 #endif
8058
8059         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
8060          * reordering to the mailbox registers done by the host
8061          * controller can cause major troubles.  We read back from
8062          * every mailbox register write to force the writes to be
8063          * posted to the chip in order.
8064          */
8065         if (pci_dev_present(write_reorder_chipsets))
8066                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
8067
8068         /* Force memory write invalidate off.  If we leave it on,
8069          * then on 5700_BX chips we have to enable a workaround.
8070          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
8071          * to match the cacheline size.  The Broadcom driver has this
8072          * workaround but turns MWI off all the time, so it never uses
8073          * it.  This seems to suggest that the workaround is insufficient.
8074          */
8075         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8076         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
8077         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8078
8079         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
8080          * has the register indirect write enable bit set before
8081          * we try to access any of the MMIO registers.  It is also
8082          * critical that the PCI-X hw workaround situation is decided
8083          * before that as well.
8084          */
8085         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8086                               &misc_ctrl_reg);
8087
8088         tp->pci_chip_rev_id = (misc_ctrl_reg >>
8089                                MISC_HOST_CTRL_CHIPREV_SHIFT);
8090
8091         /* Wrong chip ID in 5752 A0. This code can be removed later
8092          * as A0 is not in production.
8093          */
8094         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
8095                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
8096
8097         /* Initialize misc host control in PCI block. */
8098         tp->misc_host_ctrl |= (misc_ctrl_reg &
8099                                MISC_HOST_CTRL_CHIPREV);
8100         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8101                                tp->misc_host_ctrl);
8102
8103         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
8104                               &cacheline_sz_reg);
8105
8106         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
8107         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
8108         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
8109         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
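             /* These four fields mirror the byte layout of the standard PCI
              * cache-line-size config dword: cache line size, latency timer,
              * header type and BIST, lowest byte first.
              */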
8110
8111         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8112             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8113                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
8114
8115         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
8116             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
8117                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
8118
8119         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8120                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
8121
8122         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
8123                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
8124
8125         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
8126             tp->pci_lat_timer < 64) {
8127                 tp->pci_lat_timer = 64;
8128
8129                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
8130                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
8131                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
8132                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
8133
8134                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
8135                                        cacheline_sz_reg);
8136         }
8137
8138         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
8139                               &pci_state_reg);
8140
8141         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
8142                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
8143
8144                 /* If this is a 5700 BX chipset, and we are in PCI-X
8145                  * mode, enable register write workaround.
8146                  *
8147                  * The workaround is to use indirect register accesses
8148                  * for all chip writes not to mailbox registers.
8149                  */
8150                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
8151                         u32 pm_reg;
8152                         u16 pci_cmd;
8153
8154                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
8155
8156                         /* The chip can have its power management PCI config
8157                          * space registers clobbered due to this bug.
8158                          * So explicitly force the chip into D0 here.
8159                          */
8160                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
8161                                               &pm_reg);
8162                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
8163                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
8164                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
8165                                                pm_reg);
8166
8167                         /* Also, force SERR#/PERR# in PCI command. */
8168                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8169                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
8170                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8171                 }
8172         }
8173
8174         /* Back to back register writes can cause problems on this chip,
8175          * the workaround is to read back all reg writes except those to
8176          * mailbox regs.  See tg3_write_indirect_reg32().
8177          *
8178          * PCI Express 5750_A0 rev chips need this workaround too.
8179          */
8180         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8181             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
8182              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
8183                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
8184
8185         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
8186                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
8187         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
8188                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
8189
8190         /* Chip-specific fixup from Broadcom driver */
8191         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
8192             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
8193                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
8194                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
8195         }
8196
8197         /* Get eeprom hw config before calling tg3_set_power_state().
8198          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
8199          * determined before calling tg3_set_power_state() so that
8200          * we know whether or not to switch out of Vaux power.
8201          * When the flag is set, it means that GPIO1 is used for eeprom
8202          * write protect and also implies that it is a LOM where GPIOs
8203          * are not used to switch power.
8204          */ 
8205         tg3_get_eeprom_hw_cfg(tp);
8206
8207         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
8208          * GPIO1 driven high will bring 5700's external PHY out of reset.
8209          * It is also used as eeprom write protect on LOMs.
8210          */
8211         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
8212         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
8213             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
8214                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8215                                        GRC_LCLCTRL_GPIO_OUTPUT1);
8216         /* Unused GPIO3 must be driven as output on 5752 because there
8217          * are no pull-up resistors on unused GPIO pins.
8218          */
8219         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8220                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
8221
8222         /* Force the chip into D0. */
8223         err = tg3_set_power_state(tp, 0);
8224         if (err) {
8225                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
8226                        pci_name(tp->pdev));
8227                 return err;
8228         }
8229
8230         /* 5700 B0 chips do not support checksumming correctly due
8231          * to hardware bugs.
8232          */
8233         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
8234                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
8235
8236         /* Pseudo-header checksum is done by hardware logic and not
8237          * the offload processors, so make the chip do the pseudo-
8238          * header checksums on receive.  For transmit it is more
8239          * convenient to do the pseudo-header checksum in software
8240          * as Linux does that on transmit for us in all cases.
8241          */
8242         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
8243         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
8244
8245         /* Derive initial jumbo mode from MTU assigned in
8246          * ether_setup() via the alloc_etherdev() call
8247          */
8248         if (tp->dev->mtu > ETH_DATA_LEN)
8249                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
8250
8251         /* Determine WakeOnLan speed to use. */
8252         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8253             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
8254             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
8255             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
8256                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
8257         } else {
8258                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
8259         }
8260
8261         /* A few boards don't want Ethernet@WireSpeed phy feature */
8262         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
8263             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
8264              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
8265              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
8266                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
8267
8268         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
8269             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
8270                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
8271         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
8272                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
8273
8274         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8275                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
8276
8277         /* Only 5701 and later support tagged irq status mode.
8278          * Also, 5788 chips cannot use tagged irq status.
8279          *
8280          * However, since we are using NAPI, avoid tagged irq status
8281          * because the interrupt condition is more difficult to
8282          * fully clear in that mode.
8283          */
8284         tp->coalesce_mode = 0;
8285
8286         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
8287             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
8288                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
8289
8290         /* Initialize MAC MI mode, polling disabled. */
8291         tw32_f(MAC_MI_MODE, tp->mi_mode);
8292         udelay(80);
8293
8294         /* Initialize data/descriptor byte/word swapping. */
8295         val = tr32(GRC_MODE);
8296         val &= GRC_MODE_HOST_STACKUP;
8297         tw32(GRC_MODE, val | tp->grc_mode);
8298
8299         tg3_switch_clocks(tp);
8300
8301         /* Clear this out for sanity. */
8302         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8303
8304         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
8305                               &pci_state_reg);
8306         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
8307             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
8308                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
8309
8310                 if (chiprevid == CHIPREV_ID_5701_A0 ||
8311                     chiprevid == CHIPREV_ID_5701_B0 ||
8312                     chiprevid == CHIPREV_ID_5701_B2 ||
8313                     chiprevid == CHIPREV_ID_5701_B5) {
8314                         void __iomem *sram_base;
8315
8316                         /* Write some dummy words into the SRAM status block
8317                          * area and see if it reads back correctly.  If the return
8318                          * value is bad, force enable the PCIX workaround.
8319                          */
8320                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
8321
8322                         writel(0x00000000, sram_base);
8323                         writel(0x00000000, sram_base + 4);
8324                         writel(0xffffffff, sram_base + 4);
8325                         if (readl(sram_base) != 0x00000000)
8326                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
8327                 }
8328         }
8329
8330         udelay(50);
8331         tg3_nvram_init(tp);
8332
8333         grc_misc_cfg = tr32(GRC_MISC_CFG);
8334         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
8335
8336         /* Broadcom's driver says that CIOBE multisplit has a bug */
8337 #if 0
8338         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8339             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
8340                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
8341                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
8342         }
8343 #endif
8344         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8345             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
8346              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
8347                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
8348
8349         /* these are limited to 10/100 only */
8350         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
8351              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
8352             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8353              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8354              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
8355               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
8356               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
8357             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8358              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
8359               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
8360                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
8361
8362         err = tg3_phy_probe(tp);
8363         if (err) {
8364                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
8365                        pci_name(tp->pdev), err);
8366                 /* ... but do not return immediately ... */
8367         }
8368
8369         tg3_read_partno(tp);
8370
8371         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8372                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8373         } else {
8374                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8375                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
8376                 else
8377                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8378         }
8379
8380         /* 5700 {AX,BX} chips have a broken status block link
8381          * change bit implementation, so we must use the
8382          * status register in those cases.
8383          */
8384         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8385                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
8386         else
8387                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
8388
8389         /* The led_ctrl is set during tg3_phy_probe, here we might
8390          * have to force the link status polling mechanism based
8391          * upon subsystem IDs.
8392          */
8393         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
8394             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8395                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
8396                                   TG3_FLAG_USE_LINKCHG_REG);
8397         }
8398
8399         /* For all SERDES we poll the MAC status register. */
8400         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8401                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
8402         else
8403                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
8404
8405         /* 5700 BX chips need to have their TX producer index mailboxes
8406          * written twice to workaround a bug.
8407          */
8408         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
8409                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
8410         else
8411                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
8412
8413         /* It seems all chips can get confused if TX buffers
8414          * straddle the 4GB address boundary in some cases.
8415          */
8416         tp->dev->hard_start_xmit = tg3_start_xmit;
8417
8418         tp->rx_offset = 2;
8419         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
8420             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
8421                 tp->rx_offset = 0;
8422
8423         /* By default, disable wake-on-lan.  User can change this
8424          * using ETHTOOL_SWOL.
8425          */
8426         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8427
8428         return err;
8429 }
8430
8431 #ifdef CONFIG_SPARC64
8432 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
8433 {
8434         struct net_device *dev = tp->dev;
8435         struct pci_dev *pdev = tp->pdev;
8436         struct pcidev_cookie *pcp = pdev->sysdata;
8437
8438         if (pcp != NULL) {
8439                 int node = pcp->prom_node;
8440
8441                 if (prom_getproplen(node, "local-mac-address") == 6) {
8442                         prom_getproperty(node, "local-mac-address",
8443                                          dev->dev_addr, 6);
8444                         return 0;
8445                 }
8446         }
8447         return -ENODEV;
8448 }
8449
8450 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
8451 {
8452         struct net_device *dev = tp->dev;
8453
8454         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
8455         return 0;
8456 }
8457 #endif
8458
8459 static int __devinit tg3_get_device_address(struct tg3 *tp)
8460 {
8461         struct net_device *dev = tp->dev;
8462         u32 hi, lo, mac_offset;
8463
8464 #ifdef CONFIG_SPARC64
8465         if (!tg3_get_macaddr_sparc(tp))
8466                 return 0;
8467 #endif
8468
8469         mac_offset = 0x7c;
8470         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8471             !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
8472                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
8473                         mac_offset = 0xcc;
8474                 if (tg3_nvram_lock(tp))
8475                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
8476                 else
8477                         tg3_nvram_unlock(tp);
8478         }
8479
8480         /* First try to get it from MAC address mailbox. */
8481         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
8482         if ((hi >> 16) == 0x484b) {
8483                 dev->dev_addr[0] = (hi >>  8) & 0xff;
8484                 dev->dev_addr[1] = (hi >>  0) & 0xff;
8485
8486                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
8487                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8488                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8489                 dev->dev_addr[4] = (lo >>  8) & 0xff;
8490                 dev->dev_addr[5] = (lo >>  0) & 0xff;
8491         }
8492         /* Next, try NVRAM. */
8493         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
8494                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
8495                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
8496                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
8497                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
8498                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
8499                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
8500                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
8501                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
8502         }
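             /* Note the NVRAM copy is stored with a different byte order
              * than the SRAM mailbox above, hence the differing shifts.
              */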
8503         /* Finally just fetch it out of the MAC control regs. */
8504         else {
8505                 hi = tr32(MAC_ADDR_0_HIGH);
8506                 lo = tr32(MAC_ADDR_0_LOW);
8507
8508                 dev->dev_addr[5] = lo & 0xff;
8509                 dev->dev_addr[4] = (lo >> 8) & 0xff;
8510                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8511                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8512                 dev->dev_addr[1] = hi & 0xff;
8513                 dev->dev_addr[0] = (hi >> 8) & 0xff;
8514         }
8515
8516         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
8517 #ifdef CONFIG_SPARC64
8518                 if (!tg3_get_default_macaddr_sparc(tp))
8519                         return 0;
8520 #endif
8521                 return -EINVAL;
8522         }
8523         return 0;
8524 }
8525
8526 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
8527 {
8528         struct tg3_internal_buffer_desc test_desc;
8529         u32 sram_dma_descs;
8530         int i, ret;
8531
8532         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
8533
8534         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
8535         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
8536         tw32(RDMAC_STATUS, 0);
8537         tw32(WDMAC_STATUS, 0);
8538
8539         tw32(BUFMGR_MODE, 0);
8540         tw32(FTQ_RESET, 0);
8541
8542         test_desc.addr_hi = ((u64) buf_dma) >> 32;
8543         test_desc.addr_lo = buf_dma & 0xffffffff;
8544         test_desc.nic_mbuf = 0x00002100;
8545         test_desc.len = size;
8546
8547         /*
8548          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
8549          * the *second* time the tg3 driver was getting loaded after an
8550          * initial scan.
8551          *
8552          * Broadcom tells me:
8553          *   ...the DMA engine is connected to the GRC block and a DMA
8554          *   reset may affect the GRC block in some unpredictable way...
8555          *   The behavior of resets to individual blocks has not been tested.
8556          *
8557          * Broadcom noted the GRC reset will also reset all sub-components.
8558          */
8559         if (to_device) {
8560                 test_desc.cqid_sqid = (13 << 8) | 2;
8561
8562                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
8563                 udelay(40);
8564         } else {
8565                 test_desc.cqid_sqid = (16 << 8) | 7;
8566
8567                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
8568                 udelay(40);
8569         }
8570         test_desc.flags = 0x00000005;
8571
8572         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
8573                 u32 val;
8574
8575                 val = *(((u32 *)&test_desc) + i);
8576                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
8577                                        sram_dma_descs + (i * sizeof(u32)));
8578                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
8579         }
8580         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
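             /* The loop above copies the test descriptor into NIC SRAM one
              * word at a time through the PCI memory window: MEM_WIN_BASE_ADDR
              * selects the SRAM offset and the MEM_WIN_DATA write lands there.
              * The window base is then cleared again.
              */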
8581
8582         if (to_device) {
8583                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
8584         } else {
8585                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
8586         }
8587
8588         ret = -ENODEV;
8589         for (i = 0; i < 40; i++) {
8590                 u32 val;
8591
8592                 if (to_device)
8593                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
8594                 else
8595                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
8596                 if ((val & 0xffff) == sram_dma_descs) {
8597                         ret = 0;
8598                         break;
8599                 }
8600
8601                 udelay(100);
8602         }
8603
8604         return ret;
8605 }
8606
8607 #define TEST_BUFFER_SIZE        0x400
8608
8609 static int __devinit tg3_test_dma(struct tg3 *tp)
8610 {
8611         dma_addr_t buf_dma;
8612         u32 *buf;
8613         int ret;
8614
8615         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
8616         if (!buf) {
8617                 ret = -ENOMEM;
8618                 goto out_nofree;
8619         }
8620
8621         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
8622                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
8623
8624 #ifndef CONFIG_X86
8625         {
8626                 u8 byte;
8627                 int cacheline_size;
8628                 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
8629
8630                 if (byte == 0)
8631                         cacheline_size = 1024;
8632                 else
8633                         cacheline_size = (int) byte * 4;
8634
8635                 switch (cacheline_size) {
8636                 case 16:
8637                 case 32:
8638                 case 64:
8639                 case 128:
8640                         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8641                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8642                                 tp->dma_rwctrl |=
8643                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
8644                                 break;
8645                         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8646                                 tp->dma_rwctrl &=
8647                                         ~(DMA_RWCTRL_PCI_WRITE_CMD);
8648                                 tp->dma_rwctrl |=
8649                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
8650                                 break;
8651                         }
8652                         /* fallthrough */
8653                 case 256:
8654                         if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8655                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8656                                 tp->dma_rwctrl |=
8657                                         DMA_RWCTRL_WRITE_BNDRY_256;
8658                         else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8659                                 tp->dma_rwctrl |=
8660                                         DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
8661                 };
8662         }
8663 #endif
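             /* On x86 the block above is compiled out, so the write boundary
              * bits in dma_rwctrl are left at their default (no boundary
              * forced).
              */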
8664
8665         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8666                 /* DMA read watermark not used on PCIE */
8667                 tp->dma_rwctrl |= 0x00180000;
8668         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
8669                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8670                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
8671                         tp->dma_rwctrl |= 0x003f0000;
8672                 else
8673                         tp->dma_rwctrl |= 0x003f000f;
8674         } else {
8675                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8676                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8677                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
8678
8679                         if (ccval == 0x6 || ccval == 0x7)
8680                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
8681
8682                         /* Set bit 23 to re-enable the PCI-X hw bug fix */
8683                         tp->dma_rwctrl |= 0x009f0000;
8684                 } else {
8685                         tp->dma_rwctrl |= 0x001b000f;
8686                 }
8687         }
8688
8689         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8690             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8691                 tp->dma_rwctrl &= 0xfffffff0;
8692
8693         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8694             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
8695                 /* Remove this if it causes problems for some boards. */
8696                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
8697
8698                 /* On 5700/5701 chips, we need to set this bit.
8699                  * Otherwise the chip will issue cacheline transactions
8700                  * to streamable DMA memory with not all the byte
8701                  * enables turned on.  This is an error on several
8702                  * RISC PCI controllers, in particular sparc64.
8703                  *
8704                  * On 5703/5704 chips, this bit has been reassigned
8705                  * a different meaning.  In particular, it is used
8706                  * on those chips to enable a PCI-X workaround.
8707                  */
8708                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
8709         }
8710
8711         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8712
8713 #if 0
8714         /* Unneeded, already done by tg3_get_invariants.  */
8715         tg3_switch_clocks(tp);
8716 #endif
8717
8718         ret = 0;
8719         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8720             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
8721                 goto out;
8722
8723         while (1) {
8724                 u32 *p = buf, i;
8725
8726                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
8727                         p[i] = i;
8728
8729                 /* Send the buffer to the chip. */
8730                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
8731                 if (ret) {
8732                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
8733                         break;
8734                 }
8735
8736 #if 0
8737                 /* validate data reached card RAM correctly. */
8738                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8739                         u32 val;
8740                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
8741                         if (le32_to_cpu(val) != p[i]) {
8742                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
8743                                 /* ret = -ENODEV here? */
8744                         }
8745                         p[i] = 0;
8746                 }
8747 #endif
8748                 /* Now read it back. */
8749                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
8750                 if (ret) {
8751                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
8752
8753                         break;
8754                 }
8755
8756                 /* Verify it. */
8757                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8758                         if (p[i] == i)
8759                                 continue;
8760
8761                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
8762                             DMA_RWCTRL_WRITE_BNDRY_DISAB) {
8763                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
8764                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8765                                 break;
8766                         } else {
8767                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
8768                                 ret = -ENODEV;
8769                                 goto out;
8770                         }
8771                 }
8772
8773                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
8774                         /* Success. */
8775                         ret = 0;
8776                         break;
8777                 }
8778         }
8779
8780 out:
8781         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
8782 out_nofree:
8783         return ret;
8784 }
8785
8786 static void __devinit tg3_init_link_config(struct tg3 *tp)
8787 {
8788         tp->link_config.advertising =
8789                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
8790                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
8791                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
8792                  ADVERTISED_Autoneg | ADVERTISED_MII);
8793         tp->link_config.speed = SPEED_INVALID;
8794         tp->link_config.duplex = DUPLEX_INVALID;
8795         tp->link_config.autoneg = AUTONEG_ENABLE;
8796         netif_carrier_off(tp->dev);
8797         tp->link_config.active_speed = SPEED_INVALID;
8798         tp->link_config.active_duplex = DUPLEX_INVALID;
8799         tp->link_config.phy_is_low_power = 0;
8800         tp->link_config.orig_speed = SPEED_INVALID;
8801         tp->link_config.orig_duplex = DUPLEX_INVALID;
8802         tp->link_config.orig_autoneg = AUTONEG_INVALID;
8803 }
8804
8805 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
8806 {
8807         tp->bufmgr_config.mbuf_read_dma_low_water =
8808                 DEFAULT_MB_RDMA_LOW_WATER;
8809         tp->bufmgr_config.mbuf_mac_rx_low_water =
8810                 DEFAULT_MB_MACRX_LOW_WATER;
8811         tp->bufmgr_config.mbuf_high_water =
8812                 DEFAULT_MB_HIGH_WATER;
8813
8814         tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
8815                 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
8816         tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
8817                 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
8818         tp->bufmgr_config.mbuf_high_water_jumbo =
8819                 DEFAULT_MB_HIGH_WATER_JUMBO;
8820
8821         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
8822         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
8823 }
8824
8825 static char * __devinit tg3_phy_string(struct tg3 *tp)
8826 {
8827         switch (tp->phy_id & PHY_ID_MASK) {
8828         case PHY_ID_BCM5400:    return "5400";
8829         case PHY_ID_BCM5401:    return "5401";
8830         case PHY_ID_BCM5411:    return "5411";
8831         case PHY_ID_BCM5701:    return "5701";
8832         case PHY_ID_BCM5703:    return "5703";
8833         case PHY_ID_BCM5704:    return "5704";
8834         case PHY_ID_BCM5705:    return "5705";
8835         case PHY_ID_BCM5750:    return "5750";
8836         case PHY_ID_BCM5752:    return "5752";
8837         case PHY_ID_BCM8002:    return "8002/serdes";
8838         case 0:                 return "serdes";
8839         default:                return "unknown";
8840         };
8841 }
8842
8843 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
8844 {
8845         struct pci_dev *peer;
8846         unsigned int func, devnr = tp->pdev->devfn & ~7;
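             /* devfn packs the 5-bit device number and the 3-bit function
              * number; masking off the low three bits gives function 0 of
              * this device, so the loop scans all eight functions in the
              * same slot looking for the other half of the 5704.
              */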
8847
8848         for (func = 0; func < 8; func++) {
8849                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
8850                 if (peer && peer != tp->pdev)
8851                         break;
8852                 pci_dev_put(peer);
8853         }
8854         if (!peer || peer == tp->pdev)
8855                 BUG();
8856
8857         /*
8858          * We don't need to keep the refcount elevated; there's no way
8859          * to remove one half of this device without removing the other
8860          */
8861         pci_dev_put(peer);
8862
8863         return peer;
8864 }
8865
8866 static int __devinit tg3_init_one(struct pci_dev *pdev,
8867                                   const struct pci_device_id *ent)
8868 {
8869         static int tg3_version_printed = 0;
8870         unsigned long tg3reg_base, tg3reg_len;
8871         struct net_device *dev;
8872         struct tg3 *tp;
8873         int i, err, pci_using_dac, pm_cap;
8874
8875         if (tg3_version_printed++ == 0)
8876                 printk(KERN_INFO "%s", version);
8877
8878         err = pci_enable_device(pdev);
8879         if (err) {
8880                 printk(KERN_ERR PFX "Cannot enable PCI device, "
8881                        "aborting.\n");
8882                 return err;
8883         }
8884
8885         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8886                 printk(KERN_ERR PFX "Cannot find proper PCI device "
8887                        "base address, aborting.\n");
8888                 err = -ENODEV;
8889                 goto err_out_disable_pdev;
8890         }
8891
8892         err = pci_request_regions(pdev, DRV_MODULE_NAME);
8893         if (err) {
8894                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
8895                        "aborting.\n");
8896                 goto err_out_disable_pdev;
8897         }
8898
8899         pci_set_master(pdev);
8900
8901         /* Find power-management capability. */
8902         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8903         if (pm_cap == 0) {
8904                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
8905                        "aborting.\n");
8906                 err = -EIO;
8907                 goto err_out_free_res;
8908         }
8909
8910         /* Configure DMA attributes. */
8911         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
8912         if (!err) {
8913                 pci_using_dac = 1;
8914                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
8915                 if (err < 0) {
8916                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
8917                                "for consistent allocations\n");
8918                         goto err_out_free_res;
8919                 }
8920         } else {
8921                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
8922                 if (err) {
8923                         printk(KERN_ERR PFX "No usable DMA configuration, "
8924                                "aborting.\n");
8925                         goto err_out_free_res;
8926                 }
8927                 pci_using_dac = 0;
8928         }
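             /* pci_using_dac records whether the 64-bit DMA mask was
              * accepted; it gates the NETIF_F_HIGHDMA feature flag set
              * further below.
              */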
8929
8930         tg3reg_base = pci_resource_start(pdev, 0);
8931         tg3reg_len = pci_resource_len(pdev, 0);
8932
8933         dev = alloc_etherdev(sizeof(*tp));
8934         if (!dev) {
8935                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
8936                 err = -ENOMEM;
8937                 goto err_out_free_res;
8938         }
8939
8940         SET_MODULE_OWNER(dev);
8941         SET_NETDEV_DEV(dev, &pdev->dev);
8942
8943         if (pci_using_dac)
8944                 dev->features |= NETIF_F_HIGHDMA;
8945         dev->features |= NETIF_F_LLTX;
8946 #if TG3_VLAN_TAG_USED
8947         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8948         dev->vlan_rx_register = tg3_vlan_rx_register;
8949         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
8950 #endif
8951
8952         tp = netdev_priv(dev);
8953         tp->pdev = pdev;
8954         tp->dev = dev;
8955         tp->pm_cap = pm_cap;
8956         tp->mac_mode = TG3_DEF_MAC_MODE;
8957         tp->rx_mode = TG3_DEF_RX_MODE;
8958         tp->tx_mode = TG3_DEF_TX_MODE;
8959         tp->mi_mode = MAC_MI_MODE_BASE;
8960         if (tg3_debug > 0)
8961                 tp->msg_enable = tg3_debug;
8962         else
8963                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
8964
8965         /* The word/byte swap controls here control register access byte
8966          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
8967          * setting below.
8968          */
8969         tp->misc_host_ctrl =
8970                 MISC_HOST_CTRL_MASK_PCI_INT |
8971                 MISC_HOST_CTRL_WORD_SWAP |
8972                 MISC_HOST_CTRL_INDIR_ACCESS |
8973                 MISC_HOST_CTRL_PCISTATE_RW;
8974
8975         /* The NONFRM (non-frame) byte/word swap controls take effect
8976          * on descriptor entries, anything which isn't packet data.
8977          *
8978          * The StrongARM chips on the board (one for tx, one for rx)
8979          * are running in big-endian mode.
8980          */
8981         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
8982                         GRC_MODE_WSWAP_NONFRM_DATA);
8983 #ifdef __BIG_ENDIAN
8984         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
8985 #endif
8986         spin_lock_init(&tp->lock);
8987         spin_lock_init(&tp->tx_lock);
8988         spin_lock_init(&tp->indirect_lock);
8989         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
8990
8991         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
8992         if (tp->regs == 0UL) {
8993                 printk(KERN_ERR PFX "Cannot map device registers, "
8994                        "aborting.\n");
8995                 err = -ENOMEM;
8996                 goto err_out_free_dev;
8997         }
8998
8999         tg3_init_link_config(tp);
9000
9001         tg3_init_bufmgr_config(tp);
9002
9003         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
9004         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
9005         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
9006
9007         dev->open = tg3_open;
9008         dev->stop = tg3_close;
9009         dev->get_stats = tg3_get_stats;
9010         dev->set_multicast_list = tg3_set_rx_mode;
9011         dev->set_mac_address = tg3_set_mac_addr;
9012         dev->do_ioctl = tg3_ioctl;
9013         dev->tx_timeout = tg3_tx_timeout;
9014         dev->poll = tg3_poll;
9015         dev->ethtool_ops = &tg3_ethtool_ops;
9016         dev->weight = 64;
9017         dev->watchdog_timeo = TG3_TX_TIMEOUT;
9018         dev->change_mtu = tg3_change_mtu;
9019         dev->irq = pdev->irq;
9020 #ifdef CONFIG_NET_POLL_CONTROLLER
9021         dev->poll_controller = tg3_poll_controller;
9022 #endif
9023
9024         err = tg3_get_invariants(tp);
9025         if (err) {
9026                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
9027                        "aborting.\n");
9028                 goto err_out_iounmap;
9029         }
9030
9031         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9032                 tp->bufmgr_config.mbuf_read_dma_low_water =
9033                         DEFAULT_MB_RDMA_LOW_WATER_5705;
9034                 tp->bufmgr_config.mbuf_mac_rx_low_water =
9035                         DEFAULT_MB_MACRX_LOW_WATER_5705;
9036                 tp->bufmgr_config.mbuf_high_water =
9037                         DEFAULT_MB_HIGH_WATER_5705;
9038         }
9039
9040 #if TG3_TSO_SUPPORT != 0
9041         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
9042                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
9043         }
9044         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9045             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9046             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
9047             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
9048                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
9049         } else {
9050                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
9051         }
9052
9053         /* TSO is off by default, user can enable using ethtool.  */
9054 #if 0
9055         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
9056                 dev->features |= NETIF_F_TSO;
9057 #endif
9058
9059 #endif
9060
9061         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
9062             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
9063             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
9064                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
9065                 tp->rx_pending = 63;
9066         }
9067
9068         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9069                 tp->pdev_peer = tg3_find_5704_peer(tp);
9070
9071         err = tg3_get_device_address(tp);
9072         if (err) {
9073                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
9074                        "aborting.\n");
9075                 goto err_out_iounmap;
9076         }
9077
9078         /*
9079          * Reset the chip in case the UNDI or EFI driver did not shut it
9080          * down cleanly.  The DMA self test will enable WDMAC and we would
9081          * then see (spurious) pending DMA on the PCI bus at that point.
9082          */
9083         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
9084             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9085                 pci_save_state(tp->pdev);
9086                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
9087                 tg3_halt(tp);
9088         }
9089
9090         err = tg3_test_dma(tp);
9091         if (err) {
9092                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
9093                 goto err_out_iounmap;
9094         }
9095
9096         /* Tigon3 can do ipv4 only... and some chips have buggy
9097          * checksumming.
9098          */
9099         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
9100                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
9101                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9102         } else
9103                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9104
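        /* Annotation: 5788-class parts are excluded from HIGHDMA here,
         * presumably because their DMA engine cannot be trusted with
         * addresses above 4GB.
         */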
9105         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
9106                 dev->features &= ~NETIF_F_HIGHDMA;
9107
9108         /* flow control autonegotiation is default behavior */
9109         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9110
9111         err = register_netdev(dev);
9112         if (err) {
9113                 printk(KERN_ERR PFX "Cannot register net device, "
9114                        "aborting.\n");
9115                 goto err_out_iounmap;
9116         }
9117
9118         pci_set_drvdata(pdev, dev);
9119
9120         /* Now that we have fully set up the chip, save away a snapshot
9121          * of the PCI config space.  We need to restore this after
9122          * GRC_MISC_CFG core clock resets and some resume events.
9123          */
9124         pci_save_state(tp->pdev);
9125
9126         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
9127                dev->name,
9128                tp->board_part_number,
9129                tp->pci_chip_rev_id,
9130                tg3_phy_string(tp),
9131                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
9132                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
9133                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
9134                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
9135                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
9136                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
9137
9138         for (i = 0; i < 6; i++)
9139                 printk("%2.2x%c", dev->dev_addr[i],
9140                        i == 5 ? '\n' : ':');
9141
9142         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
9143                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
9144                "TSOcap[%d]\n",
9145                dev->name,
9146                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
9147                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
9148                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
9149                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
9150                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
9151                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
9152                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
9153
9154         return 0;
9155
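/* Error unwind for the probe path: each label below releases whatever was
 * acquired before the corresponding failure point, in reverse order of setup.
 */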
9156 err_out_iounmap:
9157         iounmap(tp->regs);
9158
9159 err_out_free_dev:
9160         free_netdev(dev);
9161
9162 err_out_free_res:
9163         pci_release_regions(pdev);
9164
9165 err_out_disable_pdev:
9166         pci_disable_device(pdev);
9167         pci_set_drvdata(pdev, NULL);
9168         return err;
9169 }
9170
9171 static void __devexit tg3_remove_one(struct pci_dev *pdev)
9172 {
9173         struct net_device *dev = pci_get_drvdata(pdev);
9174
9175         if (dev) {
9176                 struct tg3 *tp = netdev_priv(dev);
9177
9178                 unregister_netdev(dev);
9179                 iounmap(tp->regs);
9180                 free_netdev(dev);
9181                 pci_release_regions(pdev);
9182                 pci_disable_device(pdev);
9183                 pci_set_drvdata(pdev, NULL);
9184         }
9185 }
9186
9187 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
9188 {
9189         struct net_device *dev = pci_get_drvdata(pdev);
9190         struct tg3 *tp = netdev_priv(dev);
9191         int err;
9192
9193         if (!netif_running(dev))
9194                 return 0;
9195
9196         tg3_netif_stop(tp);
9197
9198         del_timer_sync(&tp->timer);
9199
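        /* Quiesce the device before the power-state change: interrupts are
         * masked and the chip halted under both driver locks, taken in the
         * driver's usual order (tp->lock first, then tp->tx_lock).
         */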
9200         spin_lock_irq(&tp->lock);
9201         spin_lock(&tp->tx_lock);
9202         tg3_disable_ints(tp);
9203         spin_unlock(&tp->tx_lock);
9204         spin_unlock_irq(&tp->lock);
9205
9206         netif_device_detach(dev);
9207
9208         spin_lock_irq(&tp->lock);
9209         spin_lock(&tp->tx_lock);
9210         tg3_halt(tp);
9211         spin_unlock(&tp->tx_lock);
9212         spin_unlock_irq(&tp->lock);
9213
9214         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
9215         if (err) {
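                /* Could not reach the requested power state; re-initialize
                 * the hardware and resume normal operation so the interface
                 * stays usable.
                 */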
9216                 spin_lock_irq(&tp->lock);
9217                 spin_lock(&tp->tx_lock);
9218
9219                 tg3_init_hw(tp);
9220
9221                 tp->timer.expires = jiffies + tp->timer_offset;
9222                 add_timer(&tp->timer);
9223
9224                 netif_device_attach(dev);
9225                 tg3_netif_start(tp);
9226
9227                 spin_unlock(&tp->tx_lock);
9228                 spin_unlock_irq(&tp->lock);
9229         }
9230
9231         return err;
9232 }
9233
9234 static int tg3_resume(struct pci_dev *pdev)
9235 {
9236         struct net_device *dev = pci_get_drvdata(pdev);
9237         struct tg3 *tp = netdev_priv(dev);
9238         int err;
9239
9240         if (!netif_running(dev))
9241                 return 0;
9242
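        /* Reverse the suspend sequence: restore PCI config space, return the
         * chip to full power, re-program the hardware, then restart the
         * timer, interrupts, and queues.
         */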
9243         pci_restore_state(tp->pdev);
9244
9245         err = tg3_set_power_state(tp, 0);
9246         if (err)
9247                 return err;
9248
9249         netif_device_attach(dev);
9250
9251         spin_lock_irq(&tp->lock);
9252         spin_lock(&tp->tx_lock);
9253
9254         tg3_init_hw(tp);
9255
9256         tp->timer.expires = jiffies + tp->timer_offset;
9257         add_timer(&tp->timer);
9258
9259         tg3_enable_ints(tp);
9260
9261         tg3_netif_start(tp);
9262
9263         spin_unlock(&tp->tx_lock);
9264         spin_unlock_irq(&tp->lock);
9265
9266         return 0;
9267 }
9268
9269 static struct pci_driver tg3_driver = {
9270         .name           = DRV_MODULE_NAME,
9271         .id_table       = tg3_pci_tbl,
9272         .probe          = tg3_init_one,
9273         .remove         = __devexit_p(tg3_remove_one),
9274         .suspend        = tg3_suspend,
9275         .resume         = tg3_resume
9276 };
9277
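/* Module entry/exit points: register the PCI driver on load, unregister it on
 * unload.  pci_module_init() is the registration helper of this kernel
 * generation (essentially pci_register_driver()).
 */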
9278 static int __init tg3_init(void)
9279 {
9280         return pci_module_init(&tg3_driver);
9281 }
9282
9283 static void __exit tg3_cleanup(void)
9284 {
9285         pci_unregister_driver(&tg3_driver);
9286 }
9287
9288 module_init(tg3_init);
9289 module_exit(tg3_cleanup);