2 A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
3 ethernet driver for Linux.
4 Copyright (C) 1997 Sten Wang
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 DAVICOM Web-Site: www.davicom.com.tw
18 Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
19 Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>
21 (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
23 Marcelo Tosatti <marcelo@conectiva.com.br> :
24 Made it compile in 2.3 (device to net_device)
26 Alan Cox <alan@lxorguk.ukuu.org.uk> :
27 Cleaned up for kernel merge.
28 Removed the back compatibility support
29 Reformatted, fixing spelling etc as I went
30 Removed IRQ 0-15 assumption
32 Jeff Garzik <jgarzik@pobox.com> :
33 Updated to use new PCI driver API.
34 Resource usage cleanups.
35 Report driver version to user.
37 Tobias Ringstrom <tori@unhappy.mine.nu> :
38 Cleaned up and added SMP safety. Thanks go to Jeff Garzik,
39 Andrew Morton and Frank Davis for the SMP safety fixes.
41 Vojtech Pavlik <vojtech@suse.cz> :
42 Cleaned up pointer arithmetic.
43 Fixed a lot of 64bit issues.
44 Cleaned up printk()s a bit.
45 Fixed some obvious big endian problems.
47 Tobias Ringstrom <tori@unhappy.mine.nu> :
48 Use time_after for jiffies calculation. Added ethtool
49 support. Updated PCI resource allocation. Do not
50 forget to unmap PCI mapped skbs.
52 Alan Cox <alan@lxorguk.ukuu.org.uk>
53 Added new PCI identifiers provided by Clear Zhang at ALi
54 for their 1563 ethernet device.
58 Check on 64 bit boxes.
59 Check and fix on big endian boxes.
61 Test and make sure PCI latency is now correct for all cases.
/* Prefix every pr_*() message with the module name. */
64 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* Driver identity strings reported via ethtool and the probe banner. */
66 #define DRV_NAME "dmfe"
67 #define DRV_VERSION "1.36.4"
68 #define DRV_RELDATE "2002-01-17"
70 #include <linux/module.h>
71 #include <linux/kernel.h>
72 #include <linux/string.h>
73 #include <linux/timer.h>
74 #include <linux/ptrace.h>
75 #include <linux/errno.h>
76 #include <linux/ioport.h>
77 #include <linux/interrupt.h>
78 #include <linux/pci.h>
79 #include <linux/dma-mapping.h>
80 #include <linux/init.h>
81 #include <linux/netdevice.h>
82 #include <linux/etherdevice.h>
83 #include <linux/ethtool.h>
84 #include <linux/skbuff.h>
85 #include <linux/delay.h>
86 #include <linux/spinlock.h>
87 #include <linux/crc32.h>
88 #include <linux/bitops.h>
90 #include <asm/processor.h>
93 #include <asm/uaccess.h>
96 #ifdef CONFIG_TULIP_DM910X
101 /* Board/System/Debug information/definition ---------------- */
102 #define PCI_DM9132_ID 0x91321282 /* Davicom DM9132 ID */
103 #define PCI_DM9102_ID 0x91021282 /* Davicom DM9102 ID */
104 #define PCI_DM9100_ID 0x91001282 /* Davicom DM9100 ID */
105 #define PCI_DM9009_ID 0x90091282 /* Davicom DM9009 ID */
/* I/O window sizes: DM9102 decodes 0x80 bytes, DM9102A/DM9132 0x100 */
107 #define DM9102_IO_SIZE 0x80
108 #define DM9102A_IO_SIZE 0x100
109 #define TX_MAX_SEND_CNT 0x1 /* Maximum tx packet per time */
110 #define TX_DESC_CNT 0x10 /* Allocated Tx descriptors */
111 #define RX_DESC_CNT 0x20 /* Allocated Rx descriptors */
112 #define TX_FREE_DESC_CNT (TX_DESC_CNT - 2) /* Max TX packet count */
113 #define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3) /* TX wakeup count */
114 #define DESC_ALL_CNT (TX_DESC_CNT + RX_DESC_CNT)
115 #define TX_BUF_ALLOC 0x600 /* Tx bounce buffer size, one per Tx descriptor */
116 #define RX_ALLOC_SIZE 0x620 /* Rx DMA buffer size per descriptor */
117 #define DM910X_RESET 1 /* DCR0 software reset bit */
118 #define CR0_DEFAULT 0x00E00000 /* TX & RX burst mode */
119 #define CR6_DEFAULT 0x00080000 /* HD */
120 #define CR7_DEFAULT 0x180c1 /* interrupt enable mask written to DCR7 */
121 #define CR15_DEFAULT 0x06 /* TxJabber RxWatchdog */
122 #define TDES0_ERR_MASK 0x4302 /* TXJT, LC, EC, FUE */
123 #define MAX_PACKET_SIZE 1514 /* largest frame accepted for transmit */
124 #define DMFE_MAX_MULTICAST 14 /* max multicast entries in the setup frame */
125 #define RX_COPY_SIZE 100 /* copy-break threshold for small Rx frames */
126 #define MAX_CHECK_PACKET 0x8000
127 #define DM9801_NOISE_FLOOR 8
128 #define DM9802_NOISE_FLOOR 5
/* Wake-on-LAN event bits kept in db->wol_mode */
130 #define DMFE_WOL_LINKCHANGE 0x20000000
131 #define DMFE_WOL_SAMPLEPACKET 0x10000000
132 #define DMFE_WOL_MAGICPACKET 0x08000000
/* Media mode codes (only part of the list is visible in this chunk) */
136 #define DMFE_100MHF 1
138 #define DMFE_100MFD 5
140 #define DMFE_1M_HPNA 0x10
/* Tx threshold values OR'ed into CR6 */
142 #define DMFE_TXTH_72 0x400000 /* TX TH 72 byte */
143 #define DMFE_TXTH_96 0x404000 /* TX TH 96 byte */
144 #define DMFE_TXTH_128 0x0000 /* TX TH 128 byte */
145 #define DMFE_TXTH_256 0x4000 /* TX TH 256 byte */
146 #define DMFE_TXTH_512 0x8000 /* TX TH 512 byte */
147 #define DMFE_TXTH_1K 0xC000 /* TX TH 1K byte */
149 #define DMFE_TIMER_WUT (jiffies + HZ * 1)/* timer wakeup time : 1 second */
150 #define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s */
151 #define DMFE_TX_KICK (HZ/2) /* tx packet kick-out time 0.5 s */
/* Register accessors; expect a local 'void __iomem *ioaddr' in scope */
153 #define dw32(reg, val) iowrite32(val, ioaddr + (reg))
154 #define dw16(reg, val) iowrite16(val, ioaddr + (reg))
155 #define dr32(reg) ioread32(ioaddr + (reg))
156 #define dr16(reg) ioread16(ioaddr + (reg))
157 #define dr8(reg) ioread8(ioaddr + (reg))
/* Debug print helper; parts of the macro body are not visible in this chunk */
159 #define DMFE_DBUG(dbug_now, msg, value) \
161 if (dmfe_debug || (dbug_now)) \
163 (msg), (long) (value)); \
/* Report a media change; mode bit0 = 100M, bit2 = full duplex */
166 #define SHOW_MEDIA_TYPE(mode) \
167 pr_info("Change Speed to %sMhz %s duplex\n" , \
168 (mode & 1) ? "100":"10", \
169 (mode & 4) ? "full":"half");
172 /* CR9 definition: SROM/MII */
173 #define CR9_SROM_READ 0x4800
175 #define CR9_SRCLK 0x2
176 #define CR9_CRDOUT 0x8
177 #define SROM_DATA_0 0x0
178 #define SROM_DATA_1 0x4
179 #define PHY_DATA_1 0x20000
180 #define PHY_DATA_0 0x00000
181 #define MDCLKH 0x10000
183 #define PHY_POWER_DOWN 0x800
185 #define SROM_V41_CODE 0x14
/* Pick the I/O window size required by this chip ID / revision */
187 #define __CHK_IO_SIZE(pci_id, dev_rev) \
188 (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \
189 DM9102A_IO_SIZE: DM9102_IO_SIZE)
191 #define CHK_IO_SIZE(pci_dev) \
192 (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \
193 (pci_dev)->revision))
/* Legacy alias from the pre-net_device era; used throughout this file */
196 #define DEVICE net_device
198 /* Structure/enum declaration ------------------------------- */
/* Tx descriptor: the first four little-endian words are read by the chip;
 * the remaining fields are driver bookkeeping.  NOTE(review): the struct
 * opening lines are not visible in this chunk. */
200 __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
201 char *tx_buf_ptr; /* Data for us */
202 struct tx_desc *next_tx_desc;
203 } __attribute__(( aligned(32) ));
/* Rx descriptor: same split between chip-visible words and driver fields */
206 __le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
207 struct sk_buff *rx_skb_ptr; /* Data for us */
208 struct rx_desc *next_rx_desc;
209 } __attribute__(( aligned(32) ));
/* Per-adapter private state, obtained via netdev_priv(dev).
 * NOTE(review): several members referenced by the code below (cr0_data,
 * cr5_data, cr6_data, cr7_data, cr15_data, phy_addr, lock, ...) are
 * declared on lines not visible in this chunk. */
211 struct dmfe_board_info {
212 u32 chip_id; /* Chip vendor/Device ID */
213 u8 chip_revision; /* Chip revision */
214 struct net_device *next_dev; /* next device */
215 struct pci_dev *pdev; /* PCI device */
218 void __iomem *ioaddr; /* I/O base address */
225 /* pointer for memory physical address */
226 dma_addr_t buf_pool_dma_ptr; /* Tx buffer pool memory */
227 dma_addr_t buf_pool_dma_start; /* Tx buffer pool align dword */
228 dma_addr_t desc_pool_dma_ptr; /* descriptor pool memory */
229 dma_addr_t first_tx_desc_dma;
230 dma_addr_t first_rx_desc_dma;
232 /* descriptor pointer */
233 unsigned char *buf_pool_ptr; /* Tx buffer pool memory */
234 unsigned char *buf_pool_start; /* Tx buffer pool align dword */
235 unsigned char *desc_pool_ptr; /* descriptor pool memory */
236 struct tx_desc *first_tx_desc;
237 struct tx_desc *tx_insert_ptr;
238 struct tx_desc *tx_remove_ptr;
239 struct rx_desc *first_rx_desc;
240 struct rx_desc *rx_insert_ptr;
241 struct rx_desc *rx_ready_ptr; /* next descriptor a packet arrives on */
242 unsigned long tx_packet_cnt; /* transmitted packet count */
243 unsigned long tx_queue_cnt; /* wait to send packet count */
244 unsigned long rx_avail_cnt; /* available rx descriptor count */
245 unsigned long interval_rx_cnt; /* rx packet count per callback interval */
247 u16 HPNA_command; /* For HPNA register 16 */
248 u16 HPNA_timer; /* For HPNA remote device check */
250 u16 NIC_capability; /* NIC media capability */
251 u16 PHY_reg4; /* Saved Phyxcer register 4 value */
253 u8 HPNA_present; /* 0:none, 1:DM9801, 2:DM9802 */
254 u8 chip_type; /* Keep DM9102A chip type */
255 u8 media_mode; /* user specified media mode */
256 u8 op_mode; /* real working media mode */
258 u8 wait_reset; /* Hardware failed, need to reset */
259 u8 dm910x_chk_mode; /* Operating mode check */
260 u8 first_in_callback; /* Flag to record state */
261 u8 wol_mode; /* user WOL settings */
262 struct timer_list timer; /* periodic link/housekeeping timer (dmfe_timer) */
264 /* Driver defined statistic counters */
265 unsigned long tx_fifo_underrun;
266 unsigned long tx_loss_carrier;
267 unsigned long tx_no_carrier;
268 unsigned long tx_late_collision;
269 unsigned long tx_excessive_collision;
270 unsigned long tx_jabber_timeout;
271 unsigned long reset_count;
272 unsigned long reset_cr8;
273 unsigned long reset_fatal;
274 unsigned long reset_TXtimeout;
/* Raw copy of the 64-word serial EEPROM, filled at probe time */
277 unsigned char srom[128];
/* DM910x control/status register byte offsets (used with dr*/dw* macros) */
281 DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
282 DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
283 DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
/* CR6 (operation mode register) bit definitions */
288 CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
289 CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
290 CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
293 /* Global variable declaration ----------------------------- */
/* Ensures the version banner is printed only once across all probes */
294 static int __devinitdata printed_version;
295 static const char version[] __devinitconst =
296 "Davicom DM9xxx net driver, version " DRV_VERSION " (" DRV_RELDATE ")";
/* Non-zero enables DMFE_DBUG() output */
298 static int dmfe_debug;
299 static unsigned char dmfe_media_mode = DMFE_AUTO;
/* User CR6 override, OR'ed into cr6_data at open time */
300 static u32 dmfe_cr6_user_set;
302 /* For module input parameter */
305 static unsigned char mode = 8;
306 static u8 chkmode = 1;
307 static u8 HPNA_mode; /* Default: Low Power/High Speed */
308 static u8 HPNA_rx_cmd; /* Default: Disable Rx remote command */
309 static u8 HPNA_tx_cmd; /* Default: Don't issue remote command */
310 static u8 HPNA_NoiseFloor; /* Default: HPNA NoiseFloor */
311 static u8 SF_mode; /* Special Function: 1:VLAN, 2:RX Flow Control
312 4: TX pause packet */
315 /* function declaration ------------------------------------- */
/* Forward declarations for the driver's internal routines */
316 static int dmfe_open(struct DEVICE *);
317 static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
318 static int dmfe_stop(struct DEVICE *);
319 static void dmfe_set_filter_mode(struct DEVICE *);
320 static const struct ethtool_ops netdev_ethtool_ops;
321 static u16 read_srom_word(void __iomem *, int);
322 static irqreturn_t dmfe_interrupt(int , void *);
/* NOTE(review): the matching #endif is on a line not visible in this chunk */
323 #ifdef CONFIG_NET_POLL_CONTROLLER
324 static void poll_dmfe (struct net_device *dev);
326 static void dmfe_descriptor_init(struct net_device *);
327 static void allocate_rx_buffer(struct net_device *);
328 static void update_cr6(u32, void __iomem *);
329 static void send_filter_frame(struct DEVICE *);
330 static void dm9132_id_table(struct DEVICE *);
331 static u16 phy_read(void __iomem *, u8, u8, u32);
332 static void phy_write(void __iomem *, u8, u8, u16, u32);
333 static void phy_write_1bit(void __iomem *, u32);
334 static u16 phy_read_1bit(void __iomem *);
335 static u8 dmfe_sense_speed(struct dmfe_board_info *);
336 static void dmfe_process_mode(struct dmfe_board_info *);
337 static void dmfe_timer(unsigned long);
338 static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
339 static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
340 static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
341 static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
342 static void dmfe_dynamic_reset(struct DEVICE *);
343 static void dmfe_free_rxbuffer(struct dmfe_board_info *);
344 static void dmfe_init_dm910x(struct DEVICE *);
345 static void dmfe_parse_srom(struct dmfe_board_info *);
346 static void dmfe_program_DM9801(struct dmfe_board_info *, int);
347 static void dmfe_program_DM9802(struct dmfe_board_info *);
348 static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
349 static void dmfe_set_phyxcer(struct dmfe_board_info *);
351 /* DM910X network board routine ---------------------------- */
/* Standard net_device callbacks.  NOTE(review): the closing brace of this
 * initializer is on a line not visible in this chunk. */
353 static const struct net_device_ops netdev_ops = {
354 .ndo_open = dmfe_open,
355 .ndo_stop = dmfe_stop,
356 .ndo_start_xmit = dmfe_start_xmit,
357 .ndo_set_rx_mode = dmfe_set_filter_mode,
358 .ndo_change_mtu = eth_change_mtu,
359 .ndo_set_mac_address = eth_mac_addr,
360 .ndo_validate_addr = eth_validate_addr,
361 #ifdef CONFIG_NET_POLL_CONTROLLER
362 .ndo_poll_controller = poll_dmfe,
367 * Search DM910X board, allocate space and register it
/*
 * PCI probe: allocate the net_device, map BAR 0, allocate DMA descriptor
 * and buffer pools, read the MAC address from the SROM, and register the
 * interface.  NOTE(review): several lines (declarations, error labels,
 * some cleanup paths) are not visible in this chunk.
 */
370 static int __devinit dmfe_init_one (struct pci_dev *pdev,
371 const struct pci_device_id *ent)
373 struct dmfe_board_info *db; /* board information structure */
374 struct net_device *dev;
378 DMFE_DBUG(0, "dmfe_init_one()", 0);
/* Print the version banner only on the first probed device */
380 if (!printed_version++)
381 pr_info("%s\n", version);
384 * SPARC on-board DM910x chips should be handled by the main
385 * tulip driver, except for early DM9100s.
387 #ifdef CONFIG_TULIP_DM910X
388 if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) ||
389 ent->driver_data == PCI_DM9102_ID) {
390 struct device_node *dp = pci_device_to_OF_node(pdev);
392 if (dp && of_get_property(dp, "local-mac-address", NULL)) {
393 pr_info("skipping on-board DM910x (use tulip)\n");
399 /* Init network device */
400 dev = alloc_etherdev(sizeof(*db));
403 SET_NETDEV_DEV(dev, &pdev->dev);
/* Hardware can only address 32-bit DMA */
405 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
406 pr_warn("32-bit PCI DMA not available\n");
411 /* Enable Master/IO access, Disable memory access */
412 err = pci_enable_device(pdev);
416 if (!pci_resource_start(pdev, 0)) {
417 pr_err("I/O base is zero\n");
419 goto err_out_disable;
422 if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) {
423 pr_err("Allocated I/O size too small\n");
425 goto err_out_disable;
428 #if 0 /* pci_{enable_device,set_master} sets minimum latency for us now */
430 /* Set Latency Timer 80h */
431 /* FIXME: setting values > 32 breaks some SiS 559x stuff.
432 Need a PCI quirk.. */
434 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
437 if (pci_request_regions(pdev, DRV_NAME)) {
438 pr_err("Failed to request PCI regions\n");
440 goto err_out_disable;
443 /* Init system & device */
444 db = netdev_priv(dev);
446 /* Allocate Tx/Rx descriptor memory */
447 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
448 DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
449 if (!db->desc_pool_ptr) {
/* Tx bounce buffer pool: TX_BUF_ALLOC bytes per Tx descriptor */
454 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
455 TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
456 if (!db->buf_pool_ptr) {
458 goto err_out_free_desc;
461 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
462 db->first_tx_desc_dma = db->desc_pool_dma_ptr;
463 db->buf_pool_start = db->buf_pool_ptr;
464 db->buf_pool_dma_start = db->buf_pool_dma_ptr;
466 db->chip_id = ent->driver_data;
/* Map whole BAR 0 (length 0 means the full resource) */
468 db->ioaddr = pci_iomap(pdev, 0, 0);
471 goto err_out_free_buf;
474 db->chip_revision = pdev->revision;
479 pci_set_drvdata(pdev, dev);
480 dev->netdev_ops = &netdev_ops;
481 dev->ethtool_ops = &netdev_ethtool_ops;
482 netif_carrier_off(dev);
483 spin_lock_init(&db->lock);
/* Config dword 0x50: value 0x10000 on rev 0x31 identifies DM9102A E3 */
485 pci_read_config_dword(pdev, 0x50, &pci_pmr);
487 if ( (pci_pmr == 0x10000) && (db->chip_revision == 0x31) )
488 db->chip_type = 1; /* DM9102A E3 */
492 /* read 64 word srom data */
493 for (i = 0; i < 64; i++) {
494 ((__le16 *) db->srom)[i] =
495 cpu_to_le16(read_srom_word(db->ioaddr, i));
498 /* Set Node address: MAC lives at SROM byte offset 20 */
499 for (i = 0; i < 6; i++)
500 dev->dev_addr[i] = db->srom[20 + i];
502 err = register_netdev (dev);
506 dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
507 ent->driver_data >> 16,
508 pci_name(pdev), dev->dev_addr, pdev->irq);
510 pci_set_master(pdev);
/* Error unwind: release resources in reverse order of acquisition */
515 pci_iounmap(pdev, db->ioaddr);
517 pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
518 db->buf_pool_ptr, db->buf_pool_dma_ptr);
520 pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
521 db->desc_pool_ptr, db->desc_pool_dma_ptr);
523 pci_release_regions(pdev);
525 pci_disable_device(pdev);
527 pci_set_drvdata(pdev, NULL);
/* PCI remove: unregister the interface and free everything acquired in
 * dmfe_init_one() (iomap, DMA pools, PCI regions, net_device). */
534 static void __devexit dmfe_remove_one (struct pci_dev *pdev)
536 struct net_device *dev = pci_get_drvdata(pdev);
537 struct dmfe_board_info *db = netdev_priv(dev);
539 DMFE_DBUG(0, "dmfe_remove_one()", 0);
543 unregister_netdev(dev);
544 pci_iounmap(db->pdev, db->ioaddr);
545 pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
546 DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
547 db->desc_pool_dma_ptr);
548 pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
549 db->buf_pool_ptr, db->buf_pool_dma_ptr);
550 pci_release_regions(pdev);
551 free_netdev(dev); /* free board information */
553 pci_set_drvdata(pdev, NULL);
556 DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
561 * Open the interface.
562 * The interface is opened whenever "ifconfig" activates it.
/* ndo_open: request the (shared) IRQ, initialize per-open state, bring up
 * the hardware, and start the periodic housekeeping timer. */
565 static int dmfe_open(struct DEVICE *dev)
567 struct dmfe_board_info *db = netdev_priv(dev);
568 const int irq = db->pdev->irq;
571 DMFE_DBUG(0, "dmfe_open", 0);
573 ret = request_irq(irq, dmfe_interrupt, IRQF_SHARED, dev->name, dev);
577 /* system variable init */
578 db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
579 db->tx_packet_cnt = 0;
580 db->tx_queue_cnt = 0;
581 db->rx_avail_cnt = 0;
584 db->first_in_callback = 0;
585 db->NIC_capability = 0xf; /* All capability*/
586 db->PHY_reg4 = 0x1e0;
588 /* CR6 operation mode decision */
589 if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
590 (db->chip_revision >= 0x30) ) {
591 db->cr6_data |= DMFE_TXTH_256;
592 db->cr0_data = CR0_DEFAULT;
593 db->dm910x_chk_mode=4; /* Enter the normal mode */
595 db->cr6_data |= CR6_SFT; /* Store & Forward mode */
597 db->dm910x_chk_mode = 1; /* Enter the check mode */
600 /* Initialize DM910X board */
601 dmfe_init_dm910x(dev);
603 /* Activate System Interface */
604 netif_wake_queue(dev);
606 /* set and activate a timer process */
607 init_timer(&db->timer);
/* First expiry delayed an extra 2 s beyond the normal 1 s period */
608 db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
609 db->timer.data = (unsigned long)dev;
610 db->timer.function = dmfe_timer;
611 add_timer(&db->timer);
617 /* Initialize DM910X board
619 * Initialize TX/Rx descriptor chain structure
620 * Send the set-up frame
621 * Enable Tx/Rx machine
/* Called with fresh per-open state; resets the MAC, the external PHY
 * (via the GPR port), programs the descriptor rings, downloads the
 * address filter, and finally enables the Tx/Rx engines. */
624 static void dmfe_init_dm910x(struct DEVICE *dev)
626 struct dmfe_board_info *db = netdev_priv(dev);
627 void __iomem *ioaddr = db->ioaddr;
629 DMFE_DBUG(0, "dmfe_init_dm910x()", 0);
631 /* Reset DM910x MAC controller */
632 dw32(DCR0, DM910X_RESET); /* RESET MAC */
634 dw32(DCR0, db->cr0_data);
637 /* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
640 /* Parse SROM and media mode */
642 db->media_mode = dmfe_media_mode;
644 /* RESET Phyxcer Chip by GPR port bit 7 */
645 dw32(DCR12, 0x180); /* Let bit 7 output port */
646 if (db->chip_id == PCI_DM9009_ID) {
647 dw32(DCR12, 0x80); /* Issue RESET signal */
648 mdelay(300); /* Delay 300 ms */
650 dw32(DCR12, 0x0); /* Clear RESET signal */
652 /* Process Phyxcer Media Mode */
653 if ( !(db->media_mode & 0x10) ) /* Force 1M mode */
654 dmfe_set_phyxcer(db);
656 /* Media Mode Process */
657 if ( !(db->media_mode & DMFE_AUTO) )
658 db->op_mode = db->media_mode; /* Force Mode */
660 /* Initialize Transmit/Receive descriptor and CR3/4 */
661 dmfe_descriptor_init(dev);
663 /* Init CR6 to program DM910x operation */
664 update_cr6(db->cr6_data, ioaddr);
666 /* Send setup frame: DM9132 uses a register ID table, others a filter frame */
667 if (db->chip_id == PCI_DM9132_ID)
668 dm9132_id_table(dev); /* DM9132 */
670 send_filter_frame(dev); /* DM9102/DM9102A */
672 /* Init CR7, interrupt active bit */
673 db->cr7_data = CR7_DEFAULT;
674 dw32(DCR7, db->cr7_data);
676 /* Init CR15, Tx jabber and Rx watchdog timer */
677 dw32(DCR15, db->cr15_data);
679 /* Enable DM910X Tx/Rx function */
680 db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
681 update_cr6(db->cr6_data, ioaddr);
686 * Hardware start transmission.
687 * Send a packet to media from the upper layer.
/* ndo_start_xmit: copy the skb into the per-descriptor bounce buffer,
 * hand the descriptor to the chip (or queue it when TX_MAX_SEND_CNT
 * packets are already in flight), and kick Tx polling via DCR1. */
690 static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
693 struct dmfe_board_info *db = netdev_priv(dev);
694 void __iomem *ioaddr = db->ioaddr;
695 struct tx_desc *txptr;
698 DMFE_DBUG(0, "dmfe_start_xmit", 0);
700 /* Too large packet check */
701 if (skb->len > MAX_PACKET_SIZE) {
702 pr_err("big packet = %d\n", (u16)skb->len);
707 /* Resource flag check */
708 netif_stop_queue(dev);
710 spin_lock_irqsave(&db->lock, flags);
712 /* No Tx resource check, it should never happen normally */
713 if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
714 spin_unlock_irqrestore(&db->lock, flags);
715 pr_err("No Tx resource %ld\n", db->tx_queue_cnt);
716 return NETDEV_TX_BUSY;
719 /* Disable NIC interrupt */
722 /* transmit this packet */
723 txptr = db->tx_insert_ptr;
724 skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
725 txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
727 /* Point to next transmit free descriptor */
728 db->tx_insert_ptr = txptr->next_tx_desc;
730 /* Transmit Packet Process */
731 if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
732 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
733 db->tx_packet_cnt++; /* Ready to send */
734 dw32(DCR1, 0x1); /* Issue Tx polling */
735 dev->trans_start = jiffies; /* saved time stamp */
737 db->tx_queue_cnt++; /* queue TX packet */
738 dw32(DCR1, 0x1); /* Issue Tx polling */
741 /* Tx resource check */
742 if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
743 netif_wake_queue(dev);
745 /* Restore CR7 to enable interrupt */
746 spin_unlock_irqrestore(&db->lock, flags);
747 dw32(DCR7, db->cr7_data);
757 * Stop the interface.
758 * The interface is stopped when it is brought down.
/* ndo_stop: quiesce the queue and timer, reset the MAC, power down the
 * PHY, release the IRQ and Rx buffers, and dump the driver counters. */
761 static int dmfe_stop(struct DEVICE *dev)
763 struct dmfe_board_info *db = netdev_priv(dev);
764 void __iomem *ioaddr = db->ioaddr;
766 DMFE_DBUG(0, "dmfe_stop", 0);
769 netif_stop_queue(dev);
772 del_timer_sync(&db->timer);
774 /* Reset & stop DM910X board */
775 dw32(DCR0, DM910X_RESET);
/* PHY register 0 bit 15: software reset / power-down the transceiver */
777 phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
780 free_irq(db->pdev->irq, dev);
782 /* free allocated rx buffer */
783 dmfe_free_rxbuffer(db);
786 /* show statistic counters */
787 printk("FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
788 db->tx_fifo_underrun, db->tx_excessive_collision,
789 db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
790 db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
791 db->reset_fatal, db->reset_TXtimeout);
799 * DM9102 interrupt handler
800 * receive the packet to upper layer, free the transmitted packet
/* IRQ handler (shared): acknowledge CR5 status, service Rx/Tx completions,
 * refill the Rx ring, and flag a reset on fatal bus errors. */
803 static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
805 struct DEVICE *dev = dev_id;
806 struct dmfe_board_info *db = netdev_priv(dev);
807 void __iomem *ioaddr = db->ioaddr;
810 DMFE_DBUG(0, "dmfe_interrupt()", 0);
812 spin_lock_irqsave(&db->lock, flags);
814 /* Got DM910X status; writing it back acknowledges the events */
815 db->cr5_data = dr32(DCR5);
816 dw32(DCR5, db->cr5_data);
/* Nothing we handle (Rx 0x40, Tx 0x01, bus error 0x80) is pending */
817 if ( !(db->cr5_data & 0xc1) ) {
818 spin_unlock_irqrestore(&db->lock, flags);
822 /* Disable all interrupts in CR7 to solve the interrupt edge problem */
825 /* Check system status */
826 if (db->cr5_data & 0x2000) {
827 /* system bus error happened */
828 DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
830 db->wait_reset = 1; /* Need to RESET */
831 spin_unlock_irqrestore(&db->lock, flags);
835 /* Received the incoming packet */
836 if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
837 dmfe_rx_packet(dev, db);
839 /* reallocate rx descriptor buffers */
840 if (db->rx_avail_cnt<RX_DESC_CNT)
841 allocate_rx_buffer(dev);
843 /* Free the transmitted descriptor */
844 if ( db->cr5_data & 0x01)
845 dmfe_free_tx_pkt(dev, db);
/* Leave early-Tx check mode (bit 1) once an interrupt has been serviced */
848 if (db->dm910x_chk_mode & 0x2) {
849 db->dm910x_chk_mode = 0x4;
850 db->cr6_data |= 0x100;
851 update_cr6(db->cr6_data, ioaddr);
854 /* Restore CR7 to enable interrupt mask */
855 dw32(DCR7, db->cr7_data);
857 spin_unlock_irqrestore(&db->lock, flags);
862 #ifdef CONFIG_NET_POLL_CONTROLLER
864 * Polling 'interrupt' - used by things like netconsole to send skbs
865 * without having to re-enable interrupts. It's not called while
866 * the interrupt routine is executing.
/* ndo_poll_controller: run the IRQ handler directly with the device IRQ
 * masked.  NOTE(review): the disable/enable_irq lines are not visible in
 * this chunk. */
869 static void poll_dmfe (struct net_device *dev)
871 struct dmfe_board_info *db = netdev_priv(dev);
872 const int irq = db->pdev->irq;
874 /* disable_irq here is not very nice, but with the lockless
875 interrupt handler we have no other choice. */
877 dmfe_interrupt (irq, dev);
883 * Free TX resource after TX complete
/* Walk completed Tx descriptors from tx_remove_ptr, update statistics,
 * start the next queued packet (if any), and wake the queue when enough
 * descriptors are free.  Caller holds db->lock. */
886 static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
888 struct tx_desc *txptr;
889 void __iomem *ioaddr = db->ioaddr;
892 txptr = db->tx_remove_ptr;
893 while(db->tx_packet_cnt) {
894 tdes0 = le32_to_cpu(txptr->tdes0);
/* Owner bit still set: the chip has not finished this descriptor */
895 if (tdes0 & 0x80000000)
898 /* A packet send completed */
900 dev->stats.tx_packets++;
902 /* Transmit statistic counters */
903 if ( tdes0 != 0x7fffffff ) {
904 dev->stats.collisions += (tdes0 >> 3) & 0xf;
905 dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
906 if (tdes0 & TDES0_ERR_MASK) {
907 dev->stats.tx_errors++;
909 if (tdes0 & 0x0002) { /* UnderRun */
910 db->tx_fifo_underrun++;
/* After an underrun, switch to store-and-forward mode */
911 if ( !(db->cr6_data & CR6_SFT) ) {
912 db->cr6_data = db->cr6_data | CR6_SFT;
913 update_cr6(db->cr6_data, ioaddr);
917 db->tx_excessive_collision++;
919 db->tx_late_collision++;
923 db->tx_loss_carrier++;
925 db->tx_jabber_timeout++;
929 txptr = txptr->next_tx_desc;
932 /* Update TX remove pointer to next */
933 db->tx_remove_ptr = txptr;
935 /* Send the Tx packet in queue */
936 if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
937 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
938 db->tx_packet_cnt++; /* Ready to send */
940 dw32(DCR1, 0x1); /* Issue Tx polling */
941 dev->trans_start = jiffies; /* saved time stamp */
944 /* Resource available check */
945 if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
946 netif_wake_queue(dev); /* Activate upper layer, send again */
951 * Calculate the CRC value of the Rx packet
952 * flag = 1 : return the inverted CRC (for the received packet CRC)
953 * 0 : return the normal CRC (for Hash Table index)
956 static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
958 u32 crc = crc32(~0, Data, Len);
959 if (flag) crc = ~crc;
/* NOTE(review): the 'return crc;' line is not visible in this chunk */
965 * Receive the incoming packet and pass it to the upper layer
/* Drain completed Rx descriptors from rx_ready_ptr: unmap the DMA buffer,
 * validate status, copy small frames into a fresh skb (copy-break), and
 * pass good frames up the stack; errored buffers are recycled. */
968 static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
970 struct rx_desc *rxptr;
971 struct sk_buff *skb, *newskb;
975 rxptr = db->rx_ready_ptr;
977 while(db->rx_avail_cnt) {
978 rdes0 = le32_to_cpu(rxptr->rdes0);
979 if (rdes0 & 0x80000000) /* packet owner check */
983 db->interval_rx_cnt++;
985 pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
986 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
/* Bits 8-9: First/Last descriptor flags; we only accept whole frames */
988 if ( (rdes0 & 0x300) != 0x300) {
989 /* A packet without First/Last flag */
991 DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
992 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
994 /* A packet with First/Last flag */
/* Frame length from rdes0 bits 16-29, minus the 4-byte FCS */
995 rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;
997 /* error summary bit check */
998 if (rdes0 & 0x8000) {
999 /* This is an error packet */
1000 dev->stats.rx_errors++;
1002 dev->stats.rx_fifo_errors++;
1004 dev->stats.rx_crc_errors++;
1006 dev->stats.rx_length_errors++;
/* Accept good frames, or any frame > 6 bytes in promiscuous mode */
1009 if ( !(rdes0 & 0x8000) ||
1010 ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
1011 skb = rxptr->rx_skb_ptr;
1013 /* Received Packet CRC check needed or not */
1014 if ( (db->dm910x_chk_mode & 1) &&
1015 (cal_CRC(skb->data, rxlen, 1) !=
1016 (*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) */
1017 /* Found an error in the received packet */
1018 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
1019 db->dm910x_chk_mode = 3;
1021 /* Good packet, send to upper layer */
1022 /* Short packet is copied into a new SKB (copy-break) */
1023 if ((rxlen < RX_COPY_SIZE) &&
1024 ((newskb = netdev_alloc_skb(dev, rxlen + 2))
1028 /* size less than COPY_SIZE, allocate a rxlen SKB */
1029 skb_reserve(skb, 2); /* 16byte align */
1030 skb_copy_from_linear_data(rxptr->rx_skb_ptr,
1031 skb_put(skb, rxlen),
1033 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
1035 skb_put(skb, rxlen);
1037 skb->protocol = eth_type_trans(skb, dev);
1039 dev->stats.rx_packets++;
1040 dev->stats.rx_bytes += rxlen;
1043 /* Reuse SKB buffer when the packet is in error */
1044 DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
1045 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
1049 rxptr = rxptr->next_rx_desc;
1052 db->rx_ready_ptr = rxptr;
1056 * Set DM910X multicast address
/* ndo_set_rx_mode: pick promiscuous / all-multicast / perfect filtering
 * based on dev->flags and the multicast list size, then program the chip
 * under db->lock. */
1059 static void dmfe_set_filter_mode(struct DEVICE * dev)
1061 struct dmfe_board_info *db = netdev_priv(dev);
1062 unsigned long flags;
1063 int mc_count = netdev_mc_count(dev);
1065 DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
1066 spin_lock_irqsave(&db->lock, flags);
1068 if (dev->flags & IFF_PROMISC) {
1069 DMFE_DBUG(0, "Enable PROM Mode", 0);
1070 db->cr6_data |= CR6_PM | CR6_PBF;
1071 update_cr6(db->cr6_data, db->ioaddr);
1072 spin_unlock_irqrestore(&db->lock, flags);
/* Too many groups for the 14-entry perfect filter: pass all multicast */
1076 if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) {
1077 DMFE_DBUG(0, "Pass all multicast address", mc_count);
1078 db->cr6_data &= ~(CR6_PM | CR6_PBF);
1079 db->cr6_data |= CR6_PAM;
1080 spin_unlock_irqrestore(&db->lock, flags);
1084 DMFE_DBUG(0, "Set multicast address", mc_count);
1085 if (db->chip_id == PCI_DM9132_ID)
1086 dm9132_id_table(dev); /* DM9132 */
1088 send_filter_frame(dev); /* DM9102/DM9102A */
1089 spin_unlock_irqrestore(&db->lock, flags);
/* ethtool .get_drvinfo: report driver name, version and PCI bus address */
1096 static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
1097 struct ethtool_drvinfo *info)
1099 struct dmfe_board_info *np = netdev_priv(dev);
1101 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1102 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1103 strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
/* ethtool .set_wol: store the requested WoL options; reject anything
 * beyond link-change/magic-packet style wakeups (see get_wol below). */
1106 static int dmfe_ethtool_set_wol(struct net_device *dev,
1107 struct ethtool_wolinfo *wolinfo)
1109 struct dmfe_board_info *db = netdev_priv(dev);
1111 if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
1112 WAKE_ARP | WAKE_MAGICSECURE))
1115 db->wol_mode = wolinfo->wolopts;
/* ethtool .get_wol: advertise PHY/magic-packet wakeup and report the
 * currently configured modes */
1119 static void dmfe_ethtool_get_wol(struct net_device *dev,
1120 struct ethtool_wolinfo *wolinfo)
1122 struct dmfe_board_info *db = netdev_priv(dev);
1124 wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
1125 wolinfo->wolopts = db->wol_mode;
/* ethtool callbacks.  NOTE(review): the closing brace of this initializer
 * is on a line not visible in this chunk. */
1129 static const struct ethtool_ops netdev_ethtool_ops = {
1130 .get_drvinfo = dmfe_ethtool_get_drvinfo,
1131 .get_link = ethtool_op_get_link,
1132 .set_wol = dmfe_ethtool_set_wol,
1133 .get_wol = dmfe_ethtool_get_wol,
1137 * A periodic timer routine
1138 * Dynamic media sense, allocate Rx buffer...
/* Periodic housekeeping timer: dynamic media sense, TX watchdog/kick,
 * dynamic chip reset and HPNA remote-command check.  Re-arms itself at
 * DMFE_TIMER_WUT before returning.
 * NOTE(review): old-style timer callback (unsigned long cookie) — the
 * modern kernel API passes a struct timer_list *; confirm target kernel. */
static void dmfe_timer(unsigned long data)
	struct net_device *dev = (struct net_device *)data;
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	unsigned char tmp_cr12;
	unsigned long flags;
	int link_ok, link_ok_phy;

	DMFE_DBUG(0, "dmfe_timer()", 0);
	spin_lock_irqsave(&db->lock, flags);

	/* Media mode process when Link OK before enter this route */
	if (db->first_in_callback == 0) {
		db->first_in_callback = 1;
		if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
			/* First pass on a DM9102 variant: restart PHY
			 * auto-negotiation (BMCR = 0x1000) with the MII
			 * select bit (CR6 bit 18) temporarily cleared. */
			db->cr6_data &= ~0x40000;
			update_cr6(db->cr6_data, ioaddr);
			phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
			db->cr6_data |= 0x40000;
			update_cr6(db->cr6_data, ioaddr);
			/* give auto-negotiation extra time before next tick */
			db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
			add_timer(&db->timer);
			spin_unlock_irqrestore(&db->lock, flags);

	/* Operating Mode Check */
	if ( (db->dm910x_chk_mode & 0x1) &&
		(dev->stats.rx_packets > MAX_CHECK_PACKET) )
		db->dm910x_chk_mode = 0x4;

	/* Dynamic reset DM910X : system error or transmit time-out */
	tmp_cr8 = dr32(DCR8);
	if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
		db->interval_rx_cnt = 0;

	/* TX polling kick monitor: if packets are pending and nothing has
	 * been transmitted for DMFE_TX_KICK jiffies, poke the TX engine. */
	if ( db->tx_packet_cnt &&
	     time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) {
		dw32(DCR1, 0x1);   /* Tx polling again */

	/* TX stalled past the hard timeout: schedule a full dynamic reset */
	if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) {
		db->reset_TXtimeout++;
		dev_warn(&dev->dev, "Tx timeout - resetting\n");

	if (db->wait_reset) {
		DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
		dmfe_dynamic_reset(dev);
		db->first_in_callback = 0;
		db->timer.expires = DMFE_TIMER_WUT;
		add_timer(&db->timer);
		spin_unlock_irqrestore(&db->lock, flags);

	/* Link status check, Dynamic media type change */
	if (db->chip_id == PCI_DM9132_ID)
		tmp_cr12 = dr8(DCR9 + 3);	/* DM9132 */
		tmp_cr12 = dr8(DCR12);		/* DM9102/DM9102A */

	if ( ((db->chip_id == PCI_DM9102_ID) &&
		(db->chip_revision == 0x30)) ||
		((db->chip_id == PCI_DM9132_ID) &&
		(db->chip_revision == 0x10)) ) {
		/* 0x43 is used instead of 0x3 because bit 6 should represent
		 * link status of external PHY */
		link_ok = (tmp_cr12 & 0x43) ? 1 : 0;

	/* If chip reports that link is failed it could be because the
	 * external PHY link status pin is not connected correctly to the
	 * chip.  To be sure, ask the PHY too. */

	/* need a dummy read because of PHY's register latch */
	phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
	/* BMSR bit 2 = link status */
	link_ok_phy = (phy_read (db->ioaddr,
				 db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;

	if (link_ok_phy != link_ok) {
		DMFE_DBUG (0, "PHY and chip report different link status", 0);
		/* trust whichever source says the link is up */
		link_ok = link_ok | link_ok_phy;

	if ( !link_ok && netif_carrier_ok(dev)) {
		/* Link went down */
		DMFE_DBUG(0, "Link Failed", tmp_cr12);
		netif_carrier_off(dev);

		/* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
		/* AUTO or force 1M Homerun/Longrun don't need it */
		if ( !(db->media_mode & 0x38) )
			phy_write(db->ioaddr, db->phy_addr,
				  0, 0x1000, db->chip_id);

		/* AUTO mode, if INT phyxcer link failed, select EXT device */
		if (db->media_mode & DMFE_AUTO) {
			/* 10/100M link failed, use 1M Home-Net */
			db->cr6_data|=0x00040000;	/* bit18=1, MII */
			db->cr6_data&=~0x00000200;	/* bit9=0, HD mode */
			update_cr6(db->cr6_data, ioaddr);
	} else if (!netif_carrier_ok(dev)) {
		/* Link came (back) up */
		DMFE_DBUG(0, "Link link OK", tmp_cr12);

		/* Auto Sense Speed; in AUTO mode only report carrier once
		 * speed sensing succeeds */
		if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
			netif_carrier_on(dev);
			SHOW_MEDIA_TYPE(db->op_mode);
		dmfe_process_mode(db);

	/* HPNA remote command check */
	if (db->HPNA_command & 0xf00) {
		if (!db->HPNA_timer)
			dmfe_HPNA_remote_cmd_chk(db);

	/* Timer active again */
	db->timer.expires = DMFE_TIMER_WUT;
	add_timer(&db->timer);
	spin_unlock_irqrestore(&db->lock, flags);
1292 * Dynamic reset the DM910X board
1294 * Free Tx/Rx allocated memory
1295 * Reset DM910X board
1296 * Re-initialize DM910X board
/* Dynamically reset the DM910X board:
 *  - stop the MAC and mask interrupts,
 *  - free all RX buffers and clear the software counters,
 *  - re-initialize the chip and restart the TX queue.
 * Called from dmfe_timer() with db->lock held. */
static void dmfe_dynamic_reset(struct net_device *dev)
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);

	/* Stop MAC controller */
	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);	/* Disable Tx/Rx */
	update_cr6(db->cr6_data, ioaddr);
	dw32(DCR7, 0);				/* Disable Interrupt */
	/* ack pending status by writing it back (presumably write-1-to-clear
	 * — verify against the DM910x datasheet) */
	dw32(DCR5, dr32(DCR5));

	/* Disable upper layer interface */
	netif_stop_queue(dev);

	/* Free allocated Rx buffers */
	dmfe_free_rxbuffer(db);

	/* system variable init */
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	netif_carrier_off(dev);

	/* Re-initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Restart upper layer interface */
	netif_wake_queue(dev);
1334 * free all allocated rx buffer
/* Free every skb currently queued on the RX ready list, walking the
 * descriptor ring from rx_ready_ptr until rx_avail_cnt is exhausted. */
static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
	DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);

	/* free allocated rx buffers */
	while (db->rx_avail_cnt) {
		dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
		db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1351 * Reuse the SK buffer
/* Re-attach an skb to the next free RX descriptor (DMA-map it and hand
 * ownership back to the chip by setting rdes0 bit 31).  If the insert
 * descriptor is still owned by the chip, the skb is NOT queued — only a
 * debug message is emitted (the caller leaks/keeps the skb in that case). */
static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
	struct rx_desc *rxptr = db->rx_insert_ptr;

	if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
		rxptr->rx_skb_ptr = skb;
		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev,
			    skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
		/* give the descriptor back to the chip (OWN bit) */
		rxptr->rdes0 = cpu_to_le32(0x80000000);
		db->rx_insert_ptr = rxptr->next_rx_desc;
		DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
1372 * Initialize transmit/Receive descriptor
1373 * Using Chain structure, and allocate Tx/Rx buffer
/* Initialize the TX/RX descriptor rings as chained structures:
 * the RX ring lives immediately after the TX ring in the same DMA
 * allocation; each TX descriptor gets a fixed bounce buffer from
 * buf_pool, and both rings are closed into circles.  Finally the ring
 * base addresses are programmed into DCR3/DCR4 and RX skbs are
 * pre-allocated. */
static void dmfe_descriptor_init(struct net_device *dev)
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	struct tx_desc *tmp_tx;
	struct rx_desc *tmp_rx;
	unsigned char *tmp_buf;
	dma_addr_t tmp_tx_dma, tmp_rx_dma;
	dma_addr_t tmp_buf_dma;

	DMFE_DBUG(0, "dmfe_descriptor_init()", 0);

	/* tx descriptor start pointer */
	db->tx_insert_ptr = db->first_tx_desc;
	db->tx_remove_ptr = db->first_tx_desc;
	dw32(DCR4, db->first_tx_desc_dma);	/* TX DESC address */

	/* rx descriptor start pointer: placed right after the TX ring */
	db->first_rx_desc = (void *)db->first_tx_desc +
			sizeof(struct tx_desc) * TX_DESC_CNT;
	db->first_rx_desc_dma =  db->first_tx_desc_dma +
			sizeof(struct tx_desc) * TX_DESC_CNT;
	db->rx_insert_ptr = db->first_rx_desc;
	db->rx_ready_ptr = db->first_rx_desc;
	dw32(DCR3, db->first_rx_desc_dma);	/* RX DESC address */

	/* Init Transmit chain */
	tmp_buf = db->buf_pool_start;
	tmp_buf_dma = db->buf_pool_dma_start;
	tmp_tx_dma = db->first_tx_desc_dma;
	for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
		tmp_tx->tx_buf_ptr = tmp_buf;
		tmp_tx->tdes0 = cpu_to_le32(0);
		tmp_tx->tdes1 = cpu_to_le32(0x81000000);	/* IC, chain */
		tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
		/* tdes3 points at the NEXT descriptor (chained mode) */
		tmp_tx_dma += sizeof(struct tx_desc);
		tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
		tmp_tx->next_tx_desc = tmp_tx + 1;
		tmp_buf = tmp_buf + TX_BUF_ALLOC;
		tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
	/* close the ring: last descriptor chains back to the first */
	(--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
	tmp_tx->next_tx_desc = db->first_tx_desc;

	/* Init Receive descriptor chain */
	tmp_rx_dma=db->first_rx_desc_dma;
	for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
		tmp_rx->rdes0 = cpu_to_le32(0);
		tmp_rx->rdes1 = cpu_to_le32(0x01000600);
		tmp_rx_dma += sizeof(struct rx_desc);
		tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
		tmp_rx->next_rx_desc = tmp_rx + 1;
	/* close the RX ring as well */
	(--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
	tmp_rx->next_rx_desc = db->first_rx_desc;

	/* pre-allocate Rx buffers */
	allocate_rx_buffer(dev);
1441 * Firstly stop DM910X , then written value and start
/* Update chip register CR6 safely: first write the value with the TX/RX
 * start bits (0x2002) cleared to stop the engines, then write the real
 * value. */
static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
	cr6_tmp = cr6_data & ~0x2002;           /* stop Tx/Rx */
	dw32(DCR6, cr6_tmp);
	dw32(DCR6, cr6_data);
1457 * Send a setup frame for DM9132
1458 * This setup frame initialize DM910X address filter mode
/* Program the DM9132 address filter directly through its MAC MD table
 * registers (at ioaddr + 0xc0): station address, then a 64-bit multicast
 * hash table with the broadcast bit always set. */
static void dm9132_id_table(struct net_device *dev)
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr + 0xc0;
	u16 *addrptr = (u16 *)dev->dev_addr;
	struct netdev_hw_addr *ha;
	u16 i, hash_table[4];

	/* Station Address: 3 x 16-bit writes */
	for (i = 0; i < 3; i++) {
		dw16(0, addrptr[i]);

	/* Clear Hash Table */
	memset(hash_table, 0, sizeof(hash_table));

	/* broadcast address */
	hash_table[3] = 0x8000;

	/* the multicast addresses in Hash Table : 64 bits */
	netdev_for_each_mc_addr(ha, dev) {
		/* CRC of the address selects one of 64 hash bits */
		u32 hash_val = cal_CRC((char *)ha->addr, 6, 0) & 0x3f;

		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);

	/* Write the hash table to MAC MD table */
	for (i = 0; i < 4; i++, ioaddr += 4)
		dw16(0, hash_table[i]);
1495 * Send a setup frame for DM9102/DM9102A
1496 * This setup frame initialize DM910X address filter mode
/* Build and send a "setup frame" for DM9102/DM9102A to program the
 * perfect-filter address table: station address, broadcast, then up to
 * 14 entries of multicast addresses.  The frame is queued on the TX ring;
 * if the ring is idle it is transmitted immediately, otherwise it is left
 * for the TX completion path to kick. */
static void send_filter_frame(struct net_device *dev)
	struct dmfe_board_info *db = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct tx_desc *txptr;

	DMFE_DBUG(0, "send_filter_frame()", 0);

	txptr = db->tx_insert_ptr;
	suptr = (u32 *) txptr->tx_buf_ptr;

	/* Node address: each 16-bit half in its own 32-bit slot */
	addrptr = (u16 *) dev->dev_addr;
	*suptr++ = addrptr[0];
	*suptr++ = addrptr[1];
	*suptr++ = addrptr[2];

	/* broadcast address */

	/* fill in the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addrptr = (u16 *) ha->addr;
		*suptr++ = addrptr[0];
		*suptr++ = addrptr[1];
		*suptr++ = addrptr[2];

	/* pad the remaining of the 14 filter slots */
	for (i = netdev_mc_count(dev); i < 14; i++) {

	/* prepare the setup frame descriptor (setup-packet TDES1 flags) */
	db->tx_insert_ptr = txptr->next_tx_desc;
	txptr->tdes1 = cpu_to_le32(0x890000c0);

	/* Resource Check and Send the setup packet */
	if (!db->tx_packet_cnt) {
		void __iomem *ioaddr = db->ioaddr;

		/* TX ring idle: give the descriptor to the chip and poll */
		db->tx_packet_cnt++;
		txptr->tdes0 = cpu_to_le32(0x80000000);
		update_cr6(db->cr6_data | 0x2000, ioaddr);
		dw32(DCR1, 0x1);	/* Issue Tx polling */
		update_cr6(db->cr6_data, ioaddr);
		/* NOTE(review): direct trans_start write is deprecated on
		 * modern kernels (netif_trans_update) — confirm target. */
		dev->trans_start = jiffies;
		db->tx_queue_cnt++;	/* Put in TX queue */
1559 * Allocate rx buffer,
1560 * As possible as allocate maxiumn Rx buffer
/* Allocate as many RX skbs as possible (up to RX_DESC_CNT outstanding),
 * DMA-map each one and hand its descriptor to the chip.  Stops early if
 * skb allocation fails. */
static void allocate_rx_buffer(struct net_device *dev)
	struct dmfe_board_info *db = netdev_priv(dev);
	struct rx_desc *rxptr;
	struct sk_buff *skb;

	rxptr = db->rx_insert_ptr;

	while(db->rx_avail_cnt < RX_DESC_CNT) {
		if ( ( skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE) ) == NULL )
		rxptr->rx_skb_ptr = skb; /* FIXME (?) */
		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
				    RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
		/* transfer ownership to the chip */
		rxptr->rdes0 = cpu_to_le32(0x80000000);
		rxptr = rxptr->next_rx_desc;

	db->rx_insert_ptr = rxptr;
/* Clock one data bit into the serial ROM: drive the data value with the
 * clock low, high, then low again (CR9 bit-bang interface). */
static void srom_clk_write(void __iomem *ioaddr, u32 data)
	static const u32 cmd[] = {
		CR9_SROM_READ | CR9_SRCS,
		CR9_SROM_READ | CR9_SRCS | CR9_SRCLK,
		CR9_SROM_READ | CR9_SRCS
	for (i = 0; i < ARRAY_SIZE(cmd); i++) {
		dw32(DCR9, data | cmd[i]);
1602 * Read one word data from the serial ROM
/* Read one 16-bit word from the serial ROM at the given word offset,
 * bit-banging the 93C46-style protocol through CR9: chip select, read
 * command (110b), 6-bit address, then 16 data bits MSB-first. */
static u16 read_srom_word(void __iomem *ioaddr, int offset)
	dw32(DCR9, CR9_SROM_READ);
	dw32(DCR9, CR9_SROM_READ | CR9_SRCS);

	/* Send the Read Command 110b */
	srom_clk_write(ioaddr, SROM_DATA_1);
	srom_clk_write(ioaddr, SROM_DATA_1);
	srom_clk_write(ioaddr, SROM_DATA_0);

	/* Send the offset, MSB first (6 address bits) */
	for (i = 5; i >= 0; i--) {
		srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
		srom_clk_write(ioaddr, srom_data);

	dw32(DCR9, CR9_SROM_READ | CR9_SRCS);

	/* shift in 16 data bits, MSB first */
	for (i = 16; i > 0; i--) {
		dw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
		srom_data = (srom_data << 1) |
				((dr32(DCR9) & CR9_CRDOUT) ? 1 : 0);
		dw32(DCR9, CR9_SROM_READ | CR9_SRCS);

	/* deselect the SROM */
	dw32(DCR9, CR9_SROM_READ);
1644 * Auto sense the media mode
/* Auto-sense the negotiated media mode from the PHY.
 * Returns 0 and sets db->op_mode on success (link up, auto-neg done);
 * returns non-zero failure and falls back to DMFE_10MHF otherwise. */
static u8 dmfe_sense_speed(struct dmfe_board_info *db)
	void __iomem *ioaddr = db->ioaddr;

	/* CR6 bit18=0, select 10/100M */
	update_cr6(db->cr6_data & ~0x40000, ioaddr);

	/* read BMSR twice — the link bit is latched-low, so the first read
	 * clears any stale latch */
	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);

	/* 0x24 = auto-neg complete + link up */
	if ( (phy_mode & 0x24) == 0x24 ) {
		if (db->chip_id == PCI_DM9132_ID)	/* DM9132 */
			phy_mode = phy_read(db->ioaddr,
					    db->phy_addr, 7, db->chip_id) & 0xf000;
		else 				/* DM9102/DM9102A */
			phy_mode = phy_read(db->ioaddr,
					    db->phy_addr, 17, db->chip_id) & 0xf000;
		/* map vendor speed/duplex status bits to op_mode */
		case 0x1000: db->op_mode = DMFE_10MHF; break;
		case 0x2000: db->op_mode = DMFE_10MFD; break;
		case 0x4000: db->op_mode = DMFE_100MHF; break;
		case 0x8000: db->op_mode = DMFE_100MFD; break;
		default: db->op_mode = DMFE_10MHF;
		db->op_mode = DMFE_10MHF;
		DMFE_DBUG(0, "Link Failed :", phy_mode);
1686 * Set 10/100 phyxcer capability
1687 * AUTO mode : phyxcer register4 is NIC capability
1688 * Force mode: phyxcer register4 is the force media
/* Program the 10/100 PHY capability/advertisement register (reg 4).
 * AUTO mode : advertise the NIC's full capability set.
 * Force mode: advertise only the single forced media type.
 * Finally (re)start auto-negotiation. */
static void dmfe_set_phyxcer(struct dmfe_board_info *db)
	void __iomem *ioaddr = db->ioaddr;

	/* Select 10/100M phyxcer (CR6 bit 18 = 0) */
	db->cr6_data &= ~0x40000;
	update_cr6(db->cr6_data, ioaddr);

	/* DM9009 Chip: Phyxcer reg18 bit12=0 */
	if (db->chip_id == PCI_DM9009_ID) {
		phy_reg = phy_read(db->ioaddr,
				   db->phy_addr, 18, db->chip_id) & ~0x1000;
		phy_write(db->ioaddr,
			  db->phy_addr, 18, phy_reg, db->chip_id);

	/* Phyxcer capability setting: clear the 10/100 HD/FD ability bits */
	phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;

	if (db->media_mode & DMFE_AUTO) {
		/* advertise everything the NIC supports */
		phy_reg |= db->PHY_reg4;
		/* Force mode: advertise exactly one ability bit */
		switch(db->media_mode) {
		case DMFE_10MHF: phy_reg |= 0x20; break;
		case DMFE_10MFD: phy_reg |= 0x40; break;
		case DMFE_100MHF: phy_reg |= 0x80; break;
		case DMFE_100MFD: phy_reg |= 0x100; break;
		/* DM9009 is 10M only — mask out 100M abilities */
		if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;

	/* Write new capability to Phyxcer Reg4; if nothing would be
	 * advertised, fall back to full capability + AUTO */
	if ( !(phy_reg & 0x01e0)) {
		phy_reg|=db->PHY_reg4;
		db->media_mode|=DMFE_AUTO;
	phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);

	/* Restart Auto-Negotiation */
	if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
		phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
	if ( !db->chip_type )
		phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
1743 * AUTO mode : PHY controller in Auto-negotiation Mode
1744 * Force mode: PHY controller in force mode with HUB
1745 * N-way force capability with SWITCH
/* Apply the sensed/forced operating mode (db->op_mode) to the chip:
 * AUTO mode : PHY controller stays in auto-negotiation mode.
 * Force mode: PHY controller forced; with a non-N-Way link partner the
 *             PHY BMCR is written directly with the forced speed/duplex. */
static void dmfe_process_mode(struct dmfe_board_info *db)
	/* Full Duplex Mode Check (op_mode bit 2 = full duplex) */
	if (db->op_mode & 0x4)
		db->cr6_data |= CR6_FDM;	/* Set Full Duplex Bit */
		db->cr6_data &= ~CR6_FDM;	/* Clear Full Duplex Bit */

	/* Transceiver Selection */
	if (db->op_mode & 0x10)		/* 1M HomePNA */
		db->cr6_data |= 0x40000;	/* External MII select */
		db->cr6_data &= ~0x40000;	/* Internal 10/100 transceiver */

	update_cr6(db->cr6_data, db->ioaddr);

	/* 10/100M phyxcer force mode handling */
	if ( !(db->media_mode & 0x18)) {
		/* MII reg 6: link partner N-Way ability */
		phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
		if ( !(phy_reg & 0x1) ) {
			/* partner without N-Way capability:
			 * force BMCR speed/duplex directly */
			switch(db->op_mode) {
			case DMFE_10MHF: phy_reg = 0x0; break;
			case DMFE_10MFD: phy_reg = 0x100; break;
			case DMFE_100MHF: phy_reg = 0x2000; break;
			case DMFE_100MFD: phy_reg = 0x2100; break;
			phy_write(db->ioaddr,
				  db->phy_addr, 0, phy_reg, db->chip_id);
			/* DM9102 variants need the write issued twice */
			if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
			phy_write(db->ioaddr,
				  db->phy_addr, 0, phy_reg, db->chip_id);
1791 * Write a word to Phy register
/* Write a 16-bit word to a PHY register.
 * DM9132: memory-mapped PHY registers at offset 0x80.
 * DM9102/DM9102A: bit-bang the MII management frame (preamble, start,
 * write op, PHY address, register address, turnaround, data). */
static void phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
		      u16 phy_data, u32 chip_id)
	if (chip_id == PCI_DM9132_ID) {
		dw16(0x80 + offset * 4, phy_data);
		/* DM9102/DM9102A Chip */

		/* Send synchronization clocks (preamble) to the PHY
		 * controller — the loop issues 35 pulses although the
		 * original comment said 33 */
		for (i = 0; i < 35; i++)
			phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send start command(01) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send write command(01) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send Phy address, MSB first (5 bits) */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Send register address, MSB first (5 bits) */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       offset & i ? PHY_DATA_1 : PHY_DATA_0);

		/* turnaround bits (10) before the data */
		phy_write_1bit(ioaddr, PHY_DATA_1);
		phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Write the 16-bit data word, MSB first */
		for ( i = 0x8000; i > 0; i >>= 1)
			phy_write_1bit(ioaddr,
				       phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
1839 * Read a word data from phy register
/* Read a 16-bit word from a PHY register.
 * DM9132: memory-mapped PHY registers at offset 0x80.
 * DM9102/DM9102A: bit-bang the MII management read frame and shift in
 * the 16 data bits. */
static u16 phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
	if (chip_id == PCI_DM9132_ID) {
		phy_data = dr16(0x80 + offset * 4);
		/* DM9102/DM9102A Chip */

		/* Send synchronization clocks (preamble) to the PHY
		 * controller — 35 pulses (comment historically said 33) */
		for (i = 0; i < 35; i++)
			phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send start command(01) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send read command(10) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_1);
		phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Send Phy address, MSB first (5 bits) */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Send register address, MSB first (5 bits) */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       offset & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Skip transition (turnaround) state */
		phy_read_1bit(ioaddr);

		/* read 16-bit data, MSB first */
		for (phy_data = 0, i = 0; i < 16; i++) {
			phy_data |= phy_read_1bit(ioaddr);
1890 * Write one bit data to Phy Controller
/* Clock one bit out to the PHY controller through CR9:
 * data with MDC low, then high, then low again. */
static void phy_write_1bit(void __iomem *ioaddr, u32 phy_data)
	dw32(DCR9, phy_data);		/* MII Clock Low */
	dw32(DCR9, phy_data | MDCLKH);	/* MII Clock High */
	dw32(DCR9, phy_data);		/* MII Clock Low */
1905 * Read one bit phy data from PHY controller
/* Clock one bit in from the PHY controller through CR9 and return it
 * (MDIO input is CR9 bit 19). */
static u16 phy_read_1bit(void __iomem *ioaddr)
	dw32(DCR9, 0x50000);
	phy_data = (dr32(DCR9) >> 19) & 0x1;
	dw32(DCR9, 0x40000);
1923 * Parser SROM and media mode
/* Parse the SROM contents and module parameters to derive:
 *  - the NIC's advertised media capabilities (PHY_reg4),
 *  - a forced media mode, if any,
 *  - special-function bits for CR15 (VLAN, flow control, TX pause),
 *  - the HPNA (HomePNA) remote command word, and
 *  - whether a DM9801 (HomeRun) or DM9802 (LongRun) companion is present. */
static void dmfe_parse_srom(struct dmfe_board_info * db)
	char * srom = db->srom;
	int dmfe_mode, tmp_reg;

	DMFE_DBUG(0, "dmfe_parse_srom() ", 0);

	db->cr15_data = CR15_DEFAULT;

	/* Check SROM Version */
	if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
		/* Get NIC support media mode */
		db->NIC_capability = le16_to_cpup((__le16 *) (srom + 34));
		/* translate each capability bit to a PHY reg4 ability bit */
		for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
			switch( db->NIC_capability & tmp_reg ) {
			case 0x1: db->PHY_reg4 |= 0x0020; break;	/* 10M half */
			case 0x2: db->PHY_reg4 |= 0x0040; break;	/* 10M full */
			case 0x4: db->PHY_reg4 |= 0x0080; break;	/* 100M half */
			case 0x8: db->PHY_reg4 |= 0x0100; break;	/* 100M full */

		/* Media Mode Force or not check */
		dmfe_mode = (le32_to_cpup((__le32 *) (srom + 34)) &
			     le32_to_cpup((__le32 *) (srom + 36)));
		case 0x4: dmfe_media_mode = DMFE_100MHF; break;	/* 100MHF */
		case 0x2: dmfe_media_mode = DMFE_10MFD; break;	/* 10MFD */
		case 0x8: dmfe_media_mode = DMFE_100MFD; break;	/* 100MFD */
		case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */

		/* Special Function settings (module param OR SROM flag) */
		if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
			db->cr15_data |= 0x40;

		if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
			db->cr15_data |= 0x400;

		/* TX pause packet */
		if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
			db->cr15_data |= 0x9800;

	/* Parse HPNA parameters */
	db->HPNA_command = 1;

	/* Accept remote command or not */
	if (HPNA_rx_cmd == 0)
		db->HPNA_command |= 0x8000;

	/* Issue remote command & operation mode */
	if (HPNA_tx_cmd == 1)
		switch(HPNA_mode) {	/* Issue Remote Command */
		case 0: db->HPNA_command |= 0x0904; break;
		case 1: db->HPNA_command |= 0x0a00; break;
		case 2: db->HPNA_command |= 0x0506; break;
		case 3: db->HPNA_command |= 0x0602; break;
		switch(HPNA_mode) {	/* Don't Issue */
		case 0: db->HPNA_command |= 0x0004; break;
		case 1: db->HPNA_command |= 0x0000; break;
		case 2: db->HPNA_command |= 0x0006; break;
		case 3: db->HPNA_command |= 0x0002; break;

	/* Check DM9801 or DM9802 present or not */
	db->HPNA_present = 0;
	update_cr6(db->cr6_data | 0x40000, db->ioaddr);
	tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
	if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
		/* DM9801 or DM9802 present */
		if ( phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
			/* DM9801 HomeRun */
			db->HPNA_present = 1;
			dmfe_program_DM9801(db, tmp_reg);
			/* DM9802 LongRun */
			db->HPNA_present = 2;
			dmfe_program_DM9802(db);
2021 * Init HomeRun DM9801
/* Initialize a HomeRun DM9801 companion chip.  The noise-floor tuning of
 * PHY regs 17/25 depends on the silicon revision (HPNA_rev from PHY id
 * register 3). */
static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
	if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
	case 0xb900: /* DM9801 E3 */
		db->HPNA_command |= 0x1000;
		reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
		reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
	case 0xb901: /* DM9801 E4 */
		reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
	case 0xb902: /* DM9801 E5 */
	case 0xb903: /* DM9801 E6 */
		db->HPNA_command |= 0x1000;
		reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
	/* commit command word and tuned values to the PHY */
	phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
	phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
	phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
2059 * Init HomeRun DM9802
/* Initialize a LongRun DM9802 companion chip: write the HPNA command word
 * and patch the noise floor into the low byte of PHY reg 25. */
static void dmfe_program_DM9802(struct dmfe_board_info * db)
	if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
	phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
	phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
	phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
	phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
2075 * Check remote HPNA power and speed status. If not correct,
2076 * issue command again.
/* Check the remote HPNA device's power/speed status (PHY reg 17 bits 5-6)
 * against our configured command; if they do not match, re-issue the
 * command, otherwise back off for ~10 minutes of timer ticks. */
static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
	/* Get remote device status */
	phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
	case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
	case 0x20: phy_reg = 0x0900;break; /* LP/HS */
	case 0x40: phy_reg = 0x0600;break; /* HP/LS */
	case 0x60: phy_reg = 0x0500;break; /* HP/HS */

	/* Check whether the remote device status matches our setting or not */
	if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
		phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
		db->HPNA_timer=600;	/* Match: re-check every 10 minutes */
/* PCI IDs handled by this driver (vendor 0x1282 = Davicom); driver_data
 * carries the internal chip-ID constant.
 * NOTE(review): DEFINE_PCI_DEVICE_TABLE is deprecated/removed in modern
 * kernels — plain `static const struct pci_device_id` is preferred. */
static DEFINE_PCI_DEVICE_TABLE(dmfe_pci_tbl) = {
	{ 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
	{ 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
/* Legacy PCI suspend hook: detach the interface, stop the MAC, free RX
 * buffers, arm Wake-on-LAN (link change / magic packet) in PCI config
 * space per db->wol_mode, then power the device down. */
static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	/* Disable upper layer interface */
	netif_device_detach(dev);

	/* Stop Tx/Rx */
	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
	update_cr6(db->cr6_data, ioaddr);

	/* Disable Interrupt */
	dw32(DCR5, dr32(DCR5));

	/* Free RX buffers */
	dmfe_free_rxbuffer(db);

	/* Enable WOL: vendor-specific WOL control at PCI config offset 0x40 */
	pci_read_config_dword(pci_dev, 0x40, &tmp);
	tmp &= ~(DMFE_WOL_LINKCHANGE|DMFE_WOL_MAGICPACKET);

	if (db->wol_mode & WAKE_PHY)
		tmp |= DMFE_WOL_LINKCHANGE;
	if (db->wol_mode & WAKE_MAGIC)
		tmp |= DMFE_WOL_MAGICPACKET;

	pci_write_config_dword(pci_dev, 0x40, tmp);

	pci_enable_wake(pci_dev, PCI_D3hot, 1);
	pci_enable_wake(pci_dev, PCI_D3cold, 1);

	/* Power down device */
	pci_save_state(pci_dev);
	pci_set_power_state(pci_dev, pci_choose_state (pci_dev, state));
/* Legacy PCI resume hook: restore power/config state, re-initialize the
 * chip, disable Wake-on-LAN again and re-attach the interface. */
static int dmfe_resume(struct pci_dev *pci_dev)
	struct net_device *dev = pci_get_drvdata(pci_dev);

	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	/* Re-initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Disable WOL (clear the vendor WOL bits at config offset 0x40) */
	pci_read_config_dword(pci_dev, 0x40, &tmp);

	tmp &= ~(DMFE_WOL_LINKCHANGE | DMFE_WOL_MAGICPACKET);
	pci_write_config_dword(pci_dev, 0x40, tmp);

	pci_enable_wake(pci_dev, PCI_D3hot, 0);
	pci_enable_wake(pci_dev, PCI_D3cold, 0);

	/* Restart upper layer interface */
	netif_device_attach(dev);
/* When power management is not configured, compile out the PM hooks. */
#define dmfe_suspend NULL
#define dmfe_resume NULL

/* PCI driver glue.
 * NOTE(review): __devexit_p was removed from modern kernels — confirm
 * the target kernel version. */
static struct pci_driver dmfe_driver = {
	.id_table	= dmfe_pci_tbl,
	.probe		= dmfe_init_one,
	.remove		= __devexit_p(dmfe_remove_one),
	.suspend        = dmfe_suspend,
	.resume         = dmfe_resume
MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* Module parameters (all writable only at load time, perm 0). */
module_param(debug, int, 0);
module_param(mode, byte, 0);
module_param(cr6set, int, 0);
module_param(chkmode, byte, 0);
/* HomePNA tuning knobs — see dmfe_parse_srom() for how they are used */
module_param(HPNA_mode, byte, 0);
module_param(HPNA_rx_cmd, byte, 0);
module_param(HPNA_tx_cmd, byte, 0);
module_param(HPNA_NoiseFloor, byte, 0);
module_param(SF_mode, byte, 0);
MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
		"Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
		"(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
2217 * when user used insmod to add module, system invoked init_module()
2218 * to initialize and register.
/* Module entry point: validate/normalize module parameters and register
 * the PCI driver. */
static int __init dmfe_init_module(void)
	pr_info("%s\n", version);
	printed_version = 1;

	DMFE_DBUG(0, "init_module() ", debug);

	dmfe_debug = debug;	/* set debug flag */
	dmfe_cr6_user_set = cr6set;

	dmfe_media_mode = mode;
	default:dmfe_media_mode = DMFE_AUTO;

	/* clamp out-of-range HPNA parameters to their defaults */
	HPNA_mode = 0;		/* Default: LP/HS */
	if (HPNA_rx_cmd > 1)
		HPNA_rx_cmd = 0;	/* Default: Ignore remote cmd */
	if (HPNA_tx_cmd > 1)
		HPNA_tx_cmd = 0;	/* Default: Don't issue remote cmd */
	if (HPNA_NoiseFloor > 15)
		HPNA_NoiseFloor = 0;

	rc = pci_register_driver(&dmfe_driver);
2266 * when user used rmmod to delete module, system invoked clean_module()
2267 * to un-register all registered services.
/* Module exit point: unregister the PCI driver (which in turn removes
 * all bound devices). */
static void __exit dmfe_cleanup_module(void)
	DMFE_DBUG(0, "dmfe_clean_module() ", debug);
	pci_unregister_driver(&dmfe_driver);

module_init(dmfe_init_module);
module_exit(dmfe_cleanup_module);