/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers. I allocate memory
 * pages and then divide them into 2K frame buffers. This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/fec.h>

#include <asm/cacheflush.h>
#ifndef CONFIG_ARCH_MXC
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#endif
#ifdef CONFIG_ARCH_MXC
#include <mach/hardware.h>
#define FEC_ALIGNMENT	0xf
#else
#define FEC_ALIGNMENT	0x3
#endif
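/* FEC_ALIGNMENT is used below as an address mask: a transmit buffer can be
 * handed to the controller in place only when (addr & FEC_ALIGNMENT) == 0,
 * i.e. 16-byte alignment on MXC parts and 4-byte alignment elsewhere;
 * anything else is copied through a bounce buffer first.
 */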
/* Define the fixed address of the FEC hardware. */
#if defined(CONFIG_M5272)

static unsigned char fec_mac_default[] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get the MAC from.
 */
#if defined(CONFIG_NETtel)
#define	FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define	FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define	FEC_FLASHMAC	0xf0020000
#elif defined (CONFIG_M5272C3)
#define	FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define	FEC_FLASHMAC	0xffc0406b
#else
#define	FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */
/* The number of Tx and Rx buffers. These are allocated from the page
 * pool. The code may assume these are powers of two, so it is best
 * to keep them that size.
 * We don't need to allocate pages for the transmitter. We just use
 * the skbuffer directly.
 */
#define FEC_ENET_RX_PAGES	8
#define FEC_ENET_RX_FRSIZE	2048
#define FEC_ENET_RX_FRPPG	(PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE		(FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE	2048
#define FEC_ENET_TX_FRPPG	(PAGE_SIZE / FEC_ENET_TX_FRSIZE)
#define TX_RING_SIZE		16	/* Must be power of two */
#define TX_RING_MOD_MASK	15	/*   for this to work */
#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
#error "FEC: descriptor ring size constants too large"
#endif
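/* For example, with 4 KiB pages FEC_ENET_RX_FRPPG is 2 and RX_RING_SIZE is
 * 16, so the check above sees (16 + 16) * 8 = 256 bytes of descriptors,
 * comfortably inside the single page allocated for them.
 */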
/* Interrupt events/masks. */
#define FEC_ENET_HBERR	((uint)0x80000000)	/* Heartbeat error */
#define FEC_ENET_BABR	((uint)0x40000000)	/* Babbling receiver */
#define FEC_ENET_BABT	((uint)0x20000000)	/* Babbling transmitter */
#define FEC_ENET_GRA	((uint)0x10000000)	/* Graceful stop complete */
#define FEC_ENET_TXF	((uint)0x08000000)	/* Full frame transmitted */
#define FEC_ENET_TXB	((uint)0x04000000)	/* A buffer was transmitted */
#define FEC_ENET_RXF	((uint)0x02000000)	/* Full frame received */
#define FEC_ENET_RXB	((uint)0x01000000)	/* A buffer was received */
#define FEC_ENET_MII	((uint)0x00800000)	/* MII interrupt */
#define FEC_ENET_EBERR	((uint)0x00400000)	/* SDMA bus error */

#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
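/* Note that only the whole-frame events (TXF/RXF) and MII transfers are
 * unmasked by default; the per-buffer TXB/RXB events stay masked, so the
 * interrupt rate is one per frame rather than one per descriptor.
 */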
/* The FEC stores dest/src/type, data, and checksum for receive packets. */
#define PKT_MAXBUF_SIZE		1518
#define PKT_MINBUF_SIZE		64
#define PKT_MAXBLR_SIZE		1520
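/* 1518 is 6 + 6 + 2 bytes of dest/src/type, up to 1500 bytes of data and
 * the 4-byte FCS; PKT_MAXBLR_SIZE rounds that up to 1520 so the value
 * programmed into the receive buffer size register stays a multiple of 16,
 * which the FEC expects.
 */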
/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define	OPT_FRAME_SIZE	0
#endif
/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
 * tx_bd_base always point to the base of the buffer descriptors. The
 * cur_rx and cur_tx point to the currently available buffer.
 * The dirty_tx tracks the current buffer that is being sent by the
 * controller. The cur_tx and dirty_tx are equal under both completely
 * empty and completely full conditions. The empty/ready indicator in
 * the buffer descriptor determines the actual condition.
 */
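/* Illustration: queuing one frame on an idle ring advances cur_tx one
 * entry past dirty_tx; once the controller clears BD_ENET_TX_READY and
 * fec_enet_tx() reclaims that entry, dirty_tx catches up and the two are
 * equal again, this time meaning "empty" rather than "full".
 */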
struct fec_enet_private {
	/* Hardware registers of the FEC device */
	void __iomem *hwp;

	struct net_device *netdev;

	struct clk *clk;

	/* The saved address of a sent-in-place packet/buffer, for dev_kfree_skb(). */
	unsigned char *tx_bounce[TX_RING_SIZE];
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	ushort	skb_cur;
	ushort	skb_dirty;

	/* CPM dual port RAM relative addresses */
	dma_addr_t	bd_dma;
	/* Address of Rx and Tx buffers */
	struct bufdesc	*rx_bd_base;
	struct bufdesc	*tx_bd_base;
	/* The next free ring entry */
	struct bufdesc	*cur_rx, *cur_tx;
	/* The ring entries to be free()ed */
	struct bufdesc	*dirty_tx;

	uint	tx_full;
	/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
	spinlock_t hw_lock;

	struct platform_device *pdev;

	int	opened;

	/* Phylib and MDIO interface */
	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	int	mii_timeout;
	uint	phy_speed;
	phy_interface_t	phy_interface;
	int	link;
	int	full_duplex;
	struct completion mdio_done;
};
static irqreturn_t fec_enet_interrupt(int irq, void *dev_id);
static void fec_enet_tx(struct net_device *dev);
static void fec_enet_rx(struct net_device *dev);
static int fec_enet_close(struct net_device *dev);
static void fec_restart(struct net_device *dev, int duplex);
static void fec_stop(struct net_device *dev);
/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST		(1 << 30)
#define FEC_MMFR_OP_READ	(2 << 28)
#define FEC_MMFR_OP_WRITE	(1 << 28)
#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
#define FEC_MMFR_TA		(2 << 16)
#define FEC_MMFR_DATA(v)	(v & 0xffff)
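/* For instance, a read of register 2 on the PHY at address 1 composes to
 * FEC_MMFR_ST | FEC_MMFR_OP_READ | FEC_MMFR_PA(1) | FEC_MMFR_RA(2) |
 * FEC_MMFR_TA == 0x608a0000; once the MII event fires, the low 16 bits of
 * FEC_MII_DATA carry the register value back.
 */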
#define FEC_MII_TIMEOUT		1000 /* us */

/* Transmitter timeout */
#define TX_TIMEOUT	(2 * HZ)

static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *bdp;
	void *bufaddr;
	unsigned short status;
	unsigned long flags;

	if (!fep->link) {
		/* Link is down or autonegotiation is in progress. */
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&fep->hw_lock, flags);
	/* Fill in a Tx ring entry */
	bdp = fep->cur_tx;
	status = bdp->cbd_sc;

	if (status & BD_ENET_TX_READY) {
		/* Oops. All transmit buffers are full. Bail out.
		 * This should not happen, since the queue should be stopped.
		 */
		printk("%s: tx queue full!\n", dev->name);
		spin_unlock_irqrestore(&fep->hw_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* Clear all of the status flags */
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	bdp->cbd_datlen = skb->len;

	/*
	 * On some FEC implementations data must be aligned on
	 * 4-byte boundaries. Use bounce buffers to copy data
	 * and get it aligned. Ugh.
	 */
	if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
		unsigned int index;
		index = bdp - fep->tx_bd_base;
		memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len);
		bufaddr = fep->tx_bounce[index];
	}

	/* Save skb pointer */
	fep->tx_skbuff[fep->skb_cur] = skb;

	dev->stats.tx_bytes += skb->len;
	fep->skb_cur = (fep->skb_cur + 1) & TX_RING_MOD_MASK;

	/* Push the data cache so the CPM does not get stale memory data. */
	bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr,
			FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);

	/* Send it on its way. Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
	bdp->cbd_sc = status;

	/* Trigger transmission start */
	writel(0, fep->hwp + FEC_X_DES_ACTIVE);

	/* If this was the last BD in the ring, start at the beginning again. */
	if (status & BD_ENET_TX_WRAP)
		bdp = fep->tx_bd_base;
	else
		bdp++;

	if (bdp == fep->dirty_tx) {
		fep->tx_full = 1;
		netif_stop_queue(dev);
	}

	fep->cur_tx = bdp;
	spin_unlock_irqrestore(&fep->hw_lock, flags);

	return NETDEV_TX_OK;
}

static void
fec_timeout(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	dev->stats.tx_errors++;

	fec_restart(dev, fep->full_duplex);
	netif_wake_queue(dev);
}

static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fec_enet_private *fep = netdev_priv(dev);
	uint int_events;
	irqreturn_t ret = IRQ_NONE;

	do {
		int_events = readl(fep->hwp + FEC_IEVENT);
		writel(int_events, fep->hwp + FEC_IEVENT);

		if (int_events & FEC_ENET_RXF) {
			ret = IRQ_HANDLED;
			fec_enet_rx(dev);
		}
		/* Transmit OK, or non-fatal error. Update the buffer
		 * descriptors. FEC handles all errors, we just discover
		 * them as part of the transmit process.
		 */
		if (int_events & FEC_ENET_TXF) {
			ret = IRQ_HANDLED;
			fec_enet_tx(dev);
		}
		if (int_events & FEC_ENET_MII) {
			ret = IRQ_HANDLED;
			complete(&fep->mdio_done);
		}
	} while (int_events);

	return ret;
}

static void
fec_enet_tx(struct net_device *dev)
{
	struct fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;

	fep = netdev_priv(dev);
	spin_lock(&fep->hw_lock);
	bdp = fep->dirty_tx;

	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
		if (bdp == fep->cur_tx && fep->tx_full == 0)
			break;

		dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
		bdp->cbd_bufaddr = 0;

		skb = fep->tx_skbuff[fep->skb_dirty];
		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				BD_ENET_TX_RL | BD_ENET_TX_UN |
				BD_ENET_TX_CSL)) {
			dev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				dev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				dev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				dev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				dev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				dev->stats.tx_carrier_errors++;
		} else {
			dev->stats.tx_packets++;
		}

		if (status & BD_ENET_TX_READY)
			printk("HEY! Enet xmit interrupt and TX_READY.\n");

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			dev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);
		fep->tx_skbuff[fep->skb_dirty] = NULL;
		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted */
		if (status & BD_ENET_TX_WRAP)
			bdp = fep->tx_bd_base;
		else
			bdp++;

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (fep->tx_full) {
			fep->tx_full = 0;
			if (netif_queue_stopped(dev))
				netif_wake_queue(dev);
		}
	}
	fep->dirty_tx = bdp;
	spin_unlock(&fep->hw_lock);
}

/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static void
fec_enet_rx(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;
	ushort pkt_len;
	__u8 *data;

	spin_lock(&fep->hw_lock);

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			printk("FEC ENET: rcv is not +last\n");

		if (!fep->opened)
			goto rx_processing_done;

		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			dev->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				dev->stats.rx_length_errors++;
			}
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				dev->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				dev->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				dev->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer. So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			dev->stats.rx_errors++;
			dev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		dev->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		dev->stats.rx_bytes += pkt_len;
		data = (__u8 *)__va(bdp->cbd_bufaddr);

		dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
				DMA_FROM_DEVICE);

		/* This does 16 byte alignment, exactly what we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);

		if (unlikely(!skb)) {
			printk("%s: Memory squeeze, dropping packet.\n",
					dev->name);
			dev->stats.rx_dropped++;
		} else {
			skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len - 4);	/* Make room */
			skb_copy_to_linear_data(skb, data, pkt_len - 4);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
		}

		bdp->cbd_bufaddr = dma_map_single(NULL, data, bdp->cbd_datlen,
				DMA_FROM_DEVICE);
rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;

		/* Update BD pointer to next entry */
		if (status & BD_ENET_RX_WRAP)
			bdp = fep->rx_bd_base;
		else
			bdp++;

		/* Doing this here will keep the FEC running while we process
		 * incoming frames. On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, fep->hwp + FEC_R_DES_ACTIVE);
	}
	fep->cur_rx = bdp;

	spin_unlock(&fep->hw_lock);
}

/* ------------------------------------------------------------------------- */

#ifdef CONFIG_M5272
static inline void fec_get_mac(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	unsigned char *iap, tmpaddr[ETH_ALEN];

	if (FEC_FLASHMAC) {
		/*
		 * Get MAC address from FLASH.
		 * If it is all 1's or 0's, use the default.
		 */
		iap = (unsigned char *)FEC_FLASHMAC;
		if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
		    (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
			iap = fec_mac_default;
		if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
		    (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
			iap = fec_mac_default;
	} else {
		*((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
		*((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
		iap = &tmpaddr[0];
	}

	memcpy(dev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using default MAC address */
	if (iap == fec_mac_default)
		dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->pdev->id;
}
#endif

/* ------------------------------------------------------------------------- */

static void fec_enet_adjust_link(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct phy_device *phy_dev = fep->phy_dev;
	unsigned long flags;
	int status_change = 0;

	spin_lock_irqsave(&fep->hw_lock, flags);

	/* Prevent a state halted on mii error */
	if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
		phy_dev->state = PHY_RESUMING;
		goto spin_unlock;
	}
	/* Duplex link change */
	if (phy_dev->link) {
		if (fep->full_duplex != phy_dev->duplex) {
			fec_restart(dev, phy_dev->duplex);
			status_change = 1;
		}
	}
	/* Link on or off change */
	if (phy_dev->link != fep->link) {
		fep->link = phy_dev->link;
		if (phy_dev->link)
			fec_restart(dev, phy_dev->duplex);
		else
			fec_stop(dev);
		status_change = 1;
	}
spin_unlock:
	spin_unlock_irqrestore(&fep->hw_lock, flags);

	if (status_change)
		phy_print_status(phy_dev);
}

static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct fec_enet_private *fep = bus->priv;
	unsigned long time_left;

	fep->mii_timeout = 0;
	init_completion(&fep->mdio_done);

	/* start a read op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		printk(KERN_ERR "FEC: MDIO read timeout\n");
		return -ETIMEDOUT;
	}

	/* return value */
	return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
}

static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct fec_enet_private *fep = bus->priv;
	unsigned long time_left;

	fep->mii_timeout = 0;
	init_completion(&fep->mdio_done);

	/* start a write op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA | FEC_MMFR_DATA(value),
		fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		printk(KERN_ERR "FEC: MDIO write timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int fec_enet_mdio_reset(struct mii_bus *bus)
{
	return 0;
}

static int fec_enet_mii_probe(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct phy_device *phy_dev = NULL;
	char mdio_bus_id[MII_BUS_ID_SIZE];
	char phy_name[MII_BUS_ID_SIZE + 3];
	int phy_id;

	fep->phy_dev = NULL;

	/* check for attached phy */
	for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
		if ((fep->mii_bus->phy_mask & (1 << phy_id)))
			continue;
		if (fep->mii_bus->phy_map[phy_id] == NULL)
			continue;
		if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
			continue;
		strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
		break;
	}

	if (phy_id >= PHY_MAX_ADDR) {
		printk(KERN_INFO "%s: no PHY, assuming direct connection "
			"to switch\n", dev->name);
		strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
		phy_id = 0;
	}

	snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
	phy_dev = phy_connect(dev, phy_name, &fec_enet_adjust_link, 0,
		PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phy_dev)) {
		printk(KERN_ERR "%s: could not attach to PHY\n", dev->name);
		return PTR_ERR(phy_dev);
	}

	/* mask with MAC supported features */
	phy_dev->supported &= PHY_BASIC_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	fep->phy_dev = phy_dev;
	fep->link = 0;
	fep->full_duplex = 0;

	printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
		"(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
		fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
		fep->phy_dev->irq);

	return 0;
}

static int fec_enet_mii_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(dev);
	int err = -ENXIO, i;

	fep->mii_timeout = 0;

	/*
	 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
	 */
	fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk), 5000000) << 1;
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
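	/* Worked example (assuming a 66 MHz FEC clock, a typical rate on
	 * i.MX parts): DIV_ROUND_UP(66000000, 5000000) = 14, shifted left
	 * one bit into the MII_SPEED field as 28, so MDC runs at
	 * 66 MHz / (2 * 14), about 2.36 MHz, just under the 2.5 MHz limit.
	 */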
	fep->mii_bus = mdiobus_alloc();
	if (fep->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	fep->mii_bus->name = "fec_enet_mii_bus";
	fep->mii_bus->read = fec_enet_mdio_read;
	fep->mii_bus->write = fec_enet_mdio_write;
	fep->mii_bus->reset = fec_enet_mdio_reset;
	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1);
	fep->mii_bus->priv = fep;
	fep->mii_bus->parent = &pdev->dev;

	fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!fep->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out_free_mdiobus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		fep->mii_bus->irq[i] = PHY_POLL;

	platform_set_drvdata(dev, fep->mii_bus);

	if (mdiobus_register(fep->mii_bus))
		goto err_out_free_mdio_irq;

	return 0;

err_out_free_mdio_irq:
	kfree(fep->mii_bus->irq);
err_out_free_mdiobus:
	mdiobus_free(fep->mii_bus);
err_out:
	return err;
}

static void fec_enet_mii_remove(struct fec_enet_private *fep)
{
	if (fep->phy_dev)
		phy_disconnect(fep->phy_dev);
	mdiobus_unregister(fep->mii_bus);
	kfree(fep->mii_bus->irq);
	mdiobus_free(fep->mii_bus);
}

static int fec_enet_get_settings(struct net_device *dev,
				  struct ethtool_cmd *cmd)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev = fep->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_gset(phydev, cmd);
}

static int fec_enet_set_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev = fep->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_sset(phydev, cmd);
}

static void fec_enet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	strcpy(info->driver, fep->pdev->dev.driver->name);
	strcpy(info->version, "Revision: 1.0");
	strcpy(info->bus_info, dev_name(&dev->dev));
}

static struct ethtool_ops fec_enet_ethtool_ops = {
	.get_settings		= fec_enet_get_settings,
	.set_settings		= fec_enet_set_settings,
	.get_drvinfo		= fec_enet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
};

static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev = fep->phy_dev;

	if (!netif_running(dev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

static void fec_enet_free_buffers(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;
	struct sk_buff *skb;
	struct bufdesc *bdp;

	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = fep->rx_skbuff[i];

		if (bdp->cbd_bufaddr)
			dma_unmap_single(&dev->dev, bdp->cbd_bufaddr,
					FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
		if (skb)
			dev_kfree_skb(skb);
		bdp++;
	}

	bdp = fep->tx_bd_base;
	for (i = 0; i < TX_RING_SIZE; i++)
		kfree(fep->tx_bounce[i]);
}

static int fec_enet_alloc_buffers(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;
	struct sk_buff *skb;
	struct bufdesc *bdp;

	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
		if (!skb) {
			fec_enet_free_buffers(dev);
			return -ENOMEM;
		}
		fep->rx_skbuff[i] = skb;

		bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
				FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
		bdp->cbd_sc = BD_ENET_RX_EMPTY;
		bdp++;
	}

	/* Set the last buffer to wrap. */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	bdp = fep->tx_bd_base;
	for (i = 0; i < TX_RING_SIZE; i++) {
		fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);

		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap. */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	return 0;
}

static int
fec_enet_open(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int ret;

	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */

	ret = fec_enet_alloc_buffers(dev);
	if (ret)
		return ret;

	/* Probe and connect to PHY when opening the interface */
	ret = fec_enet_mii_probe(dev);
	if (ret) {
		fec_enet_free_buffers(dev);
		return ret;
	}
	phy_start(fep->phy_dev);
	netif_start_queue(dev);
	fep->opened = 1;
	return 0;
}

static int
fec_enet_close(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* Don't know what to do yet. */
	fep->opened = 0;
	netif_stop_queue(dev);
	fec_stop(dev);

	if (fep->phy_dev)
		phy_disconnect(fep->phy_dev);

	fec_enet_free_buffers(dev);

	return 0;
}

/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering. Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not. I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

#define HASH_BITS	6	/* #bits in hash */
#define CRC32_POLY	0xEDB88320
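/* The filter hash is the standard bit-reflected CRC-32 (polynomial
 * 0xEDB88320) of the MAC address: the top HASH_BITS (6) bits of the CRC
 * select one of 64 hash-table bits, indexes 32-63 landing in
 * FEC_GRP_HASH_TABLE_HIGH and 0-31 in FEC_GRP_HASH_TABLE_LOW.
 */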
static void set_multicast_list(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	unsigned int i, bit, data, crc, tmp;
	unsigned char hash;

	if (dev->flags & IFF_PROMISC) {
		tmp = readl(fep->hwp + FEC_R_CNTRL);
		tmp |= 0x8;
		writel(tmp, fep->hwp + FEC_R_CNTRL);
		return;
	}

	tmp = readl(fep->hwp + FEC_R_CNTRL);
	tmp &= ~0x8;
	writel(tmp, fep->hwp + FEC_R_CNTRL);

	if (dev->flags & IFF_ALLMULTI) {
		/* Catch all multicast addresses, so set the
		 * filter to all 1's
		 */
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
		return;
	}

	/* Clear filter and add the addresses in hash register
	 */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

	netdev_for_each_mc_addr(ha, dev) {
		/* Only support group multicast for now */
		if (!(ha->addr[0] & 1))
			continue;

		/* calculate crc32 value of mac address */
		crc = 0xffffffff;

		for (i = 0; i < dev->addr_len; i++) {
			data = ha->addr[i];
			for (bit = 0; bit < 8; bit++, data >>= 1) {
				crc = (crc >> 1) ^
				(((crc ^ data) & 1) ? CRC32_POLY : 0);
			}
		}

		/* only upper 6 bits (HASH_BITS) are used
		 * which point to a specific bit in the hash registers
		 */
		hash = (crc >> (32 - HASH_BITS)) & 0x3f;

		if (hash > 31) {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
			tmp |= 1 << (hash - 32);
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		} else {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
			tmp |= 1 << hash;
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
		}
	}
}

/* Set a MAC change in hardware. */
static int
fec_set_mac_address(struct net_device *dev, void *p)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) |
		(dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24),
		fep->hwp + FEC_ADDR_LOW);
	writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24),
		fep->hwp + FEC_ADDR_HIGH);
	return 0;
}

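/* Example: for the address 00:11:22:33:44:55 the function above writes
 * 0x00112233 to FEC_ADDR_LOW and 0x44550000 to FEC_ADDR_HIGH; the first
 * four octets pack big-endian into the low register and the last two fill
 * the top half of the high register.
 */
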
static const struct net_device_ops fec_netdev_ops = {
	.ndo_open		= fec_enet_open,
	.ndo_stop		= fec_enet_close,
	.ndo_start_xmit		= fec_enet_start_xmit,
	.ndo_set_multicast_list	= set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= fec_timeout,
	.ndo_set_mac_address	= fec_set_mac_address,
	.ndo_do_ioctl		= fec_enet_ioctl,
};

/*
 * XXX: We need to clean up on failure exits here.
 */
static int fec_enet_init(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *cbd_base;
	struct bufdesc *bdp;
	int i;

	/* Allocate memory for buffer descriptors. */
	cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
			GFP_KERNEL);
	if (!cbd_base) {
		printk("FEC: allocate descriptor memory failed?\n");
		return -ENOMEM;
	}

	spin_lock_init(&fep->hw_lock);

	fep->netdev = dev;
	fep->hwp = (void __iomem *)dev->base_addr;

	/* Set the Ethernet address */
#ifdef CONFIG_M5272
	fec_get_mac(dev);
#else
	{
		unsigned long l;
		l = readl(fep->hwp + FEC_ADDR_LOW);
		dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
		dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
		dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
		dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
		l = readl(fep->hwp + FEC_ADDR_HIGH);
		dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
		dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
	}
#endif

	/* Set receive and transmit descriptor base. */
	fep->rx_bd_base = cbd_base;
	fep->tx_bd_base = cbd_base + RX_RING_SIZE;

	/* The FEC Ethernet specific entries in the device structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->netdev_ops = &fec_netdev_ops;
	dev->ethtool_ops = &fec_enet_ethtool_ops;

	/* Initialize the receive buffer descriptors. */
	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = 0;
		bdp++;
	}

	/* Set the last buffer to wrap */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* ...and the same for transmit */
	bdp = fep->tx_bd_base;
	for (i = 0; i < TX_RING_SIZE; i++) {
		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	fec_restart(dev, 0);

	return 0;
}

/* This function is called to start or restart the FEC during a link
 * change. This only happens when switching between half and full
 * duplex.
 */
static void
fec_restart(struct net_device *dev, int duplex)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;

	/* Whack a reset. We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);

	/* Clear any outstanding interrupt. */
	writel(0xffc00000, fep->hwp + FEC_IEVENT);

	/* Reset all multicast. */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	/* Set maximum receive buffer size. */
	writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);

	/* Set receive and transmit descriptor base. */
	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
	writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
			fep->hwp + FEC_X_DES_START);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->cur_rx = fep->rx_bd_base;

	/* Reset SKB transmit buffers. */
	fep->skb_cur = fep->skb_dirty = 0;
	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
		if (fep->tx_skbuff[i]) {
			dev_kfree_skb_any(fep->tx_skbuff[i]);
			fep->tx_skbuff[i] = NULL;
		}
	}

	/* Enable MII mode */
	if (duplex) {
		/* MII enable / FD enable */
		writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* MII enable / No Rcv on Xmit */
		writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}
	fep->full_duplex = duplex;

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

#ifdef FEC_MIIGSK_ENR
	if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
		/* disable the gasket and wait */
		writel(0, fep->hwp + FEC_MIIGSK_ENR);
		while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
			udelay(1);

		/* configure the gasket: RMII, 50 MHz, no loopback, no echo */
		writel(1, fep->hwp + FEC_MIIGSK_CFGR);

		/* re-enable the gasket */
		writel(2, fep->hwp + FEC_MIIGSK_ENR);
	}
#endif

	/* And last, enable the transmit and receive processing */
	writel(2, fep->hwp + FEC_ECNTRL);
	writel(0, fep->hwp + FEC_R_DES_ACTIVE);

	/* Enable interrupts we wish to service */
	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
}

static void
fec_stop(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			printk("fec_stop: Graceful transmit stop did not complete!\n");
	}

	/* Whack a reset. We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
}

static int __devinit
fec_probe(struct platform_device *pdev)
{
	struct fec_enet_private *fep;
	struct fec_platform_data *pdata;
	struct net_device *ndev;
	int i, irq, ret = 0;
	struct resource *r;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -ENXIO;

	r = request_mem_region(r->start, resource_size(r), pdev->name);
	if (!r)
		return -EBUSY;

	/* Init network device */
	ndev = alloc_etherdev(sizeof(struct fec_enet_private));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* setup board info structure */
	fep = netdev_priv(ndev);
	memset(fep, 0, sizeof(*fep));

	ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));
	fep->pdev = pdev;

	if (!ndev->base_addr) {
		ret = -ENOMEM;
		goto failed_ioremap;
	}

	platform_set_drvdata(pdev, ndev);

	pdata = pdev->dev.platform_data;
	if (pdata)
		fep->phy_interface = pdata->phy;

	/* This device has up to three irqs on some platforms */
	for (i = 0; i < 3; i++) {
		irq = platform_get_irq(pdev, i);
		if (i && irq < 0)
			break;
		ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
		if (ret) {
			while (--i >= 0) {
				irq = platform_get_irq(pdev, i);
				free_irq(irq, ndev);
			}
			goto failed_irq;
		}
	}

	fep->clk = clk_get(&pdev->dev, "fec_clk");
	if (IS_ERR(fep->clk)) {
		ret = PTR_ERR(fep->clk);
		goto failed_clk;
	}
	clk_enable(fep->clk);

	ret = fec_enet_init(ndev);
	if (ret)
		goto failed_init;

	ret = fec_enet_mii_init(pdev);
	if (ret)
		goto failed_mii_init;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(ndev);

	ret = register_netdev(ndev);
	if (ret)
		goto failed_register;

	return 0;

failed_register:
	fec_enet_mii_remove(fep);
failed_mii_init:
failed_init:
	clk_disable(fep->clk);
	clk_put(fep->clk);
failed_clk:
	for (i = 0; i < 3; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq > 0)
			free_irq(irq, ndev);
	}
failed_irq:
	iounmap((void __iomem *)ndev->base_addr);
failed_ioremap:
	free_netdev(ndev);

	return ret;
}

static int __devexit
fec_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	platform_set_drvdata(pdev, NULL);

	fec_stop(ndev);
	fec_enet_mii_remove(fep);
	clk_disable(fep->clk);
	clk_put(fep->clk);
	iounmap((void __iomem *)ndev->base_addr);
	unregister_netdev(ndev);
	free_netdev(ndev);
	return 0;
}

static int
fec_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep;

	if (ndev) {
		fep = netdev_priv(ndev);
		if (netif_running(ndev))
			fec_enet_close(ndev);
		clk_disable(fep->clk);
	}
	return 0;
}

static int
fec_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep;

	if (ndev) {
		fep = netdev_priv(ndev);
		clk_enable(fep->clk);
		if (netif_running(ndev))
			fec_enet_open(ndev);
	}
	return 0;
}

static const struct dev_pm_ops fec_pm_ops = {
	.suspend	= fec_suspend,
	.resume		= fec_resume,
	.freeze		= fec_suspend,
	.thaw		= fec_resume,
	.poweroff	= fec_suspend,
	.restore	= fec_resume,
};

static struct platform_driver fec_driver = {
	.driver	= {
		.name	= "fec",
		.owner	= THIS_MODULE,
		.pm	= &fec_pm_ops,
	},
	.probe	= fec_probe,
	.remove	= __devexit_p(fec_drv_remove),
};

static int __init
fec_enet_module_init(void)
{
	printk(KERN_INFO "FEC Ethernet Driver\n");

	return platform_driver_register(&fec_driver);
}

static void __exit
fec_enet_cleanup(void)
{
	platform_driver_unregister(&fec_driver);
}

module_exit(fec_enet_cleanup);
module_init(fec_enet_module_init);

MODULE_LICENSE("GPL");