/*
 * DaVinci MDIO Module driver
 *
 * Copyright (C) 2010 Texas Instruments.
 *
 * Shamelessly ripped out of davinci_emac.c, original copyrights follow:
 *
 * Copyright (C) 2009 Texas Instruments.
 *
 * ---------------------------------------------------------------------------
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 * ---------------------------------------------------------------------------
 */
27 #include <linux/module.h>
28 #include <linux/kernel.h>
29 #include <linux/platform_device.h>
30 #include <linux/delay.h>
31 #include <linux/sched.h>
32 #include <linux/slab.h>
33 #include <linux/phy.h>
34 #include <linux/clk.h>
35 #include <linux/err.h>
37 #include <linux/pm_runtime.h>
38 #include <linux/davinci_emac.h>
/*
 * This timeout definition is a worst-case ultra defensive measure against
 * unexpected controller lock ups.  Ideally, we should never ever hit this
 * scenario in practice.
 */
#define MDIO_TIMEOUT		100 /* msecs */

/* masks for the PHY register number and PHY address fields of a transaction */
#define PHY_REG_MASK		0x1f
#define PHY_ID_MASK		0x1f

/* default MDIO bus output frequency, used when no platform data is given */
#define DEF_OUT_FREQ		2200000		/* 2.2 MHz */
52 struct davinci_mdio_regs {
55 #define CONTROL_IDLE BIT(31)
56 #define CONTROL_ENABLE BIT(30)
57 #define CONTROL_MAX_DIV (0xffff)
72 #define USERACCESS_GO BIT(31)
73 #define USERACCESS_WRITE BIT(30)
74 #define USERACCESS_ACK BIT(29)
75 #define USERACCESS_READ (0)
76 #define USERACCESS_DATA (0xffff)
82 struct mdio_platform_data default_pdata = {
83 .bus_freq = DEF_OUT_FREQ,
86 struct davinci_mdio_data {
87 struct mdio_platform_data pdata;
88 struct davinci_mdio_regs __iomem *regs;
94 unsigned long access_time; /* jiffies */
97 static void __davinci_mdio_reset(struct davinci_mdio_data *data)
99 u32 mdio_in, div, mdio_out_khz, access_time;
101 mdio_in = clk_get_rate(data->clk);
102 div = (mdio_in / data->pdata.bus_freq) - 1;
103 if (div > CONTROL_MAX_DIV)
104 div = CONTROL_MAX_DIV;
106 /* set enable and clock divider */
107 __raw_writel(div | CONTROL_ENABLE, &data->regs->control);
110 * One mdio transaction consists of:
111 * 32 bits of preamble
112 * 32 bits of transferred data
113 * 24 bits of bus yield (not needed unless shared?)
115 mdio_out_khz = mdio_in / (1000 * (div + 1));
116 access_time = (88 * 1000) / mdio_out_khz;
119 * In the worst case, we could be kicking off a user-access immediately
120 * after the mdio bus scan state-machine triggered its own read. If
121 * so, our request could get deferred by one access cycle. We
122 * defensively allow for 4 access cycles.
124 data->access_time = usecs_to_jiffies(access_time * 4);
125 if (!data->access_time)
126 data->access_time = 1;
129 static int davinci_mdio_reset(struct mii_bus *bus)
131 struct davinci_mdio_data *data = bus->priv;
134 __davinci_mdio_reset(data);
136 /* wait for scan logic to settle */
137 msleep(PHY_MAX_ADDR * data->access_time);
139 /* dump hardware version info */
140 ver = __raw_readl(&data->regs->version);
141 dev_info(data->dev, "davinci mdio revision %d.%d\n",
142 (ver >> 8) & 0xff, ver & 0xff);
144 /* get phy mask from the alive register */
145 phy_mask = __raw_readl(&data->regs->alive);
147 /* restrict mdio bus to live phys only */
148 dev_info(data->dev, "detected phy mask %x\n", ~phy_mask);
149 phy_mask = ~phy_mask;
151 /* desperately scan all phys */
152 dev_warn(data->dev, "no live phy, scanning all\n");
155 data->bus->phy_mask = phy_mask;
160 /* wait until hardware is ready for another user access */
161 static inline int wait_for_user_access(struct davinci_mdio_data *data)
163 struct davinci_mdio_regs __iomem *regs = data->regs;
164 unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
167 while (time_after(timeout, jiffies)) {
168 reg = __raw_readl(®s->user[0].access);
169 if ((reg & USERACCESS_GO) == 0)
172 reg = __raw_readl(®s->control);
173 if ((reg & CONTROL_IDLE) == 0)
177 * An emac soft_reset may have clobbered the mdio controller's
178 * state machine. We need to reset and retry the current
181 dev_warn(data->dev, "resetting idled controller\n");
182 __davinci_mdio_reset(data);
186 reg = __raw_readl(®s->user[0].access);
187 if ((reg & USERACCESS_GO) == 0)
190 dev_err(data->dev, "timed out waiting for user access\n");
194 /* wait until hardware state machine is idle */
195 static inline int wait_for_idle(struct davinci_mdio_data *data)
197 struct davinci_mdio_regs __iomem *regs = data->regs;
198 unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
200 while (time_after(timeout, jiffies)) {
201 if (__raw_readl(®s->control) & CONTROL_IDLE)
204 dev_err(data->dev, "timed out waiting for idle\n");
208 static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
210 struct davinci_mdio_data *data = bus->priv;
214 if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
217 spin_lock(&data->lock);
219 if (data->suspended) {
220 spin_unlock(&data->lock);
224 reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
228 ret = wait_for_user_access(data);
234 __raw_writel(reg, &data->regs->user[0].access);
236 ret = wait_for_user_access(data);
242 reg = __raw_readl(&data->regs->user[0].access);
243 ret = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -EIO;
247 spin_unlock(&data->lock);
252 static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
253 int phy_reg, u16 phy_data)
255 struct davinci_mdio_data *data = bus->priv;
259 if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
262 spin_lock(&data->lock);
264 if (data->suspended) {
265 spin_unlock(&data->lock);
269 reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
270 (phy_id << 16) | (phy_data & USERACCESS_DATA));
273 ret = wait_for_user_access(data);
279 __raw_writel(reg, &data->regs->user[0].access);
281 ret = wait_for_user_access(data);
287 spin_unlock(&data->lock);
292 static int __devinit davinci_mdio_probe(struct platform_device *pdev)
294 struct mdio_platform_data *pdata = pdev->dev.platform_data;
295 struct device *dev = &pdev->dev;
296 struct davinci_mdio_data *data;
297 struct resource *res;
298 struct phy_device *phy;
301 data = kzalloc(sizeof(*data), GFP_KERNEL);
303 dev_err(dev, "failed to alloc device data\n");
307 data->pdata = pdata ? (*pdata) : default_pdata;
309 data->bus = mdiobus_alloc();
311 dev_err(dev, "failed to alloc mii bus\n");
316 data->bus->name = dev_name(dev);
317 data->bus->read = davinci_mdio_read,
318 data->bus->write = davinci_mdio_write,
319 data->bus->reset = davinci_mdio_reset,
320 data->bus->parent = dev;
321 data->bus->priv = data;
322 snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
323 pdev->name, pdev->id);
325 pm_runtime_enable(&pdev->dev);
326 pm_runtime_get_sync(&pdev->dev);
327 data->clk = clk_get(&pdev->dev, "fck");
328 if (IS_ERR(data->clk)) {
329 dev_err(dev, "failed to get device clock\n");
330 ret = PTR_ERR(data->clk);
335 dev_set_drvdata(dev, data);
337 spin_lock_init(&data->lock);
339 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
341 dev_err(dev, "could not find register map resource\n");
346 res = devm_request_mem_region(dev, res->start, resource_size(res),
349 dev_err(dev, "could not allocate register map resource\n");
354 data->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
356 dev_err(dev, "could not map mdio registers\n");
361 /* register the mii bus */
362 ret = mdiobus_register(data->bus);
366 /* scan and dump the bus */
367 for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
368 phy = data->bus->phy_map[addr];
370 dev_info(dev, "phy[%d]: device %s, driver %s\n",
371 phy->addr, dev_name(&phy->dev),
372 phy->drv ? phy->drv->name : "unknown");
380 mdiobus_free(data->bus);
384 pm_runtime_put_sync(&pdev->dev);
385 pm_runtime_disable(&pdev->dev);
392 static int __devexit davinci_mdio_remove(struct platform_device *pdev)
394 struct device *dev = &pdev->dev;
395 struct davinci_mdio_data *data = dev_get_drvdata(dev);
398 mdiobus_unregister(data->bus);
399 mdiobus_free(data->bus);
404 pm_runtime_put_sync(&pdev->dev);
405 pm_runtime_disable(&pdev->dev);
407 dev_set_drvdata(dev, NULL);
414 static int davinci_mdio_suspend(struct device *dev)
416 struct davinci_mdio_data *data = dev_get_drvdata(dev);
419 spin_lock(&data->lock);
421 /* shutdown the scan state machine */
422 ctrl = __raw_readl(&data->regs->control);
423 ctrl &= ~CONTROL_ENABLE;
424 __raw_writel(ctrl, &data->regs->control);
427 pm_runtime_put_sync(data->dev);
429 data->suspended = true;
430 spin_unlock(&data->lock);
435 static int davinci_mdio_resume(struct device *dev)
437 struct davinci_mdio_data *data = dev_get_drvdata(dev);
440 spin_lock(&data->lock);
441 pm_runtime_put_sync(data->dev);
443 /* restart the scan state machine */
444 ctrl = __raw_readl(&data->regs->control);
445 ctrl |= CONTROL_ENABLE;
446 __raw_writel(ctrl, &data->regs->control);
448 data->suspended = false;
449 spin_unlock(&data->lock);
454 static const struct dev_pm_ops davinci_mdio_pm_ops = {
455 .suspend = davinci_mdio_suspend,
456 .resume = davinci_mdio_resume,
459 static struct platform_driver davinci_mdio_driver = {
461 .name = "davinci_mdio",
462 .owner = THIS_MODULE,
463 .pm = &davinci_mdio_pm_ops,
465 .probe = davinci_mdio_probe,
466 .remove = __devexit_p(davinci_mdio_remove),
469 static int __init davinci_mdio_init(void)
471 return platform_driver_register(&davinci_mdio_driver);
473 device_initcall(davinci_mdio_init);
475 static void __exit davinci_mdio_exit(void)
477 platform_driver_unregister(&davinci_mdio_driver);
479 module_exit(davinci_mdio_exit);
481 MODULE_LICENSE("GPL");
482 MODULE_DESCRIPTION("DaVinci MDIO driver");