5 * Copyright (c) 2004 Freescale Semiconductor, Inc.
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 #include <linux/kernel.h>
17 #include <linux/string.h>
18 #include <linux/errno.h>
19 #include <linux/unistd.h>
20 #include <linux/slab.h>
21 #include <linux/interrupt.h>
22 #include <linux/init.h>
23 #include <linux/delay.h>
24 #include <linux/device.h>
25 #include <linux/of_device.h>
26 #include <linux/of_mdio.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/skbuff.h>
30 #include <linux/spinlock.h>
32 #include <linux/module.h>
33 #include <linux/mii.h>
34 #include <linux/ethtool.h>
35 #include <linux/phy.h>
37 #include <linux/uaccess.h>
42 * mdiobus_alloc_size - allocate a mii_bus structure
43 * @size: extra amount of memory to allocate for private storage.
44 * If non-zero, then bus->priv points to that memory.
46 * Description: called by a bus driver to allocate an mii_bus
47 * structure to fill in.
49 struct mii_bus *mdiobus_alloc_size(size_t size)
/* Align so that the private area following the struct starts on a
 * NETDEV_ALIGN boundary.
 */
52 size_t aligned_size = ALIGN(sizeof(*bus), NETDEV_ALIGN);
55 /* If we alloc extra space, it should be aligned */
57 alloc_size = aligned_size + size;
59 alloc_size = sizeof(*bus);
/* One zeroed allocation holds both the mii_bus and its priv data. */
61 bus = kzalloc(alloc_size, GFP_KERNEL);
63 bus->state = MDIOBUS_ALLOCATED;
/* priv points just past the (aligned) mii_bus structure. */
65 bus->priv = (void *)bus + aligned_size;
70 EXPORT_SYMBOL(mdiobus_alloc_size);
73 * mdiobus_release - mii_bus device release callback
74 * @d: the target struct device that contains the mii_bus
76 * Description: called when the last reference to an mii_bus is
77 * dropped, to free the underlying memory.
79 static void mdiobus_release(struct device *d)
81 struct mii_bus *bus = to_mii_bus(d);
/* Reaching release in any other state indicates a driver lifetime bug:
 * RELEASED is the normal path via mdiobus_free(); ALLOCATED is tolerated
 * for drivers that free a bus they never registered.
 */
82 BUG_ON(bus->state != MDIOBUS_RELEASED &&
83 /* for compatibility with error handling in drivers */
84 bus->state != MDIOBUS_ALLOCATED);
/* Device class shared by all MDIO buses; the release hook frees the
 * embedded mii_bus when its last device reference is dropped.
 */
88 static struct class mdio_bus_class = {
90 .dev_release = mdiobus_release,
93 #if IS_ENABLED(CONFIG_OF_MDIO)
94 /* Helper function for of_mdio_find_bus */
/* Match callback: a bus device matches when its OF node is the one
 * being searched for.
 */
95 static int of_mdio_bus_match(struct device *dev, const void *mdio_bus_np)
97 return dev->of_node == mdio_bus_np;
100 * of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
101 * @mdio_bus_np: Pointer to the mii_bus.
103 * Returns a pointer to the mii_bus, or NULL if none found.
105 * Because the association of a device_node and mii_bus is made via
106 * of_mdiobus_register(), the mii_bus cannot be found before it is
107 * registered with of_mdiobus_register().
110 struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np)
/* NOTE(review): class_find_device() takes a reference on the matched
 * device; presumably the caller is responsible for dropping it with
 * put_device() — confirm against callers.
 */
117 d = class_find_device(&mdio_bus_class, NULL, mdio_bus_np,
120 return d ? to_mii_bus(d) : NULL;
122 EXPORT_SYMBOL(of_mdio_find_bus);
126 * mdiobus_register - bring up all the PHYs on a given bus and attach them to bus
127 * @bus: target mii_bus
129 * Description: Called by a bus driver to bring up all the PHYs
130 * on a given bus, and attach them to the bus.
132 * Returns 0 on success or < 0 on error.
134 int mdiobus_register(struct mii_bus *bus)
/* A bus without a name or read/write accessors is unusable. */
138 if (NULL == bus || NULL == bus->name ||
139 NULL == bus->read || NULL == bus->write)
/* Registering a bus that is already live (or released) is a driver bug. */
142 BUG_ON(bus->state != MDIOBUS_ALLOCATED &&
143 bus->state != MDIOBUS_UNREGISTERED);
/* Hook the bus device into the driver model under mdio_bus_class. */
145 bus->dev.parent = bus->parent;
146 bus->dev.class = &mdio_bus_class;
147 bus->dev.groups = NULL;
148 dev_set_name(&bus->dev, "%s", bus->id);
150 err = device_register(&bus->dev);
152 pr_err("mii_bus %s failed to register\n", bus->id);
/* Serializes all MDIO read/write transactions on this bus. */
156 mutex_init(&bus->mdio_lock);
/* Probe each PHY address the driver has not masked out via phy_mask. */
161 for (i = 0; i < PHY_MAX_ADDR; i++) {
162 if ((bus->phy_mask & (1 << i)) == 0) {
163 struct phy_device *phydev;
165 phydev = mdiobus_scan(bus, i);
166 if (IS_ERR(phydev)) {
167 err = PTR_ERR(phydev);
173 bus->state = MDIOBUS_REGISTERED;
174 pr_info("%s: probed\n", bus->name);
/* Error unwind: unregister the PHYs found so far, then remove the
 * bus device registered above.
 */
180 device_unregister(&bus->phy_map[i]->dev);
182 device_del(&bus->dev);
185 EXPORT_SYMBOL(mdiobus_register);
/* mdiobus_unregister - tear down a bus brought up by mdiobus_register().
 * Removes the bus device, then unregisters every PHY that was scanned
 * onto the bus and clears its phy_map slot.
 */
187 void mdiobus_unregister(struct mii_bus *bus)
/* Only a currently-registered bus may be unregistered. */
191 BUG_ON(bus->state != MDIOBUS_REGISTERED);
192 bus->state = MDIOBUS_UNREGISTERED;
194 device_del(&bus->dev);
195 for (i = 0; i < PHY_MAX_ADDR; i++) {
197 device_unregister(&bus->phy_map[i]->dev);
198 bus->phy_map[i] = NULL;
201 EXPORT_SYMBOL(mdiobus_unregister);
204 * mdiobus_free - free a struct mii_bus
205 * @bus: mii_bus to free
207 * This function releases the reference to the underlying device
208 * object in the mii_bus. If this is the last reference, the mii_bus
211 void mdiobus_free(struct mii_bus *bus)
213 /* For compatibility with error handling in drivers. */
/* A bus that was allocated but never registered has no device refcount
 * to drop, so it is freed directly.
 */
214 if (bus->state == MDIOBUS_ALLOCATED) {
/* Otherwise the bus must have been unregistered first. */
219 BUG_ON(bus->state != MDIOBUS_UNREGISTERED);
/* Mark RELEASED so the state check in mdiobus_release() passes. */
220 bus->state = MDIOBUS_RELEASED;
222 put_device(&bus->dev);
224 EXPORT_SYMBOL(mdiobus_free);
/* mdiobus_scan - probe address @addr on @bus and register any PHY found.
 * Returns the phy_device read from the bus, or an error/NULL result
 * propagated from get_phy_device().
 */
226 struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
228 struct phy_device *phydev;
231 phydev = get_phy_device(bus, addr, false);
/* No PHY at this address, or the ID read failed. */
232 if (IS_ERR(phydev) || phydev == NULL)
235 err = phy_device_register(phydev);
/* Registration failed: release the half-constructed phy_device. */
237 phy_device_free(phydev);
243 EXPORT_SYMBOL(mdiobus_scan);
246 * mdiobus_read - Convenience function for reading a given MII mgmt register
247 * @bus: the mii_bus struct
248 * @addr: the phy address
249 * @regnum: register number to read
251 * NOTE: MUST NOT be called from interrupt context,
252 * because the bus read/write functions may wait for an interrupt
253 * to conclude the operation.
255 int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
/* Enforce the process-context-only contract stated above. */
259 BUG_ON(in_interrupt());
/* mdio_lock serializes this transaction against concurrent bus users. */
261 mutex_lock(&bus->mdio_lock);
262 retval = bus->read(bus, addr, regnum);
263 mutex_unlock(&bus->mdio_lock);
267 EXPORT_SYMBOL(mdiobus_read);
270 * mdiobus_write - Convenience function for writing a given MII mgmt register
271 * @bus: the mii_bus struct
272 * @addr: the phy address
273 * @regnum: register number to write
274 * @val: value to write to @regnum
276 * NOTE: MUST NOT be called from interrupt context,
277 * because the bus read/write functions may wait for an interrupt
278 * to conclude the operation.
280 int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
/* Enforce the process-context-only contract stated above. */
284 BUG_ON(in_interrupt());
/* mdio_lock serializes this transaction against concurrent bus users. */
286 mutex_lock(&bus->mdio_lock);
287 err = bus->write(bus, addr, regnum, val);
288 mutex_unlock(&bus->mdio_lock);
292 EXPORT_SYMBOL(mdiobus_write);
295 * mdio_bus_match - determine if given PHY driver supports the given PHY device
296 * @dev: target PHY device
297 * @drv: given PHY driver
299 * Description: Given a PHY device, and a PHY driver, return 1 if
300 * the driver supports the device. Otherwise, return 0.
302 static int mdio_bus_match(struct device *dev, struct device_driver *drv)
304 struct phy_device *phydev = to_phy_device(dev);
305 struct phy_driver *phydrv = to_phy_driver(drv);
/* Device-tree compatible match takes precedence. */
307 if (of_driver_match_device(dev, drv))
/* A driver may supply its own match routine. */
310 if (phydrv->match_phy_device)
311 return phydrv->match_phy_device(phydev);
/* Default: compare PHY IDs under the driver's ID mask. */
313 return (phydrv->phy_id & phydrv->phy_id_mask) ==
314 (phydev->phy_id & phydrv->phy_id_mask);
/* mdio_bus_phy_may_suspend - decide whether it is safe to suspend a PHY.
 * A PHY must stay powered if it may be needed as a wakeup source by the
 * attached net device or its parent.
 */
319 static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
321 struct device_driver *drv = phydev->dev.driver;
322 struct phy_driver *phydrv = to_phy_driver(drv);
323 struct net_device *netdev = phydev->attached_dev;
/* No driver bound, or driver has no suspend hook: nothing to do. */
325 if (!drv || !phydrv->suspend)
328 /* PHY not attached? May suspend. */
332 /* Don't suspend PHY if the attached netdev parent may wakeup.
333 * The parent may point to a PCI device, as in tg3 driver.
335 if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent))
338 /* Also don't suspend PHY if the netdev itself may wakeup. This
339 * is the case for devices w/o underlying pwr. mgmt. aware bus,
342 if (device_may_wakeup(&netdev->dev))
/* mdio_bus_suspend - PM suspend callback for devices on the MDIO bus. */
348 static int mdio_bus_suspend(struct device *dev)
350 struct phy_driver *phydrv = to_phy_driver(dev->driver);
351 struct phy_device *phydev = to_phy_device(dev);
353 /* We must stop the state machine manually, otherwise it stops out of
354 * control, possibly with the phydev->lock held. Upon resume, netdev
355 * may call phy routines that try to grab the same lock, and that may
356 * lead to a deadlock.
358 if (phydev->attached_dev && phydev->adjust_link)
359 phy_stop_machine(phydev);
/* Skip the driver suspend hook when the PHY must stay up for wakeup. */
361 if (!mdio_bus_phy_may_suspend(phydev))
364 return phydrv->suspend(phydev);
/* mdio_bus_resume - PM resume callback; mirrors mdio_bus_suspend(). */
367 static int mdio_bus_resume(struct device *dev)
369 struct phy_driver *phydrv = to_phy_driver(dev->driver);
370 struct phy_device *phydev = to_phy_device(dev);
/* If the PHY was never suspended (wakeup source), skip driver resume. */
373 if (!mdio_bus_phy_may_suspend(phydev))
376 ret = phydrv->resume(phydev);
/* Restart the state machine stopped in mdio_bus_suspend(). */
381 if (phydev->attached_dev && phydev->adjust_link)
382 phy_start_machine(phydev);
/* mdio_bus_restore - PM restore callback (resume from hibernation).
 * Re-initializes PHY hardware, whose state was lost across the image
 * restore, and restarts link negotiation from scratch.
 */
387 static int mdio_bus_restore(struct device *dev)
389 struct phy_device *phydev = to_phy_device(dev);
390 struct net_device *netdev = phydev->attached_dev;
396 ret = phy_init_hw(phydev);
400 /* The PHY needs to renegotiate. */
402 phydev->state = PHY_UP;
404 phy_start_machine(phydev);
/* PM operations for the MDIO bus type. freeze/thaw reuse the regular
 * suspend/resume paths; restore re-initializes PHY hardware because its
 * state is lost across hibernation.
 */
409 static const struct dev_pm_ops mdio_bus_pm_ops = {
410 .suspend = mdio_bus_suspend,
411 .resume = mdio_bus_resume,
412 .freeze = mdio_bus_suspend,
413 .thaw = mdio_bus_resume,
414 .restore = mdio_bus_restore,
417 #define MDIO_BUS_PM_OPS (&mdio_bus_pm_ops)
/* Without CONFIG_PM the bus type carries no PM operations. */
421 #define MDIO_BUS_PM_OPS NULL
423 #endif /* CONFIG_PM */
/* sysfs "phy_id" attribute: expose the PHY's 32-bit ID in hex. */
426 phy_id_show(struct device *dev, struct device_attribute *attr, char *buf)
428 struct phy_device *phydev = to_phy_device(dev);
430 return sprintf(buf, "0x%.8lx\n", (unsigned long)phydev->phy_id);
432 static DEVICE_ATTR_RO(phy_id);
/* Default sysfs attributes for devices on the MDIO bus. */
434 static struct attribute *mdio_dev_attrs[] = {
435 &dev_attr_phy_id.attr,
438 ATTRIBUTE_GROUPS(mdio_dev);
/* The MDIO bus type: binds PHY devices to PHY drivers via mdio_bus_match
 * and wires in the PM callbacks and default sysfs attribute groups.
 */
440 struct bus_type mdio_bus_type = {
442 .match = mdio_bus_match,
443 .pm = MDIO_BUS_PM_OPS,
444 .dev_groups = mdio_dev_groups,
446 EXPORT_SYMBOL(mdio_bus_type);
/* mdio_bus_init - register the MDIO class and bus type at boot.
 * On bus_register() failure the class registration is unwound so no
 * half-initialized state is left behind.
 */
448 int __init mdio_bus_init(void)
452 ret = class_register(&mdio_bus_class);
454 ret = bus_register(&mdio_bus_type);
456 class_unregister(&mdio_bus_class);
/* mdio_bus_exit - undo mdio_bus_init(): drop the class and bus type. */
462 void mdio_bus_exit(void)
464 class_unregister(&mdio_bus_class);
465 bus_unregister(&mdio_bus_type);