netpoll: Fix __netpoll_free_rcu so that it can hold the rtnl lock
author     Neil Horman <nhorman@tuxdriver.com>
           Mon, 11 Feb 2013 10:25:30 +0000 (10:25 +0000)
committer  David S. Miller <davem@davemloft.net>
           Tue, 12 Feb 2013 00:19:33 +0000 (19:19 -0500)
__netpoll_free_rcu is used to free netpoll structures when the rtnl_lock is
already held.  The mechanism defers the call to __netpoll_cleanup so that it
runs outside of the rtnl_lock, in order to avoid deadlock.  Unfortunately,
__netpoll_cleanup modifies pointers (dev->np), which means the rtnl_lock must
be held while calling it.  The lock cannot be taken in the deferred path,
however, because rcu callbacks may be issued from softirq context, which
cannot sleep.

Fix this by converting the rcu callback to a work queue item that is
guaranteed to run in process context, so that we can hold the rtnl lock
properly while calling __netpoll_cleanup; a minimal sketch of the resulting
pattern follows the sign-offs below.

Tested successfully by myself.

Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: "David S. Miller" <davem@davemloft.net>
CC: Cong Wang <amwang@redhat.com>
CC: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
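
For reference, the pattern the patch moves to -- deferring teardown to a work
item that can take the rtnl lock in process context -- can be sketched roughly
as follows.  This is a simplified, self-contained illustration, not the kernel
code itself: struct foo, foo_setup(), foo_async_cleanup() and
__foo_free_async() are hypothetical stand-ins for the netpoll structures and
functions shown in the diff below.

#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo {
	/* ... state that must be torn down under the rtnl lock ... */
	struct work_struct cleanup_work;
};

static void __foo_cleanup(struct foo *f)
{
	ASSERT_RTNL();	/* caller must hold the rtnl lock */
	/* ... detach f from its net_device, release sub-resources ... */
}

/* Work handler: always runs in process context, so sleeping locks are fine. */
static void foo_async_cleanup(struct work_struct *work)
{
	struct foo *f = container_of(work, struct foo, cleanup_work);

	rtnl_lock();
	__foo_cleanup(f);
	rtnl_unlock();
	kfree(f);
}

static struct foo *foo_setup(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	INIT_WORK(&f->cleanup_work, foo_async_cleanup);
	return f;
}

/*
 * Safe to call with the rtnl lock already held, or from a context that
 * cannot sleep: the real teardown is deferred to the system workqueue.
 */
static void __foo_free_async(struct foo *f)
{
	schedule_work(&f->cleanup_work);
}

The key property is that schedule_work() never sleeps and may be called from
atomic context, while the handler is run by a kernel worker thread, which is
why rtnl_lock() is legal there; an rcu callback, by contrast, may run in
softirq context and must not sleep.
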
drivers/net/bonding/bond_main.c
include/linux/netpoll.h
net/8021q/vlan_dev.c
net/bridge/br_device.c
net/core/netpoll.c

diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 22399374b1e11a0d0cf852486d1af3feb474f66e..94c1534dd578a8f1fcef6bdd949fc59bdcebd169 100644
@@ -1249,7 +1249,7 @@ static inline void slave_disable_netpoll(struct slave *slave)
                return;
 
        slave->np = NULL;
-       __netpoll_free_rcu(np);
+       __netpoll_free_async(np);
 }
 static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
 {
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index ab856d507b7ea44fa1bdf637a7fd616af2c2aad3..9d7d8c64f7c8664836131ab1258f24c94d5d29bd 100644
@@ -32,7 +32,7 @@ struct netpoll {
        u8 remote_mac[ETH_ALEN];
 
        struct list_head rx; /* rx_np list element */
-       struct rcu_head rcu;
+       struct work_struct cleanup_work;
 };
 
 struct netpoll_info {
@@ -68,7 +68,7 @@ int netpoll_setup(struct netpoll *np);
 int netpoll_trap(void);
 void netpoll_set_trap(int trap);
 void __netpoll_cleanup(struct netpoll *np);
-void __netpoll_free_rcu(struct netpoll *np);
+void __netpoll_free_async(struct netpoll *np);
 void netpoll_cleanup(struct netpoll *np);
 int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo);
 void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 34df5b3c9b75be64dc84cdca8e1b8b6dfaf570b2..19cf81bf9f692f0ac41f21bdf171f6ea2ab039a0 100644
@@ -733,7 +733,7 @@ static void vlan_dev_netpoll_cleanup(struct net_device *dev)
 
        vlan->netpoll = NULL;
 
-       __netpoll_free_rcu(netpoll);
+       __netpoll_free_async(netpoll);
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
 
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index ba6fb2d6094072a608ad8d2168be7a75f6dddcf8..ca98fa5b2c78dcf4797e37cc81a8cf8e8bb19ebb 100644
@@ -265,7 +265,7 @@ void br_netpoll_disable(struct net_bridge_port *p)
 
        p->np = NULL;
 
-       __netpoll_free_rcu(np);
+       __netpoll_free_async(np);
 }
 
 #endif
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index edcd9ad95304141c7cd2892e55c2d7f7dfdab6c6..c536474e2260bc44f4fb8f30391c31f57f70edda 100644
@@ -61,6 +61,7 @@ static struct srcu_struct netpoll_srcu;
 
 static void zap_completion_queue(void);
 static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
+static void netpoll_async_cleanup(struct work_struct *work);
 
 static unsigned int carrier_timeout = 4;
 module_param(carrier_timeout, uint, 0644);
@@ -1020,6 +1021,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 
        np->dev = ndev;
        strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
+       INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);
 
        if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
            !ndev->netdev_ops->ndo_poll_controller) {
@@ -1255,25 +1257,27 @@ void __netpoll_cleanup(struct netpoll *np)
                if (ops->ndo_netpoll_cleanup)
                        ops->ndo_netpoll_cleanup(np->dev);
 
-               RCU_INIT_POINTER(np->dev->npinfo, NULL);
+               rcu_assign_pointer(np->dev->npinfo, NULL);
                call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
        }
 }
 EXPORT_SYMBOL_GPL(__netpoll_cleanup);
 
-static void rcu_cleanup_netpoll(struct rcu_head *rcu_head)
+static void netpoll_async_cleanup(struct work_struct *work)
 {
-       struct netpoll *np = container_of(rcu_head, struct netpoll, rcu);
+       struct netpoll *np = container_of(work, struct netpoll, cleanup_work);
 
+       rtnl_lock();
        __netpoll_cleanup(np);
+       rtnl_unlock();
        kfree(np);
 }
 
-void __netpoll_free_rcu(struct netpoll *np)
+void __netpoll_free_async(struct netpoll *np)
 {
-       call_rcu_bh(&np->rcu, rcu_cleanup_netpoll);
+       schedule_work(&np->cleanup_work);
 }
-EXPORT_SYMBOL_GPL(__netpoll_free_rcu);
+EXPORT_SYMBOL_GPL(__netpoll_free_async);
 
 void netpoll_cleanup(struct netpoll *np)
 {