X-Git-Url: http://pileus.org/git/?a=blobdiff_plain;f=drivers%2Fnet%2Ftun.c;h=8939d2117de2420784ba524cfc013bb85aaa5f92;hb=edfb6a148ce62e5e19354a1dcd9a34e00815c2a1;hp=af372d0957fe3c6c7bc47e12df7d2c1e827c7f20;hpb=3152ba0f86428cebe8a9f8462d5be0a9aefa6289;p=~andy%2Flinux
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index af372d0957f..8939d2117de 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -109,11 +109,10 @@ struct tap_filter {
 	unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
 };
 
-/* 1024 is probably a high enough limit: modern hypervisors seem to support on
- * the order of 100-200 CPUs so this leaves us some breathing space if we want
- * to match a queue per guest CPU.
- */
-#define MAX_TAP_QUEUES 1024
+/* DEFAULT_MAX_NUM_RSS_QUEUES was chosen so that the rx/tx queues allocated for
+ * the netdevice fit in one page, which keeps the allocation from failing.
+ * TODO: increase the limit. */
+#define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
 
 #define TUN_FLOW_EXPIRE (3 * HZ)
 
@@ -185,6 +184,7 @@ struct tun_struct {
 	unsigned long ageing_time;
 	unsigned int numdisabled;
 	struct list_head disabled;
+	void *security;
 };
 
 static inline u32 tun_hashfn(u32 rxhash)
@@ -490,6 +490,10 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
 	struct tun_file *tfile = file->private_data;
 	int err;
 
+	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
+	if (err < 0)
+		goto out;
+
 	err = -EINVAL;
 	if (rtnl_dereference(tfile->tun))
 		goto out;
@@ -1373,6 +1377,7 @@ static void tun_free_netdev(struct net_device *dev)
 
 	BUG_ON(!(list_empty(&tun->disabled)));
 	tun_flow_uninit(tun);
+	security_tun_dev_free_security(tun->security);
 	free_netdev(dev);
 }
 
@@ -1562,7 +1567,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 		if (tun_not_capable(tun))
 			return -EPERM;
 
-		err = security_tun_dev_attach(tfile->socket.sk);
+		err = security_tun_dev_open(tun->security);
 		if (err < 0)
 			return err;
 
@@ -1577,6 +1582,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 	else {
 		char *name;
 		unsigned long flags = 0;
+		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
+			     MAX_TAP_QUEUES : 1;
 
 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
@@ -1600,8 +1607,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 			name = ifr->ifr_name;
 
 		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
-				       tun_setup,
-				       MAX_TAP_QUEUES, MAX_TAP_QUEUES);
+				       tun_setup, queues, queues);
+
 		if (!dev)
 			return -ENOMEM;
 
@@ -1619,7 +1626,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
 		spin_lock_init(&tun->lock);
 
-		security_tun_dev_post_create(&tfile->sk);
+		err = security_tun_dev_alloc_security(&tun->security);
+		if (err < 0)
+			goto err_free_dev;
 
 		tun_net_init(dev);
 
@@ -1789,10 +1798,14 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
 
 	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
 		tun = tfile->detached;
-		if (!tun)
+		if (!tun) {
 			ret = -EINVAL;
-		else
-			ret = tun_attach(tun, file);
+			goto unlock;
+		}
+		ret = security_tun_dev_attach_queue(tun->security);
+		if (ret < 0)
+			goto unlock;
+		ret = tun_attach(tun, file);
 	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
 		tun = rtnl_dereference(tfile->tun);
 		if (!tun || !(tun->flags & TUN_TAP_MQ))
@@ -1802,6 +1815,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
 	} else
 		ret = -EINVAL;
 
+unlock:
 	rtnl_unlock();
 	return ret;
 }
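The hunks above pair security_tun_dev_alloc_security()/security_tun_dev_free_security() with the lifetime of the device, call security_tun_dev_open() when TUNSETIFF reopens an existing device, and gate the TUNSETQUEUE attach path with security_tun_dev_attach_queue() before tun_attach() (which itself now calls security_tun_dev_attach()). The sketch below is not part of the patch; it is a minimal userspace illustration of the multi-queue ioctl sequence that exercises the new attach-queue check. The device name "tap-mq0" and the helper names are hypothetical, and the program needs CAP_NET_ADMIN to run.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

/* Open one queue of a multi-queue tap device.  Every fd that issues
 * TUNSETIFF with IFF_MULTI_QUEUE and the same name becomes one queue. */
static int open_queue(const char *name)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);

	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

/* Disable and then re-enable one queue.  The re-enable step goes through
 * tun_set_queue(), which now calls security_tun_dev_attach_queue()
 * before tun_attach(). */
static int toggle_queue(int fd)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_DETACH_QUEUE;
	if (ioctl(fd, TUNSETQUEUE, &ifr) < 0)
		return -1;

	ifr.ifr_flags = IFF_ATTACH_QUEUE;
	return ioctl(fd, TUNSETQUEUE, &ifr);
}

int main(void)
{
	int q0 = open_queue("tap-mq0");	/* hypothetical device name */
	int q1 = open_queue("tap-mq0");	/* second queue, same device */

	if (q0 < 0 || q1 < 0 || toggle_queue(q1) < 0) {
		perror("tap-mq0");
		return 1;
	}
	close(q1);
	close(q0);
	return 0;
}

With an LSM such as SELinux loaded, a denial from the attach-queue hook would typically surface to this program as an EACCES error from the second TUNSETQUEUE ioctl, and the queue would stay detached because tun_attach() is never reached.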