Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
index 7bab4de658a91d9fb1231f5e45461a268efc8487..7b172408cff02b72b8c5eb90307b63dec164f8c2 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -13,8 +13,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 //#define DEBUG
 #include <linux/netdevice.h>
@@ -299,35 +298,76 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
        return skb;
 }
 
-static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
+static struct sk_buff *receive_small(void *buf, unsigned int len)
 {
-       struct skb_vnet_hdr *hdr = skb_vnet_hdr(head_skb);
+       struct sk_buff * skb = buf;
+
+       len -= sizeof(struct virtio_net_hdr);
+       skb_trim(skb, len);
+
+       return skb;
+}
+
+static struct sk_buff *receive_big(struct net_device *dev,
+                                  struct receive_queue *rq,
+                                  void *buf,
+                                  unsigned int len)
+{
+       struct page *page = buf;
+       struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
+
+       if (unlikely(!skb))
+               goto err;
+
+       return skb;
+
+err:
+       dev->stats.rx_dropped++;
+       give_pages(rq, page);
+       return NULL;
+}
+
+static struct sk_buff *receive_mergeable(struct net_device *dev,
+                                        struct receive_queue *rq,
+                                        void *buf,
+                                        unsigned int len)
+{
+       struct skb_vnet_hdr *hdr = buf;
+       int num_buf = hdr->mhdr.num_buffers;
+       struct page *page = virt_to_head_page(buf);
+       int offset = buf - page_address(page);
+       struct sk_buff *head_skb = page_to_skb(rq, page, offset, len,
+                                              MERGE_BUFFER_LEN);
        struct sk_buff *curr_skb = head_skb;
-       char *buf;
-       struct page *page;
-       int num_buf, len, offset;
 
-       num_buf = hdr->mhdr.num_buffers;
+       if (unlikely(!curr_skb))
+               goto err_skb;
+
        while (--num_buf) {
-               int num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
+               int num_skb_frags;
+
                buf = virtqueue_get_buf(rq->vq, &len);
                if (unlikely(!buf)) {
-                       pr_debug("%s: rx error: %d buffers missing\n",
-                                head_skb->dev->name, hdr->mhdr.num_buffers);
-                       head_skb->dev->stats.rx_length_errors++;
-                       return -EINVAL;
+                       pr_debug("%s: rx error: %d buffers out of %d missing\n",
+                                dev->name, num_buf, hdr->mhdr.num_buffers);
+                       dev->stats.rx_length_errors++;
+                       goto err_buf;
                }
                if (unlikely(len > MERGE_BUFFER_LEN)) {
                        pr_debug("%s: rx error: merge buffer too long\n",
-                                head_skb->dev->name);
+                                dev->name);
                        len = MERGE_BUFFER_LEN;
                }
+
+               page = virt_to_head_page(buf);
+               --rq->num;
+
+               num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
                if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
                        struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
-                       if (unlikely(!nskb)) {
-                               head_skb->dev->stats.rx_dropped++;
-                               return -ENOMEM;
-                       }
+
+                       if (unlikely(!nskb))
+                               goto err_skb;
                        if (curr_skb == head_skb)
                                skb_shinfo(curr_skb)->frag_list = nskb;
                        else
@@ -341,8 +381,7 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
                        head_skb->len += len;
                        head_skb->truesize += MERGE_BUFFER_LEN;
                }
-               page = virt_to_head_page(buf);
-               offset = buf - (char *)page_address(page);
+               offset = buf - page_address(page);
                if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
                        put_page(page);
                        skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
@@ -351,9 +390,28 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
                        skb_add_rx_frag(curr_skb, num_skb_frags, page,
                                        offset, len, MERGE_BUFFER_LEN);
                }
+       }
+
+       return head_skb;
+
+err_skb:
+       put_page(page);
+       while (--num_buf) {
+               buf = virtqueue_get_buf(rq->vq, &len);
+               if (unlikely(!buf)) {
+                       pr_debug("%s: rx error: %d buffers missing\n",
+                                dev->name, num_buf);
+                       dev->stats.rx_length_errors++;
+                       break;
+               }
+               page = virt_to_head_page(buf);
+               put_page(page);
                --rq->num;
        }
-       return 0;
+err_buf:
+       dev->stats.rx_dropped++;
+       dev_kfree_skb(head_skb);
+       return NULL;
 }
 
 static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
@@ -362,48 +420,29 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
        struct net_device *dev = vi->dev;
        struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
        struct sk_buff *skb;
-       struct page *page;
        struct skb_vnet_hdr *hdr;
 
        if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
                pr_debug("%s: short packet %i\n", dev->name, len);
                dev->stats.rx_length_errors++;
-               if (vi->big_packets)
-                       give_pages(rq, buf);
-               else if (vi->mergeable_rx_bufs)
+               if (vi->mergeable_rx_bufs)
                        put_page(virt_to_head_page(buf));
+               else if (vi->big_packets)
+                       give_pages(rq, buf);
                else
                        dev_kfree_skb(buf);
                return;
        }
 
-       if (!vi->mergeable_rx_bufs && !vi->big_packets) {
-               skb = buf;
-               len -= sizeof(struct virtio_net_hdr);
-               skb_trim(skb, len);
-       } else if (vi->mergeable_rx_bufs) {
-               struct page *page = virt_to_head_page(buf);
-               skb = page_to_skb(rq, page,
-                                 (char *)buf - (char *)page_address(page),
-                                 len, MERGE_BUFFER_LEN);
-               if (unlikely(!skb)) {
-                       dev->stats.rx_dropped++;
-                       put_page(page);
-                       return;
-               }
-               if (receive_mergeable(rq, skb)) {
-                       dev_kfree_skb(skb);
-                       return;
-               }
-       } else {
-               page = buf;
-               skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
-               if (unlikely(!skb)) {
-                       dev->stats.rx_dropped++;
-                       give_pages(rq, page);
-                       return;
-               }
-       }
+       if (vi->mergeable_rx_bufs)
+               skb = receive_mergeable(dev, rq, buf, len);
+       else if (vi->big_packets)
+               skb = receive_big(dev, rq, buf, len);
+       else
+               skb = receive_small(buf, len);
+
+       if (unlikely(!skb))
+               return;
 
        hdr = skb_vnet_hdr(skb);
 
@@ -834,16 +873,15 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 /*
  * Send command via the control virtqueue and check status.  Commands
  * supported by the hypervisor, as indicated by feature bits, should
- * never fail unless improperly formated.
+ * never fail unless improperly formatted.
  */
 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
-                                struct scatterlist *out,
-                                struct scatterlist *in)
+                                struct scatterlist *out)
 {
        struct scatterlist *sgs[4], hdr, stat;
        struct virtio_net_ctrl_hdr ctrl;
        virtio_net_ctrl_ack status = ~0;
-       unsigned out_num = 0, in_num = 0, tmp;
+       unsigned out_num = 0, tmp;
 
        /* Caller should know better */
        BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
@@ -856,16 +894,13 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
 
        if (out)
                sgs[out_num++] = out;
-       if (in)
-               sgs[out_num + in_num++] = in;
 
        /* Add return status. */
        sg_init_one(&stat, &status, sizeof(status));
-       sgs[out_num + in_num++] = &stat;
+       sgs[out_num] = &stat;
 
-       BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
-       BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC)
-              < 0);
+       BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
+       BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC) < 0);
 
        if (unlikely(!virtqueue_kick(vi->cvq)))
                return status == VIRTIO_NET_OK;
@@ -895,8 +930,7 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
        if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
                sg_init_one(&sg, addr->sa_data, dev->addr_len);
                if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
-                                         VIRTIO_NET_CTRL_MAC_ADDR_SET,
-                                         &sg, NULL)) {
+                                         VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
                        dev_warn(&vdev->dev,
                                 "Failed to set mac address by vq command.\n");
                        return -EINVAL;
@@ -969,7 +1003,7 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
 {
        rtnl_lock();
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
-                                 VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL))
+                                 VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
                dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
        rtnl_unlock();
 }
@@ -987,7 +1021,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
        sg_init_one(&sg, &s, sizeof(s));
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
-                                 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) {
+                                 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
                dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
                         queue_pairs);
                return -EINVAL;
@@ -1027,7 +1061,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
        void *buf;
        int i;
 
-       /* We can't dynamicaly set ndo_set_rx_mode, so return gracefully */
+       /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
                return;
 
@@ -1037,16 +1071,14 @@ static void virtnet_set_rx_mode(struct net_device *dev)
        sg_init_one(sg, &promisc, sizeof(promisc));
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
-                                 VIRTIO_NET_CTRL_RX_PROMISC,
-                                 sg, NULL))
+                                 VIRTIO_NET_CTRL_RX_PROMISC, sg))
                dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
                         promisc ? "en" : "dis");
 
        sg_init_one(sg, &allmulti, sizeof(allmulti));
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
-                                 VIRTIO_NET_CTRL_RX_ALLMULTI,
-                                 sg, NULL))
+                                 VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
                dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
                         allmulti ? "en" : "dis");
 
@@ -1082,9 +1114,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
                   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
-                                 VIRTIO_NET_CTRL_MAC_TABLE_SET,
-                                 sg, NULL))
-               dev_warn(&dev->dev, "Failed to set MAC fitler table.\n");
+                                 VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
+               dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
 
        kfree(buf);
 }
@@ -1098,7 +1129,7 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
        sg_init_one(&sg, &vid, sizeof(vid));
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
-                                 VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL))
+                                 VIRTIO_NET_CTRL_VLAN_ADD, &sg))
                dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
        return 0;
 }
@@ -1112,7 +1143,7 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
        sg_init_one(&sg, &vid, sizeof(vid));
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
-                                 VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL))
+                                 VIRTIO_NET_CTRL_VLAN_DEL, &sg))
                dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
        return 0;
 }
@@ -1327,6 +1358,11 @@ static void virtnet_config_changed(struct virtio_device *vdev)
 
 static void virtnet_free_queues(struct virtnet_info *vi)
 {
+       int i;
+
+       for (i = 0; i < vi->max_queue_pairs; i++)
+               netif_napi_del(&vi->rq[i].napi);
+
        kfree(vi->rq);
        kfree(vi->sq);
 }
@@ -1356,10 +1392,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
                struct virtqueue *vq = vi->rq[i].vq;
 
                while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
-                       if (vi->big_packets)
-                               give_pages(&vi->rq[i], buf);
-                       else if (vi->mergeable_rx_bufs)
+                       if (vi->mergeable_rx_bufs)
                                put_page(virt_to_head_page(buf));
+                       else if (vi->big_packets)
+                               give_pages(&vi->rq[i], buf);
                        else
                                dev_kfree_skb(buf);
                        --vi->rq[i].num;
@@ -1752,16 +1788,17 @@ static int virtnet_restore(struct virtio_device *vdev)
        if (err)
                return err;
 
-       if (netif_running(vi->dev))
+       if (netif_running(vi->dev)) {
+               for (i = 0; i < vi->curr_queue_pairs; i++)
+                       if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+                               schedule_delayed_work(&vi->refill, 0);
+
                for (i = 0; i < vi->max_queue_pairs; i++)
                        virtnet_napi_enable(&vi->rq[i]);
+       }
 
        netif_device_attach(vi->dev);
 
-       for (i = 0; i < vi->curr_queue_pairs; i++)
-               if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
-                       schedule_delayed_work(&vi->refill, 0);
-
        mutex_lock(&vi->config_lock);
        vi->config_enable = true;
        mutex_unlock(&vi->config_lock);