virtio_net: verify if virtqueue_kick() succeeded
1 /* A network driver using virtio.
2  *
3  * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
18  */
19 //#define DEBUG
20 #include <linux/netdevice.h>
21 #include <linux/etherdevice.h>
22 #include <linux/ethtool.h>
23 #include <linux/module.h>
24 #include <linux/virtio.h>
25 #include <linux/virtio_net.h>
26 #include <linux/scatterlist.h>
27 #include <linux/if_vlan.h>
28 #include <linux/slab.h>
29 #include <linux/cpu.h>
30
31 static int napi_weight = NAPI_POLL_WEIGHT;
32 module_param(napi_weight, int, 0444);
33
34 static bool csum = true, gso = true;
35 module_param(csum, bool, 0444);
36 module_param(gso, bool, 0444);
37
38 /* FIXME: MTU in config. */
39 #define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
40 #define GOOD_COPY_LEN   128
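
/* For reference: with the standard sizes ETH_HLEN = 14, VLAN_HLEN = 4 and
 * ETH_DATA_LEN = 1500, MAX_PACKET_LEN works out to 1518 bytes.
 * GOOD_COPY_LEN is the linear area allocated in page_to_skb(): up to 128
 * bytes of a received packet are copied there so the backing pages can be
 * reused for small packets. */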
41
42 #define VIRTNET_DRIVER_VERSION "1.0.0"
43
44 struct virtnet_stats {
45         struct u64_stats_sync tx_syncp;
46         struct u64_stats_sync rx_syncp;
47         u64 tx_bytes;
48         u64 tx_packets;
49
50         u64 rx_bytes;
51         u64 rx_packets;
52 };
53
54 /* Internal representation of a send virtqueue */
55 struct send_queue {
56         /* Virtqueue associated with this send_queue */
57         struct virtqueue *vq;
58
59         /* TX: fragments + linear part + virtio header */
60         struct scatterlist sg[MAX_SKB_FRAGS + 2];
61
62         /* Name of the send queue: output.$index */
63         char name[40];
64 };
65
66 /* Internal representation of a receive virtqueue */
67 struct receive_queue {
68         /* Virtqueue associated with this receive_queue */
69         struct virtqueue *vq;
70
71         struct napi_struct napi;
72
73         /* Number of input buffers, and max we've ever had. */
74         unsigned int num, max;
75
76         /* Chain pages by the private ptr. */
77         struct page *pages;
78
79         /* RX: fragments + linear part + virtio header */
80         struct scatterlist sg[MAX_SKB_FRAGS + 2];
81
82         /* Name of this receive queue: input.$index */
83         char name[40];
84 };
85
86 struct virtnet_info {
87         struct virtio_device *vdev;
88         struct virtqueue *cvq;
89         struct net_device *dev;
90         struct send_queue *sq;
91         struct receive_queue *rq;
92         unsigned int status;
93
94         /* Max # of queue pairs supported by the device */
95         u16 max_queue_pairs;
96
97         /* # of queue pairs currently used by the driver */
98         u16 curr_queue_pairs;
99
100         /* I like... big packets and I cannot lie! */
101         bool big_packets;
102
103         /* Host will merge rx buffers for big packets (shake it! shake it!) */
104         bool mergeable_rx_bufs;
105
106         /* Has control virtqueue */
107         bool has_cvq;
108
109         /* Host can handle any s/g split between our header and packet data */
110         bool any_header_sg;
111
112         /* enable config space updates */
113         bool config_enable;
114
115         /* Active statistics */
116         struct virtnet_stats __percpu *stats;
117
118         /* Work struct for refilling if we run low on memory. */
119         struct delayed_work refill;
120
121         /* Work struct for config space updates */
122         struct work_struct config_work;
123
124         /* Lock for config space updates */
125         struct mutex config_lock;
126
127         /* Is the affinity hint set for the virtqueues? */
128         bool affinity_hint_set;
129
130         /* Per-cpu variable to show the mapping from CPU to virtqueue */
131         int __percpu *vq_index;
132
133         /* CPU hot plug notifier */
134         struct notifier_block nb;
135 };
136
137 struct skb_vnet_hdr {
138         union {
139                 struct virtio_net_hdr hdr;
140                 struct virtio_net_hdr_mrg_rxbuf mhdr;
141         };
142 };
143
144 struct padded_vnet_hdr {
145         struct virtio_net_hdr hdr;
146         /*
147          * virtio_net_hdr should be in a separate sg buffer because of a
148          * QEMU bug, and the data sg buffer shares the same page as this header sg.
149          * This padding makes the next sg 16-byte aligned after virtio_net_hdr.
150          */
151         char padding[6];
152 };
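
/*
 * Worked size check: struct virtio_net_hdr is 10 bytes (two u8 fields plus
 * four u16 fields), so the 6 bytes of padding above round the header buffer
 * up to 16 bytes, which is what aligns the data sg that follows it.  A
 * minimal compile-time sketch of that assumption (hypothetical, not part of
 * the driver) could be dropped into any function:
 *
 *	BUILD_BUG_ON(sizeof(struct padded_vnet_hdr) != 16);
 */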
153
154 /* Converting between virtqueue no. and kernel tx/rx queue no.
155  * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
156  */
157 static int vq2txq(struct virtqueue *vq)
158 {
159         return (vq->index - 1) / 2;
160 }
161
162 static int txq2vq(int txq)
163 {
164         return txq * 2 + 1;
165 }
166
167 static int vq2rxq(struct virtqueue *vq)
168 {
169         return vq->index / 2;
170 }
171
172 static int rxq2vq(int rxq)
173 {
174         return rxq * 2;
175 }
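
/*
 * Worked example of the mapping above with max_queue_pairs == 2 plus a
 * control queue: vq 0 = rx0, vq 1 = tx0, vq 2 = rx1, vq 3 = tx1, vq 4 = cvq.
 * So for the vq with index 2, vq2rxq() returns 1; for index 3, vq2txq()
 * returns 1; and going the other way, rxq2vq(1) == 2 and txq2vq(1) == 3.
 */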
176
177 static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
178 {
179         return (struct skb_vnet_hdr *)skb->cb;
180 }
181
182 /*
183  * private is used to chain pages for big packets; put the whole
184  * most recently used list at the front for reuse
185  */
186 static void give_pages(struct receive_queue *rq, struct page *page)
187 {
188         struct page *end;
189
190         /* Find the end of the list, then sew the whole thing onto rq->pages. */
191         for (end = page; end->private; end = (struct page *)end->private);
192         end->private = (unsigned long)rq->pages;
193         rq->pages = page;
194 }
195
196 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
197 {
198         struct page *p = rq->pages;
199
200         if (p) {
201                 rq->pages = (struct page *)p->private;
202                 /* clear private here, it is used to chain pages */
203                 p->private = 0;
204         } else
205                 p = alloc_page(gfp_mask);
206         return p;
207 }
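
/*
 * Minimal sketch of walking the chain built by give_pages(): a hypothetical
 * helper, shown only to illustrate the ->private links, not part of the
 * driver.
 */
static unsigned int count_chained_pages(struct receive_queue *rq)
{
	struct page *p;
	unsigned int n = 0;

	for (p = rq->pages; p; p = (struct page *)p->private)
		n++;
	return n;
}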
208
209 static void skb_xmit_done(struct virtqueue *vq)
210 {
211         struct virtnet_info *vi = vq->vdev->priv;
212
213         /* Suppress further interrupts. */
214         virtqueue_disable_cb(vq);
215
216         /* We were probably waiting for more output buffers. */
217         netif_wake_subqueue(vi->dev, vq2txq(vq));
218 }
219
220 static void set_skb_frag(struct sk_buff *skb, struct page *page,
221                          unsigned int offset, unsigned int *len)
222 {
223         int size = min((unsigned)PAGE_SIZE - offset, *len);
224         int i = skb_shinfo(skb)->nr_frags;
225
226         __skb_fill_page_desc(skb, i, page, offset, size);
227
228         skb->data_len += size;
229         skb->len += size;
230         skb->truesize += PAGE_SIZE;
231         skb_shinfo(skb)->nr_frags++;
232         skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
233         *len -= size;
234 }
235
236 /* Called from bottom half context */
237 static struct sk_buff *page_to_skb(struct receive_queue *rq,
238                                    struct page *page, unsigned int len)
239 {
240         struct virtnet_info *vi = rq->vq->vdev->priv;
241         struct sk_buff *skb;
242         struct skb_vnet_hdr *hdr;
243         unsigned int copy, hdr_len, offset;
244         char *p;
245
246         p = page_address(page);
247
248         /* copy small packet so we can reuse these pages for small data */
249         skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
250         if (unlikely(!skb))
251                 return NULL;
252
253         hdr = skb_vnet_hdr(skb);
254
255         if (vi->mergeable_rx_bufs) {
256                 hdr_len = sizeof hdr->mhdr;
257                 offset = hdr_len;
258         } else {
259                 hdr_len = sizeof hdr->hdr;
260                 offset = sizeof(struct padded_vnet_hdr);
261         }
262
263         memcpy(hdr, p, hdr_len);
264
265         len -= hdr_len;
266         p += offset;
267
268         copy = len;
269         if (copy > skb_tailroom(skb))
270                 copy = skb_tailroom(skb);
271         memcpy(skb_put(skb, copy), p, copy);
272
273         len -= copy;
274         offset += copy;
275
276         /*
277          * Verify that we can indeed put this data into a skb.
278          * This is here to handle cases when the device erroneously
279          * tries to receive more data than is possible, which is usually
280          * the sign of a broken device.
281          */
282         if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
283                 net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
284                 dev_kfree_skb(skb);
285                 return NULL;
286         }
287
288         while (len) {
289                 set_skb_frag(skb, page, offset, &len);
290                 page = (struct page *)page->private;
291                 offset = 0;
292         }
293
294         if (page)
295                 give_pages(rq, page);
296
297         return skb;
298 }
299
300 static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb)
301 {
302         struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
303         struct page *page;
304         int num_buf, i, len;
305
306         num_buf = hdr->mhdr.num_buffers;
307         while (--num_buf) {
308                 i = skb_shinfo(skb)->nr_frags;
309                 if (i >= MAX_SKB_FRAGS) {
310                         pr_debug("%s: packet too long\n", skb->dev->name);
311                         skb->dev->stats.rx_length_errors++;
312                         return -EINVAL;
313                 }
314                 page = virtqueue_get_buf(rq->vq, &len);
315                 if (!page) {
316                         pr_debug("%s: rx error: %d buffers missing\n",
317                                  skb->dev->name, hdr->mhdr.num_buffers);
318                         skb->dev->stats.rx_length_errors++;
319                         return -EINVAL;
320                 }
321
322                 if (len > PAGE_SIZE)
323                         len = PAGE_SIZE;
324
325                 set_skb_frag(skb, page, 0, &len);
326
327                 --rq->num;
328         }
329         return 0;
330 }
331
332 static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
333 {
334         struct virtnet_info *vi = rq->vq->vdev->priv;
335         struct net_device *dev = vi->dev;
336         struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
337         struct sk_buff *skb;
338         struct page *page;
339         struct skb_vnet_hdr *hdr;
340
341         if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
342                 pr_debug("%s: short packet %i\n", dev->name, len);
343                 dev->stats.rx_length_errors++;
344                 if (vi->mergeable_rx_bufs || vi->big_packets)
345                         give_pages(rq, buf);
346                 else
347                         dev_kfree_skb(buf);
348                 return;
349         }
350
351         if (!vi->mergeable_rx_bufs && !vi->big_packets) {
352                 skb = buf;
353                 len -= sizeof(struct virtio_net_hdr);
354                 skb_trim(skb, len);
355         } else {
356                 page = buf;
357                 skb = page_to_skb(rq, page, len);
358                 if (unlikely(!skb)) {
359                         dev->stats.rx_dropped++;
360                         give_pages(rq, page);
361                         return;
362                 }
363                 if (vi->mergeable_rx_bufs)
364                         if (receive_mergeable(rq, skb)) {
365                                 dev_kfree_skb(skb);
366                                 return;
367                         }
368         }
369
370         hdr = skb_vnet_hdr(skb);
371
372         u64_stats_update_begin(&stats->rx_syncp);
373         stats->rx_bytes += skb->len;
374         stats->rx_packets++;
375         u64_stats_update_end(&stats->rx_syncp);
376
377         if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
378                 pr_debug("Needs csum!\n");
379                 if (!skb_partial_csum_set(skb,
380                                           hdr->hdr.csum_start,
381                                           hdr->hdr.csum_offset))
382                         goto frame_err;
383         } else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
384                 skb->ip_summed = CHECKSUM_UNNECESSARY;
385         }
386
387         skb->protocol = eth_type_trans(skb, dev);
388         pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
389                  ntohs(skb->protocol), skb->len, skb->pkt_type);
390
391         if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
392                 pr_debug("GSO!\n");
393                 switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
394                 case VIRTIO_NET_HDR_GSO_TCPV4:
395                         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
396                         break;
397                 case VIRTIO_NET_HDR_GSO_UDP:
398                         skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
399                         break;
400                 case VIRTIO_NET_HDR_GSO_TCPV6:
401                         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
402                         break;
403                 default:
404                         net_warn_ratelimited("%s: bad gso type %u.\n",
405                                              dev->name, hdr->hdr.gso_type);
406                         goto frame_err;
407                 }
408
409                 if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
410                         skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
411
412                 skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
413                 if (skb_shinfo(skb)->gso_size == 0) {
414                         net_warn_ratelimited("%s: zero gso size.\n", dev->name);
415                         goto frame_err;
416                 }
417
418                 /* Header must be checked, and gso_segs computed. */
419                 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
420                 skb_shinfo(skb)->gso_segs = 0;
421         }
422
423         netif_receive_skb(skb);
424         return;
425
426 frame_err:
427         dev->stats.rx_frame_errors++;
428         dev_kfree_skb(skb);
429 }
430
431 static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
432 {
433         struct virtnet_info *vi = rq->vq->vdev->priv;
434         struct sk_buff *skb;
435         struct skb_vnet_hdr *hdr;
436         int err;
437
438         skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp);
439         if (unlikely(!skb))
440                 return -ENOMEM;
441
442         skb_put(skb, MAX_PACKET_LEN);
443
444         hdr = skb_vnet_hdr(skb);
445         sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);
446
447         skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
448
449         err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
450         if (err < 0)
451                 dev_kfree_skb(skb);
452
453         return err;
454 }
455
456 static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
457 {
458         struct page *first, *list = NULL;
459         char *p;
460         int i, err, offset;
461
462         /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
463         for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
464                 first = get_a_page(rq, gfp);
465                 if (!first) {
466                         if (list)
467                                 give_pages(rq, list);
468                         return -ENOMEM;
469                 }
470                 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
471
472                 /* chain new page in list head to match sg */
473                 first->private = (unsigned long)list;
474                 list = first;
475         }
476
477         first = get_a_page(rq, gfp);
478         if (!first) {
479                 give_pages(rq, list);
480                 return -ENOMEM;
481         }
482         p = page_address(first);
483
484         /* rq->sg[0], rq->sg[1] share the same page */
485         /* a separate rq->sg[0] for virtio_net_hdr only, due to a QEMU bug */
486         sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr));
487
488         /* rq->sg[1] for data packet, from offset */
489         offset = sizeof(struct padded_vnet_hdr);
490         sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
491
492         /* chain first in list head */
493         first->private = (unsigned long)list;
494         err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
495                                   first, gfp);
496         if (err < 0)
497                 give_pages(rq, first);
498
499         return err;
500 }
501
502 static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
503 {
504         struct page *page;
505         int err;
506
507         page = get_a_page(rq, gfp);
508         if (!page)
509                 return -ENOMEM;
510
511         sg_init_one(rq->sg, page_address(page), PAGE_SIZE);
512
513         err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, page, gfp);
514         if (err < 0)
515                 give_pages(rq, page);
516
517         return err;
518 }
519
520 /*
521  * Returns false if we couldn't fill entirely (OOM) or the virtqueue kick failed.
522  *
523  * Normally run in the receive path, but can also be run from ndo_open
524  * before we're receiving packets, or from refill_work which is
525  * careful to disable receiving (using napi_disable).
526  */
527 static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
528 {
529         struct virtnet_info *vi = rq->vq->vdev->priv;
530         int err;
531         bool oom;
532
533         do {
534                 if (vi->mergeable_rx_bufs)
535                         err = add_recvbuf_mergeable(rq, gfp);
536                 else if (vi->big_packets)
537                         err = add_recvbuf_big(rq, gfp);
538                 else
539                         err = add_recvbuf_small(rq, gfp);
540
541                 oom = err == -ENOMEM;
542                 if (err)
543                         break;
544                 ++rq->num;
545         } while (rq->vq->num_free);
546         if (unlikely(rq->num > rq->max))
547                 rq->max = rq->num;
548         if (unlikely(!virtqueue_kick(rq->vq)))
549                 return false;
550         return !oom;
551 }
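
/*
 * Minimal caller sketch (hypothetical helper, mirroring what virtnet_open()
 * and the NAPI poll loop below actually do): a false return, whether caused
 * by OOM or by a failed virtqueue_kick(), is handled by deferring to
 * refill_work() through the delayed work item.
 */
static void virtnet_refill_or_defer(struct virtnet_info *vi,
				    struct receive_queue *rq)
{
	if (!try_fill_recv(rq, GFP_ATOMIC))
		schedule_delayed_work(&vi->refill, 0);
}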
552
553 static void skb_recv_done(struct virtqueue *rvq)
554 {
555         struct virtnet_info *vi = rvq->vdev->priv;
556         struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
557
558         /* Schedule NAPI; suppress further interrupts if successful. */
559         if (napi_schedule_prep(&rq->napi)) {
560                 virtqueue_disable_cb(rvq);
561                 __napi_schedule(&rq->napi);
562         }
563 }
564
565 static void virtnet_napi_enable(struct receive_queue *rq)
566 {
567         napi_enable(&rq->napi);
568
569         /* If all buffers were filled by the other side before we enabled napi,
570          * we won't get another interrupt, so process any outstanding packets
571          * now.  virtnet_poll wants to re-enable the queue, so we disable it here.
572          * We synchronize against interrupts via NAPI_STATE_SCHED. */
573         if (napi_schedule_prep(&rq->napi)) {
574                 virtqueue_disable_cb(rq->vq);
575                 local_bh_disable();
576                 __napi_schedule(&rq->napi);
577                 local_bh_enable();
578         }
579 }
580
581 static void refill_work(struct work_struct *work)
582 {
583         struct virtnet_info *vi =
584                 container_of(work, struct virtnet_info, refill.work);
585         bool still_empty;
586         int i;
587
588         for (i = 0; i < vi->curr_queue_pairs; i++) {
589                 struct receive_queue *rq = &vi->rq[i];
590
591                 napi_disable(&rq->napi);
592                 still_empty = !try_fill_recv(rq, GFP_KERNEL);
593                 virtnet_napi_enable(rq);
594
595                 /* In theory, this can happen: if we don't get any buffers in,
596                  * we will *never* try to fill again.
597                  */
598                 if (still_empty)
599                         schedule_delayed_work(&vi->refill, HZ/2);
600         }
601 }
602
603 static int virtnet_poll(struct napi_struct *napi, int budget)
604 {
605         struct receive_queue *rq =
606                 container_of(napi, struct receive_queue, napi);
607         struct virtnet_info *vi = rq->vq->vdev->priv;
608         void *buf;
609         unsigned int r, len, received = 0;
610
611 again:
612         while (received < budget &&
613                (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
614                 receive_buf(rq, buf, len);
615                 --rq->num;
616                 received++;
617         }
618
619         if (rq->num < rq->max / 2) {
620                 if (!try_fill_recv(rq, GFP_ATOMIC))
621                         schedule_delayed_work(&vi->refill, 0);
622         }
623
624         /* Out of packets? */
625         if (received < budget) {
626                 r = virtqueue_enable_cb_prepare(rq->vq);
627                 napi_complete(napi);
628                 if (unlikely(virtqueue_poll(rq->vq, r)) &&
629                     napi_schedule_prep(napi)) {
630                         virtqueue_disable_cb(rq->vq);
631                         __napi_schedule(napi);
632                         goto again;
633                 }
634         }
635
636         return received;
637 }
638
639 static int virtnet_open(struct net_device *dev)
640 {
641         struct virtnet_info *vi = netdev_priv(dev);
642         int i;
643
644         for (i = 0; i < vi->max_queue_pairs; i++) {
645                 if (i < vi->curr_queue_pairs)
646                         /* Make sure we have some buffers: if OOM, use the workqueue. */
647                         if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
648                                 schedule_delayed_work(&vi->refill, 0);
649                 virtnet_napi_enable(&vi->rq[i]);
650         }
651
652         return 0;
653 }
654
655 static void free_old_xmit_skbs(struct send_queue *sq)
656 {
657         struct sk_buff *skb;
658         unsigned int len;
659         struct virtnet_info *vi = sq->vq->vdev->priv;
660         struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
661
662         while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
663                 pr_debug("Sent skb %p\n", skb);
664
665                 u64_stats_update_begin(&stats->tx_syncp);
666                 stats->tx_bytes += skb->len;
667                 stats->tx_packets++;
668                 u64_stats_update_end(&stats->tx_syncp);
669
670                 dev_kfree_skb_any(skb);
671         }
672 }
673
674 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
675 {
676         struct skb_vnet_hdr *hdr;
677         const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
678         struct virtnet_info *vi = sq->vq->vdev->priv;
679         unsigned num_sg;
680         unsigned hdr_len;
681         bool can_push;
682
683         pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
684         if (vi->mergeable_rx_bufs)
685                 hdr_len = sizeof hdr->mhdr;
686         else
687                 hdr_len = sizeof hdr->hdr;
688
689         can_push = vi->any_header_sg &&
690                 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
691                 !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
692         /* Even if we can, don't push here yet as this would skew
693          * csum_start offset below. */
694         if (can_push)
695                 hdr = (struct skb_vnet_hdr *)(skb->data - hdr_len);
696         else
697                 hdr = skb_vnet_hdr(skb);
698
699         if (skb->ip_summed == CHECKSUM_PARTIAL) {
700                 hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
701                 hdr->hdr.csum_start = skb_checksum_start_offset(skb);
702                 hdr->hdr.csum_offset = skb->csum_offset;
703         } else {
704                 hdr->hdr.flags = 0;
705                 hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
706         }
707
708         if (skb_is_gso(skb)) {
709                 hdr->hdr.hdr_len = skb_headlen(skb);
710                 hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
711                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
712                         hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
713                 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
714                         hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
715                 else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
716                         hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
717                 else
718                         BUG();
719                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
720                         hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
721         } else {
722                 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
723                 hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
724         }
725
726         if (vi->mergeable_rx_bufs)
727                 hdr->mhdr.num_buffers = 0;
728
729         if (can_push) {
730                 __skb_push(skb, hdr_len);
731                 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
732                 /* Pull header back to avoid skew in tx bytes calculations. */
733                 __skb_pull(skb, hdr_len);
734         } else {
735                 sg_set_buf(sq->sg, hdr, hdr_len);
736                 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
737         }
738         return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
739 }
740
741 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
742 {
743         struct virtnet_info *vi = netdev_priv(dev);
744         int qnum = skb_get_queue_mapping(skb);
745         struct send_queue *sq = &vi->sq[qnum];
746         int err;
747
748         /* Free up any pending old buffers before queueing new ones. */
749         free_old_xmit_skbs(sq);
750
751         /* Try to transmit */
752         err = xmit_skb(sq, skb);
753
754         /* This should not happen! */
755         if (unlikely(err) || unlikely(!virtqueue_kick(sq->vq))) {
756                 dev->stats.tx_fifo_errors++;
757                 if (net_ratelimit())
758                         dev_warn(&dev->dev,
759                                  "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
760                 dev->stats.tx_dropped++;
761                 kfree_skb(skb);
762                 return NETDEV_TX_OK;
763         }
764
765         /* Don't wait up for transmitted skbs to be freed. */
766         skb_orphan(skb);
767         nf_reset(skb);
768
769         /* Apparently nice girls don't return TX_BUSY; stop the queue
770          * before it gets out of hand.  Naturally, this wastes entries. */
771         if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
772                 netif_stop_subqueue(dev, qnum);
773                 if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
774                         /* More just got used, free them then recheck. */
775                         free_old_xmit_skbs(sq);
776                         if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
777                                 netif_start_subqueue(dev, qnum);
778                                 virtqueue_disable_cb(sq->vq);
779                         }
780                 }
781         }
782
783         return NETDEV_TX_OK;
784 }
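
/*
 * Why 2 + MAX_SKB_FRAGS above: in the worst (non-can_push) case one skb
 * needs a descriptor for the virtio header, one for the linear part and up
 * to MAX_SKB_FRAGS page fragments.  A hypothetical helper expressing that
 * capacity check (illustration only, not part of the driver):
 */
static bool virtnet_tx_has_room(const struct send_queue *sq)
{
	return sq->vq->num_free >= 2 + MAX_SKB_FRAGS;
}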
785
786 /*
787  * Send command via the control virtqueue and check status.  Commands
788  * supported by the hypervisor, as indicated by feature bits, should
789  * never fail unless improperly formatted.
790  */
791 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
792                                  struct scatterlist *out,
793                                  struct scatterlist *in)
794 {
795         struct scatterlist *sgs[4], hdr, stat;
796         struct virtio_net_ctrl_hdr ctrl;
797         virtio_net_ctrl_ack status = ~0;
798         unsigned out_num = 0, in_num = 0, tmp;
799
800         /* Caller should know better */
801         BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
802
803         ctrl.class = class;
804         ctrl.cmd = cmd;
805         /* Add header */
806         sg_init_one(&hdr, &ctrl, sizeof(ctrl));
807         sgs[out_num++] = &hdr;
808
809         if (out)
810                 sgs[out_num++] = out;
811         if (in)
812                 sgs[out_num + in_num++] = in;
813
814         /* Add return status. */
815         sg_init_one(&stat, &status, sizeof(status));
816         sgs[out_num + in_num++] = &stat;
817
818         BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
819         BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC)
820                < 0);
821
822         if (unlikely(!virtqueue_kick(vi->cvq)))
823                 return status == VIRTIO_NET_OK;
824
825         /* Spin for a response; the kick causes an ioport write that traps
826          * into the hypervisor, so the request should be handled immediately.
827          */
828         while (!virtqueue_get_buf(vi->cvq, &tmp))
829                 cpu_relax();
830
831         return status == VIRTIO_NET_OK;
832 }
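
/*
 * For reference, the sgs[] layout built above when both 'out' and 'in' are
 * supplied is: sgs[0] = command header (out), sgs[1] = out data (out),
 * sgs[2] = in data (in), sgs[3] = status ack (in), i.e. out_num == 2 and
 * in_num == 2.  Callers that pass NULL simply shrink the corresponding
 * direction, as virtnet_ack_link_announce() below does.
 */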
833
834 static int virtnet_set_mac_address(struct net_device *dev, void *p)
835 {
836         struct virtnet_info *vi = netdev_priv(dev);
837         struct virtio_device *vdev = vi->vdev;
838         int ret;
839         struct sockaddr *addr = p;
840         struct scatterlist sg;
841
842         ret = eth_prepare_mac_addr_change(dev, p);
843         if (ret)
844                 return ret;
845
846         if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
847                 sg_init_one(&sg, addr->sa_data, dev->addr_len);
848                 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
849                                           VIRTIO_NET_CTRL_MAC_ADDR_SET,
850                                           &sg, NULL)) {
851                         dev_warn(&vdev->dev,
852                                  "Failed to set mac address by vq command.\n");
853                         return -EINVAL;
854                 }
855         } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
856                 unsigned int i;
857
858                 /* Naturally, this has an atomicity problem. */
859                 for (i = 0; i < dev->addr_len; i++)
860                         virtio_cwrite8(vdev,
861                                        offsetof(struct virtio_net_config, mac) +
862                                        i, addr->sa_data[i]);
863         }
864
865         eth_commit_mac_addr_change(dev, p);
866
867         return 0;
868 }
869
870 static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
871                                                struct rtnl_link_stats64 *tot)
872 {
873         struct virtnet_info *vi = netdev_priv(dev);
874         int cpu;
875         unsigned int start;
876
877         for_each_possible_cpu(cpu) {
878                 struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
879                 u64 tpackets, tbytes, rpackets, rbytes;
880
881                 do {
882                         start = u64_stats_fetch_begin_bh(&stats->tx_syncp);
883                         tpackets = stats->tx_packets;
884                         tbytes   = stats->tx_bytes;
885                 } while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start));
886
887                 do {
888                         start = u64_stats_fetch_begin_bh(&stats->rx_syncp);
889                         rpackets = stats->rx_packets;
890                         rbytes   = stats->rx_bytes;
891                 } while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start));
892
893                 tot->rx_packets += rpackets;
894                 tot->tx_packets += tpackets;
895                 tot->rx_bytes   += rbytes;
896                 tot->tx_bytes   += tbytes;
897         }
898
899         tot->tx_dropped = dev->stats.tx_dropped;
900         tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
901         tot->rx_dropped = dev->stats.rx_dropped;
902         tot->rx_length_errors = dev->stats.rx_length_errors;
903         tot->rx_frame_errors = dev->stats.rx_frame_errors;
904
905         return tot;
906 }
907
908 #ifdef CONFIG_NET_POLL_CONTROLLER
909 static void virtnet_netpoll(struct net_device *dev)
910 {
911         struct virtnet_info *vi = netdev_priv(dev);
912         int i;
913
914         for (i = 0; i < vi->curr_queue_pairs; i++)
915                 napi_schedule(&vi->rq[i].napi);
916 }
917 #endif
918
919 static void virtnet_ack_link_announce(struct virtnet_info *vi)
920 {
921         rtnl_lock();
922         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
923                                   VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL))
924                 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
925         rtnl_unlock();
926 }
927
928 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
929 {
930         struct scatterlist sg;
931         struct virtio_net_ctrl_mq s;
932         struct net_device *dev = vi->dev;
933
934         if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
935                 return 0;
936
937         s.virtqueue_pairs = queue_pairs;
938         sg_init_one(&sg, &s, sizeof(s));
939
940         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
941                                   VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) {
942                 dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
943                          queue_pairs);
944                 return -EINVAL;
945         } else {
946                 vi->curr_queue_pairs = queue_pairs;
947                 schedule_delayed_work(&vi->refill, 0);
948         }
949
950         return 0;
951 }
952
953 static int virtnet_close(struct net_device *dev)
954 {
955         struct virtnet_info *vi = netdev_priv(dev);
956         int i;
957
958         /* Make sure refill_work doesn't re-enable napi! */
959         cancel_delayed_work_sync(&vi->refill);
960
961         for (i = 0; i < vi->max_queue_pairs; i++)
962                 napi_disable(&vi->rq[i].napi);
963
964         return 0;
965 }
966
967 static void virtnet_set_rx_mode(struct net_device *dev)
968 {
969         struct virtnet_info *vi = netdev_priv(dev);
970         struct scatterlist sg[2];
971         u8 promisc, allmulti;
972         struct virtio_net_ctrl_mac *mac_data;
973         struct netdev_hw_addr *ha;
974         int uc_count;
975         int mc_count;
976         void *buf;
977         int i;
978
979         /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
980         if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
981                 return;
982
983         promisc = ((dev->flags & IFF_PROMISC) != 0);
984         allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
985
986         sg_init_one(sg, &promisc, sizeof(promisc));
987
988         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
989                                   VIRTIO_NET_CTRL_RX_PROMISC,
990                                   sg, NULL))
991                 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
992                          promisc ? "en" : "dis");
993
994         sg_init_one(sg, &allmulti, sizeof(allmulti));
995
996         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
997                                   VIRTIO_NET_CTRL_RX_ALLMULTI,
998                                   sg, NULL))
999                 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
1000                          allmulti ? "en" : "dis");
1001
1002         uc_count = netdev_uc_count(dev);
1003         mc_count = netdev_mc_count(dev);
1004         /* MAC filter - use one buffer for both lists */
1005         buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
1006                       (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
1007         mac_data = buf;
1008         if (!buf)
1009                 return;
1010
1011         sg_init_table(sg, 2);
1012
1013         /* Store the unicast list and count in the front of the buffer */
1014         mac_data->entries = uc_count;
1015         i = 0;
1016         netdev_for_each_uc_addr(ha, dev)
1017                 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
1018
1019         sg_set_buf(&sg[0], mac_data,
1020                    sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
1021
1022         /* multicast list and count fill the end */
1023         mac_data = (void *)&mac_data->macs[uc_count][0];
1024
1025         mac_data->entries = mc_count;
1026         i = 0;
1027         netdev_for_each_mc_addr(ha, dev)
1028                 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
1029
1030         sg_set_buf(&sg[1], mac_data,
1031                    sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
1032
1033         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
1034                                   VIRTIO_NET_CTRL_MAC_TABLE_SET,
1035                                   sg, NULL))
1036                 dev_warn(&dev->dev, "Failed to set MAC fitler table.\n");
1037
1038         kfree(buf);
1039 }
1040
1041 static int virtnet_vlan_rx_add_vid(struct net_device *dev,
1042                                    __be16 proto, u16 vid)
1043 {
1044         struct virtnet_info *vi = netdev_priv(dev);
1045         struct scatterlist sg;
1046
1047         sg_init_one(&sg, &vid, sizeof(vid));
1048
1049         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
1050                                   VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL))
1051                 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
1052         return 0;
1053 }
1054
1055 static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
1056                                     __be16 proto, u16 vid)
1057 {
1058         struct virtnet_info *vi = netdev_priv(dev);
1059         struct scatterlist sg;
1060
1061         sg_init_one(&sg, &vid, sizeof(vid));
1062
1063         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
1064                                   VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL))
1065                 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
1066         return 0;
1067 }
1068
1069 static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
1070 {
1071         int i;
1072         int cpu;
1073
1074         if (vi->affinity_hint_set) {
1075                 for (i = 0; i < vi->max_queue_pairs; i++) {
1076                         virtqueue_set_affinity(vi->rq[i].vq, -1);
1077                         virtqueue_set_affinity(vi->sq[i].vq, -1);
1078                 }
1079
1080                 vi->affinity_hint_set = false;
1081         }
1082
1083         i = 0;
1084         for_each_online_cpu(cpu) {
1085                 if (cpu == hcpu) {
1086                         *per_cpu_ptr(vi->vq_index, cpu) = -1;
1087                 } else {
1088                         *per_cpu_ptr(vi->vq_index, cpu) =
1089                                 ++i % vi->curr_queue_pairs;
1090                 }
1091         }
1092 }
1093
1094 static void virtnet_set_affinity(struct virtnet_info *vi)
1095 {
1096         int i;
1097         int cpu;
1098
1099         /* In multiqueue mode, when the number of CPUs is equal to the number of
1100          * queue pairs, we let each queue pair be private to one CPU by
1101          * setting the affinity hint, eliminating the contention.
1102          */
1103         if (vi->curr_queue_pairs == 1 ||
1104             vi->max_queue_pairs != num_online_cpus()) {
1105                 virtnet_clean_affinity(vi, -1);
1106                 return;
1107         }
1108
1109         i = 0;
1110         for_each_online_cpu(cpu) {
1111                 virtqueue_set_affinity(vi->rq[i].vq, cpu);
1112                 virtqueue_set_affinity(vi->sq[i].vq, cpu);
1113                 *per_cpu_ptr(vi->vq_index, cpu) = i;
1114                 i++;
1115         }
1116
1117         vi->affinity_hint_set = true;
1118 }
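
/*
 * Worked example: with four online CPUs, max_queue_pairs == 4 and more than
 * one active pair, the loop above pins queue pair 0 to CPU 0, pair 1 to
 * CPU 1, and so on, and records the pairing in vq_index so that
 * virtnet_select_queue() keeps each CPU transmitting on its own pair.
 */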
1119
1120 static int virtnet_cpu_callback(struct notifier_block *nfb,
1121                                 unsigned long action, void *hcpu)
1122 {
1123         struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
1124
1125         switch(action & ~CPU_TASKS_FROZEN) {
1126         case CPU_ONLINE:
1127         case CPU_DOWN_FAILED:
1128         case CPU_DEAD:
1129                 virtnet_set_affinity(vi);
1130                 break;
1131         case CPU_DOWN_PREPARE:
1132                 virtnet_clean_affinity(vi, (long)hcpu);
1133                 break;
1134         default:
1135                 break;
1136         }
1137         return NOTIFY_OK;
1138 }
1139
1140 static void virtnet_get_ringparam(struct net_device *dev,
1141                                 struct ethtool_ringparam *ring)
1142 {
1143         struct virtnet_info *vi = netdev_priv(dev);
1144
1145         ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
1146         ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
1147         ring->rx_pending = ring->rx_max_pending;
1148         ring->tx_pending = ring->tx_max_pending;
1149 }
1150
1151
1152 static void virtnet_get_drvinfo(struct net_device *dev,
1153                                 struct ethtool_drvinfo *info)
1154 {
1155         struct virtnet_info *vi = netdev_priv(dev);
1156         struct virtio_device *vdev = vi->vdev;
1157
1158         strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1159         strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
1160         strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
1161
1162 }
1163
1164 /* TODO: Eliminate OOO packets during switching */
1165 static int virtnet_set_channels(struct net_device *dev,
1166                                 struct ethtool_channels *channels)
1167 {
1168         struct virtnet_info *vi = netdev_priv(dev);
1169         u16 queue_pairs = channels->combined_count;
1170         int err;
1171
1172         /* We don't support separate rx/tx channels.
1173          * We don't allow setting 'other' channels.
1174          */
1175         if (channels->rx_count || channels->tx_count || channels->other_count)
1176                 return -EINVAL;
1177
1178         if (queue_pairs > vi->max_queue_pairs)
1179                 return -EINVAL;
1180
1181         get_online_cpus();
1182         err = virtnet_set_queues(vi, queue_pairs);
1183         if (!err) {
1184                 netif_set_real_num_tx_queues(dev, queue_pairs);
1185                 netif_set_real_num_rx_queues(dev, queue_pairs);
1186
1187                 virtnet_set_affinity(vi);
1188         }
1189         put_online_cpus();
1190
1191         return err;
1192 }
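
/*
 * This is the hook reached from userspace via "ethtool -L <dev> combined N";
 * only the combined count may change, matching the rx/tx/other checks above.
 */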
1193
1194 static void virtnet_get_channels(struct net_device *dev,
1195                                  struct ethtool_channels *channels)
1196 {
1197         struct virtnet_info *vi = netdev_priv(dev);
1198
1199         channels->combined_count = vi->curr_queue_pairs;
1200         channels->max_combined = vi->max_queue_pairs;
1201         channels->max_other = 0;
1202         channels->rx_count = 0;
1203         channels->tx_count = 0;
1204         channels->other_count = 0;
1205 }
1206
1207 static const struct ethtool_ops virtnet_ethtool_ops = {
1208         .get_drvinfo = virtnet_get_drvinfo,
1209         .get_link = ethtool_op_get_link,
1210         .get_ringparam = virtnet_get_ringparam,
1211         .set_channels = virtnet_set_channels,
1212         .get_channels = virtnet_get_channels,
1213 };
1214
1215 #define MIN_MTU 68
1216 #define MAX_MTU 65535
1217
1218 static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
1219 {
1220         if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
1221                 return -EINVAL;
1222         dev->mtu = new_mtu;
1223         return 0;
1224 }
1225
1226 /* To avoid contending for a lock held by a vcpu that may exit to the host,
1227  * select the txq based on the processor id.
1228  */
1229 static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
1230 {
1231         int txq;
1232         struct virtnet_info *vi = netdev_priv(dev);
1233
1234         if (skb_rx_queue_recorded(skb)) {
1235                 txq = skb_get_rx_queue(skb);
1236         } else {
1237                 txq = *__this_cpu_ptr(vi->vq_index);
1238                 if (txq == -1)
1239                         txq = 0;
1240         }
1241
1242         while (unlikely(txq >= dev->real_num_tx_queues))
1243                 txq -= dev->real_num_tx_queues;
1244
1245         return txq;
1246 }
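
/*
 * Worked example: a flow whose rx queue was recorded as 5 on a device with
 * real_num_tx_queues == 4 wraps around to txq 1, and a CPU whose vq_index
 * entry is -1 falls back to txq 0.
 */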
1247
1248 static const struct net_device_ops virtnet_netdev = {
1249         .ndo_open            = virtnet_open,
1250         .ndo_stop            = virtnet_close,
1251         .ndo_start_xmit      = start_xmit,
1252         .ndo_validate_addr   = eth_validate_addr,
1253         .ndo_set_mac_address = virtnet_set_mac_address,
1254         .ndo_set_rx_mode     = virtnet_set_rx_mode,
1255         .ndo_change_mtu      = virtnet_change_mtu,
1256         .ndo_get_stats64     = virtnet_stats,
1257         .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
1258         .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
1259         .ndo_select_queue     = virtnet_select_queue,
1260 #ifdef CONFIG_NET_POLL_CONTROLLER
1261         .ndo_poll_controller = virtnet_netpoll,
1262 #endif
1263 };
1264
1265 static void virtnet_config_changed_work(struct work_struct *work)
1266 {
1267         struct virtnet_info *vi =
1268                 container_of(work, struct virtnet_info, config_work);
1269         u16 v;
1270
1271         mutex_lock(&vi->config_lock);
1272         if (!vi->config_enable)
1273                 goto done;
1274
1275         if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
1276                                  struct virtio_net_config, status, &v) < 0)
1277                 goto done;
1278
1279         if (v & VIRTIO_NET_S_ANNOUNCE) {
1280                 netdev_notify_peers(vi->dev);
1281                 virtnet_ack_link_announce(vi);
1282         }
1283
1284         /* Ignore unknown (future) status bits */
1285         v &= VIRTIO_NET_S_LINK_UP;
1286
1287         if (vi->status == v)
1288                 goto done;
1289
1290         vi->status = v;
1291
1292         if (vi->status & VIRTIO_NET_S_LINK_UP) {
1293                 netif_carrier_on(vi->dev);
1294                 netif_tx_wake_all_queues(vi->dev);
1295         } else {
1296                 netif_carrier_off(vi->dev);
1297                 netif_tx_stop_all_queues(vi->dev);
1298         }
1299 done:
1300         mutex_unlock(&vi->config_lock);
1301 }
1302
1303 static void virtnet_config_changed(struct virtio_device *vdev)
1304 {
1305         struct virtnet_info *vi = vdev->priv;
1306
1307         schedule_work(&vi->config_work);
1308 }
1309
1310 static void virtnet_free_queues(struct virtnet_info *vi)
1311 {
1312         kfree(vi->rq);
1313         kfree(vi->sq);
1314 }
1315
1316 static void free_receive_bufs(struct virtnet_info *vi)
1317 {
1318         int i;
1319
1320         for (i = 0; i < vi->max_queue_pairs; i++) {
1321                 while (vi->rq[i].pages)
1322                         __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
1323         }
1324 }
1325
1326 static void free_unused_bufs(struct virtnet_info *vi)
1327 {
1328         void *buf;
1329         int i;
1330
1331         for (i = 0; i < vi->max_queue_pairs; i++) {
1332                 struct virtqueue *vq = vi->sq[i].vq;
1333                 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
1334                         dev_kfree_skb(buf);
1335         }
1336
1337         for (i = 0; i < vi->max_queue_pairs; i++) {
1338                 struct virtqueue *vq = vi->rq[i].vq;
1339
1340                 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
1341                         if (vi->mergeable_rx_bufs || vi->big_packets)
1342                                 give_pages(&vi->rq[i], buf);
1343                         else
1344                                 dev_kfree_skb(buf);
1345                         --vi->rq[i].num;
1346                 }
1347                 BUG_ON(vi->rq[i].num != 0);
1348         }
1349 }
1350
1351 static void virtnet_del_vqs(struct virtnet_info *vi)
1352 {
1353         struct virtio_device *vdev = vi->vdev;
1354
1355         virtnet_clean_affinity(vi, -1);
1356
1357         vdev->config->del_vqs(vdev);
1358
1359         virtnet_free_queues(vi);
1360 }
1361
1362 static int virtnet_find_vqs(struct virtnet_info *vi)
1363 {
1364         vq_callback_t **callbacks;
1365         struct virtqueue **vqs;
1366         int ret = -ENOMEM;
1367         int i, total_vqs;
1368         const char **names;
1369
1370         /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
1371          * possibly N-1 more RX/TX queue pairs used in multiqueue mode, followed by
1372          * possibly a control vq.
1373          */
1374         total_vqs = vi->max_queue_pairs * 2 +
1375                     virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
1376
1377         /* Allocate space for find_vqs parameters */
1378         vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL);
1379         if (!vqs)
1380                 goto err_vq;
1381         callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL);
1382         if (!callbacks)
1383                 goto err_callback;
1384         names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
1385         if (!names)
1386                 goto err_names;
1387
1388         /* Parameters for control virtqueue, if any */
1389         if (vi->has_cvq) {
1390                 callbacks[total_vqs - 1] = NULL;
1391                 names[total_vqs - 1] = "control";
1392         }
1393
1394         /* Allocate/initialize parameters for send/receive virtqueues */
1395         for (i = 0; i < vi->max_queue_pairs; i++) {
1396                 callbacks[rxq2vq(i)] = skb_recv_done;
1397                 callbacks[txq2vq(i)] = skb_xmit_done;
1398                 sprintf(vi->rq[i].name, "input.%d", i);
1399                 sprintf(vi->sq[i].name, "output.%d", i);
1400                 names[rxq2vq(i)] = vi->rq[i].name;
1401                 names[txq2vq(i)] = vi->sq[i].name;
1402         }
1403
1404         ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
1405                                          names);
1406         if (ret)
1407                 goto err_find;
1408
1409         if (vi->has_cvq) {
1410                 vi->cvq = vqs[total_vqs - 1];
1411                 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
1412                         vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1413         }
1414
1415         for (i = 0; i < vi->max_queue_pairs; i++) {
1416                 vi->rq[i].vq = vqs[rxq2vq(i)];
1417                 vi->sq[i].vq = vqs[txq2vq(i)];
1418         }
1419
1420         kfree(names);
1421         kfree(callbacks);
1422         kfree(vqs);
1423
1424         return 0;
1425
1426 err_find:
1427         kfree(names);
1428 err_names:
1429         kfree(callbacks);
1430 err_callback:
1431         kfree(vqs);
1432 err_vq:
1433         return ret;
1434 }
1435
1436 static int virtnet_alloc_queues(struct virtnet_info *vi)
1437 {
1438         int i;
1439
1440         vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
1441         if (!vi->sq)
1442                 goto err_sq;
1443         vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
1444         if (!vi->rq)
1445                 goto err_rq;
1446
1447         INIT_DELAYED_WORK(&vi->refill, refill_work);
1448         for (i = 0; i < vi->max_queue_pairs; i++) {
1449                 vi->rq[i].pages = NULL;
1450                 netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
1451                                napi_weight);
1452
1453                 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
1454                 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
1455         }
1456
1457         return 0;
1458
1459 err_rq:
1460         kfree(vi->sq);
1461 err_sq:
1462         return -ENOMEM;
1463 }
1464
1465 static int init_vqs(struct virtnet_info *vi)
1466 {
1467         int ret;
1468
1469         /* Allocate send & receive queues */
1470         ret = virtnet_alloc_queues(vi);
1471         if (ret)
1472                 goto err;
1473
1474         ret = virtnet_find_vqs(vi);
1475         if (ret)
1476                 goto err_free;
1477
1478         get_online_cpus();
1479         virtnet_set_affinity(vi);
1480         put_online_cpus();
1481
1482         return 0;
1483
1484 err_free:
1485         virtnet_free_queues(vi);
1486 err:
1487         return ret;
1488 }
1489
1490 static int virtnet_probe(struct virtio_device *vdev)
1491 {
1492         int i, err;
1493         struct net_device *dev;
1494         struct virtnet_info *vi;
1495         u16 max_queue_pairs;
1496
1497         /* Find if host supports multiqueue virtio_net device */
1498         err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
1499                                    struct virtio_net_config,
1500                                    max_virtqueue_pairs, &max_queue_pairs);
1501
1502         /* We need at least 2 queues */
1503         if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
1504             max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
1505             !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
1506                 max_queue_pairs = 1;
1507
1508         /* Allocate ourselves a network device with room for our info */
1509         dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
1510         if (!dev)
1511                 return -ENOMEM;
1512
1513         /* Set up network device as normal. */
1514         dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
1515         dev->netdev_ops = &virtnet_netdev;
1516         dev->features = NETIF_F_HIGHDMA;
1517
1518         SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
1519         SET_NETDEV_DEV(dev, &vdev->dev);
1520
1521         /* Do we support "hardware" checksums? */
1522         if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
1523                 /* This opens up the world of extra features. */
1524                 dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
1525                 if (csum)
1526                         dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
1527
1528                 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
1529                         dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
1530                                 | NETIF_F_TSO_ECN | NETIF_F_TSO6;
1531                 }
1532                 /* Individual feature bits: what can host handle? */
1533                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
1534                         dev->hw_features |= NETIF_F_TSO;
1535                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
1536                         dev->hw_features |= NETIF_F_TSO6;
1537                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
1538                         dev->hw_features |= NETIF_F_TSO_ECN;
1539                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
1540                         dev->hw_features |= NETIF_F_UFO;
1541
1542                 if (gso)
1543                         dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
1544                 /* (!csum && gso) case will be fixed by register_netdev() */
1545         }
1546         if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
1547                 dev->features |= NETIF_F_RXCSUM;
1548
1549         dev->vlan_features = dev->features;
1550
1551         /* Configuration may specify what MAC to use.  Otherwise random. */
1552         if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
1553                 virtio_cread_bytes(vdev,
1554                                    offsetof(struct virtio_net_config, mac),
1555                                    dev->dev_addr, dev->addr_len);
1556         else
1557                 eth_hw_addr_random(dev);
1558
1559         /* Set up our device-specific information */
1560         vi = netdev_priv(dev);
1561         vi->dev = dev;
1562         vi->vdev = vdev;
1563         vdev->priv = vi;
1564         vi->stats = alloc_percpu(struct virtnet_stats);
1565         err = -ENOMEM;
1566         if (vi->stats == NULL)
1567                 goto free;
1568
1569         vi->vq_index = alloc_percpu(int);
1570         if (vi->vq_index == NULL)
1571                 goto free_stats;
1572
1573         mutex_init(&vi->config_lock);
1574         vi->config_enable = true;
1575         INIT_WORK(&vi->config_work, virtnet_config_changed_work);
1576
1577         /* If we can receive ANY GSO packets, we must allocate large ones. */
1578         if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
1579             virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
1580             virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
1581                 vi->big_packets = true;
1582
1583         if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
1584                 vi->mergeable_rx_bufs = true;
1585
1586         if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
1587                 vi->any_header_sg = true;
1588
1589         if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
1590                 vi->has_cvq = true;
1591
1592         /* Use single tx/rx queue pair as default */
1593         vi->curr_queue_pairs = 1;
1594         vi->max_queue_pairs = max_queue_pairs;
1595
1596         /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
1597         err = init_vqs(vi);
1598         if (err)
1599                 goto free_index;
1600
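             /* Only one RX/TX queue pair is exposed to the networking stack
              * here; more pairs can be enabled later, e.g. via ethtool's
              * set_channels, which negotiates them over the control vq. */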
1601         netif_set_real_num_tx_queues(dev, 1);
1602         netif_set_real_num_rx_queues(dev, 1);
1603
1604         err = register_netdev(dev);
1605         if (err) {
1606                 pr_debug("virtio_net: registering device failed\n");
1607                 goto free_vqs;
1608         }
1609
1610         /* Last of all, set up some receive buffers. */
1611         for (i = 0; i < vi->curr_queue_pairs; i++) {
1612                 try_fill_recv(&vi->rq[i], GFP_KERNEL);
1613
1614                 /* If we didn't even get one input buffer, we're useless. */
1615                 if (vi->rq[i].num == 0) {
1616                         free_unused_bufs(vi);
1617                         err = -ENOMEM;
1618                         goto free_recv_bufs;
1619                 }
1620         }
1621
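             /* Register for CPU hotplug notifications so that virtqueue/CPU
              * affinity can be rebalanced as CPUs come and go. */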
1622         vi->nb.notifier_call = &virtnet_cpu_callback;
1623         err = register_hotcpu_notifier(&vi->nb);
1624         if (err) {
1625                 pr_debug("virtio_net: registering cpu notifier failed\n");
1626                 goto free_recv_bufs;
1627         }
1628
1629         /* Assume link up if device can't report link status;
1630          * otherwise get link status from config. */
1631         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
1632                 netif_carrier_off(dev);
1633                 schedule_work(&vi->config_work);
1634         } else {
1635                 vi->status = VIRTIO_NET_S_LINK_UP;
1636                 netif_carrier_on(dev);
1637         }
1638
1639         pr_debug("virtnet: registered device %s with %d RX and TX vqs\n",
1640                  dev->name, max_queue_pairs);
1641
1642         return 0;
1643
1644 free_recv_bufs:
1645         free_receive_bufs(vi);
1646         unregister_netdev(dev);
1647 free_vqs:
1648         cancel_delayed_work_sync(&vi->refill);
1649         virtnet_del_vqs(vi);
1650 free_index:
1651         free_percpu(vi->vq_index);
1652 free_stats:
1653         free_percpu(vi->stats);
1654 free:
1655         free_netdev(dev);
1656         return err;
1657 }
1658
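     /* Teardown shared by remove and freeze: reset the device first so it
      * stops touching our buffers, then release them and delete the vqs. */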
1659 static void remove_vq_common(struct virtnet_info *vi)
1660 {
1661         vi->vdev->config->reset(vi->vdev);
1662
1663         /* Free unused buffers in both send and recv, if any. */
1664         free_unused_bufs(vi);
1665
1666         free_receive_bufs(vi);
1667
1668         virtnet_del_vqs(vi);
1669 }
1670
1671 static void virtnet_remove(struct virtio_device *vdev)
1672 {
1673         struct virtnet_info *vi = vdev->priv;
1674
1675         unregister_hotcpu_notifier(&vi->nb);
1676
1677         /* Prevent config work handler from accessing the device. */
1678         mutex_lock(&vi->config_lock);
1679         vi->config_enable = false;
1680         mutex_unlock(&vi->config_lock);
1681
1682         unregister_netdev(vi->dev);
1683
1684         remove_vq_common(vi);
1685
1686         flush_work(&vi->config_work);
1687
1688         free_percpu(vi->vq_index);
1689         free_percpu(vi->stats);
1690         free_netdev(vi->dev);
1691 }
1692
1693 #ifdef CONFIG_PM_SLEEP
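     /* Suspend: quiesce NAPI and tear the virtqueues down completely;
      * virtnet_restore() rebuilds them from scratch on resume. */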
1694 static int virtnet_freeze(struct virtio_device *vdev)
1695 {
1696         struct virtnet_info *vi = vdev->priv;
1697         int i;
1698
1699         /* Prevent config work handler from accessing the device */
1700         mutex_lock(&vi->config_lock);
1701         vi->config_enable = false;
1702         mutex_unlock(&vi->config_lock);
1703
1704         netif_device_detach(vi->dev);
1705         cancel_delayed_work_sync(&vi->refill);
1706
1707         if (netif_running(vi->dev))
1708                 for (i = 0; i < vi->max_queue_pairs; i++) {
1709                         napi_disable(&vi->rq[i].napi);
1710                         netif_napi_del(&vi->rq[i].napi);
1711                 }
1712
1713         remove_vq_common(vi);
1714
1715         flush_work(&vi->config_work);
1716
1717         return 0;
1718 }
1719
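     /* Resume: recreate the virtqueues, re-enable NAPI and refill the
      * receive rings, falling back to the refill worker if an allocation
      * fails. */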
1720 static int virtnet_restore(struct virtio_device *vdev)
1721 {
1722         struct virtnet_info *vi = vdev->priv;
1723         int err, i;
1724
1725         err = init_vqs(vi);
1726         if (err)
1727                 return err;
1728
1729         if (netif_running(vi->dev))
1730                 for (i = 0; i < vi->max_queue_pairs; i++)
1731                         virtnet_napi_enable(&vi->rq[i]);
1732
1733         netif_device_attach(vi->dev);
1734
1735         for (i = 0; i < vi->curr_queue_pairs; i++)
1736                 if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
1737                         schedule_delayed_work(&vi->refill, 0);
1738
1739         mutex_lock(&vi->config_lock);
1740         vi->config_enable = true;
1741         mutex_unlock(&vi->config_lock);
1742
1743         virtnet_set_queues(vi, vi->curr_queue_pairs);
1744
1745         return 0;
1746 }
1747 #endif
1748
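     /* Bind to any device that identifies itself as a virtio network device. */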
1749 static struct virtio_device_id id_table[] = {
1750         { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
1751         { 0 },
1752 };
1753
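     /* Feature bits the driver is prepared to negotiate with the host. */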
1754 static unsigned int features[] = {
1755         VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
1756         VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
1757         VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
1758         VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
1759         VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
1760         VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
1761         VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
1762         VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
1763         VIRTIO_NET_F_CTRL_MAC_ADDR,
1764         VIRTIO_F_ANY_LAYOUT,
1765 };
1766
1767 static struct virtio_driver virtio_net_driver = {
1768         .feature_table = features,
1769         .feature_table_size = ARRAY_SIZE(features),
1770         .driver.name =  KBUILD_MODNAME,
1771         .driver.owner = THIS_MODULE,
1772         .id_table =     id_table,
1773         .probe =        virtnet_probe,
1774         .remove =       virtnet_remove,
1775         .config_changed = virtnet_config_changed,
1776 #ifdef CONFIG_PM_SLEEP
1777         .freeze =       virtnet_freeze,
1778         .restore =      virtnet_restore,
1779 #endif
1780 };
1781
1782 module_virtio_driver(virtio_net_driver);
1783
1784 MODULE_DEVICE_TABLE(virtio, id_table);
1785 MODULE_DESCRIPTION("Virtio network driver");
1786 MODULE_LICENSE("GPL");